/*
 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include "common/index/ctf-index.h"

#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/time.h>
#include <common/compat/poll.h>
#include <common/compat/endian.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer/consumer-timer.h>
#include <common/consumer/consumer.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-testpoint.h>
#include <common/align.h>
#include <common/consumer/consumer-metadata-cache.h>
#include <common/trace-chunk.h>
#include <common/trace-chunk-registry.h>
#include <common/string-utils/format.h>
#include <common/dynamic-array.h>

struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};

enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};

struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};

/* Flag used to temporarily pause data consumption from testpoints. */
int data_consumption_paused;

/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds have hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
int consumer_quit;

/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;

static const char *get_consumer_domain(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return DEFAULT_KERNEL_TRACE_DIR;
	case LTTNG_CONSUMER64_UST:
		/* Fall-through. */
	case LTTNG_CONSUMER32_UST:
		return DEFAULT_UST_TRACE_DIR;
	default:
		abort();
	}
}

/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
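
/*
 * Wake up the thread polling on the health quit pipe by writing a single
 * byte to it.
 */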
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
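
/*
 * Post a channel add/delete/quit message on the context's channel pipe so the
 * channel management thread picks it up.
 */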
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;

	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}

void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
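
/*
 * Read one channel message from the context's channel pipe and return its
 * payload (channel pointer, key and action) to the caller.
 */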
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}

	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;

error:
	return (int) ret;
}

/*
 * Cleanup the stream list of a channel. Those streams are not yet globally
 * visible.
 */
static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);

		/*
		 * Once a stream is added to this list, the buffers were created so we
		 * have a guarantee that this call will succeed. Setting the monitor
		 * mode to 0 so we don't lock nor try to delete the stream from the
		 * global hash table.
		 */
		stream->monitor = 0;
		consumer_stream_destroy(stream, NULL);
	}
}

/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
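
/*
 * Invalidate the key of an existing stream so that a new stream can be added
 * to the hash table with the same key without colliding on lookup.
 */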
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
}

/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}

/*
 * There is a possibility that the consumer does not have enough time between
 * the close of the channel on the session daemon and the cleanup in here thus
 * once we have a channel add with an existing key, we know for sure that this
 * channel will eventually get cleaned up by all streams being closed.
 *
 * This function just nullifies the already existing channel key.
 */
static void steal_channel_key(uint64_t key)
{
	struct lttng_consumer_channel *channel;

	rcu_read_lock();
	channel = consumer_find_channel(key);
	if (channel) {
		channel->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this channel when iterating over the hash table. Just change the
		 * node key.
		 */
		channel->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}
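
/*
 * RCU callback freeing a channel object once no reader can hold a reference
 * to it anymore.
 */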
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_free_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}
	free(channel);
}

/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call_rcu since we don't want the
	 * socket fds to be reassigned, thus potentially creating a bad state of
	 * the relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	pthread_mutex_destroy(&relayd->ctrl_sock_mutex);
	free(relayd);
}

/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}

/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Destroy streams that might have been left in the stream list. */
	clean_channel_stream_list(channel);

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}
	if (channel->monitor_timer_enabled == 1) {
		consumer_timer_monitor_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	lttng_trace_chunk_put(channel->trace_chunk);
	channel->trace_chunk = NULL;

	if (channel->is_published) {
		int ret;

		rcu_read_lock();
		iter.iter.node = &channel->node.node;
		ret = lttng_ht_del(consumer_data.channel_ht, &iter);
		assert(!ret);

		iter.iter.node = &channel->channels_by_session_id_ht_node.node;
		ret = lttng_ht_del(consumer_data.channels_by_session_id_ht,
				&iter);
		assert(!ret);
		rcu_read_unlock();
	}

	channel->is_deleted = true;
	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}

/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}

/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}

/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state have changed.
 */
void lttng_consumer_cleanup_relayd(struct consumer_relayd_sock_pair *relayd)
{
	uint64_t netidx;

	assert(relayd);

	DBG("Cleaning up relayd object ID %" PRIu64, relayd->net_seq_idx);

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	notify_thread_lttng_pipe(relayd->ctx->consumer_data_pipe);
	notify_thread_lttng_pipe(relayd->ctx->consumer_metadata_pipe);
}

/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}

/*
 * Completely destroy a stream from every visible data structure and the given
 * hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
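
/*
 * Copy the channel's read-only attributes (currently the tracefile size) into
 * the stream's local view of those attributes.
 */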
void consumer_stream_update_channel_attributes(
		struct lttng_consumer_stream *stream,
		struct lttng_consumer_channel *channel)
{
	stream->channel_read_only_attributes.tracefile_size =
			channel->tracefile_size;
}

/*
 * Add a stream to the global list protected by a mutex.
 */
void consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}

/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}

/*
 * Allocate and return a consumer relayd socket.
 */
static struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}

/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}

/*
 * Find a relayd and send the stream.
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(stream->net_seq_idx != -1ULL);
	assert(path);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				get_consumer_domain(), path, &stream->relayd_stream_id,
				stream->chan->tracefile_size,
				stream->chan->tracefile_count,
				stream->trace_chunk);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd add stream failed. Cleaning up relayd %" PRIu64 ".",
					relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}

/*
 * Find a relayd and send the streams sent message.
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd streams sent failed. Cleaning up relayd %" PRIu64 ".",
					relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	ret = 0;
	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}

/*
 * Find a relayd and close the stream.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}

/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);

		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}

/*
 * Write a character on the metadata poll pipe to wake the metadata thread.
 * Returns 0 on success, -1 on error.
 */
int consumer_metadata_wakeup_pipe(const struct lttng_consumer_channel *channel)
{
	int ret = 0;

	DBG("Waking up metadata poll thread (writing to pipe): channel name = '%s'",
			channel->name);
	if (channel->monitor && channel->metadata_stream) {
		const char dummy = 'c';
		const ssize_t write_ret = lttng_write(
				channel->metadata_stream->ust_metadata_poll_pipe[1],
				&dummy, 1);

		if (write_ret < 1) {
			if (errno == EWOULDBLOCK) {
				/*
				 * This is fine, the metadata poll thread
				 * is having a hard time keeping-up, but
				 * it will eventually wake-up and consume
				 * the available data.
				 */
				ret = 0;
			} else {
				ret = -1;
				PERROR("Failed to write to UST metadata pipe while attempting to wake-up the metadata poll thread");
			}
		}
	}

	return ret;
}

/*
 * Trigger a dump of the metadata content. Following/during the successful
 * completion of this call, the metadata poll thread will start receiving
 * metadata packets to consume.
 *
 * The caller must hold the channel and stream locks.
 */
static
int consumer_metadata_stream_dump(struct lttng_consumer_stream *stream)
{
	int ret;

	ASSERT_LOCKED(stream->chan->lock);
	ASSERT_LOCKED(stream->lock);
	assert(stream->metadata_flag);
	assert(stream->chan->trace_chunk);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * Reset the position of what has been read from the
		 * metadata cache to 0 so we can dump it again.
		 */
		ret = kernctl_metadata_cache_dump(stream->wait_fd);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Reset the position pushed from the metadata cache so it
		 * will write from the beginning on the next push.
		 */
		stream->ust_metadata_pushed = 0;
		ret = consumer_metadata_wakeup_pipe(stream->chan);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}
	if (ret < 0) {
		ERR("Failed to dump the metadata cache");
	}
	return ret;
}
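
/*
 * Replace the channel's current trace chunk: acquire a reference on the new
 * chunk (if any), release the reference on the previous one, and publish the
 * new chunk under the channel lock.
 */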
int lttng_consumer_channel_set_trace_chunk(
		struct lttng_consumer_channel *channel,
		struct lttng_trace_chunk *new_trace_chunk)
{
	pthread_mutex_lock(&channel->lock);
	if (channel->is_deleted) {
		/*
		 * The channel has been logically deleted and should no longer
		 * be used. It has released its reference to its current trace
		 * chunk and should not acquire a new one.
		 *
		 * Return success as there is nothing for the caller to do.
		 */
		goto end;
	}

	/*
	 * The acquisition of the reference cannot fail (barring
	 * a severe internal error) since a reference to the published
	 * chunk is already held by the caller.
	 */
	if (new_trace_chunk) {
		const bool acquired_reference = lttng_trace_chunk_get(
				new_trace_chunk);

		assert(acquired_reference);
	}

	lttng_trace_chunk_put(channel->trace_chunk);
	channel->trace_chunk = new_trace_chunk;
end:
	pthread_mutex_unlock(&channel->lock);
	return 0;
}

/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const uint64_t *chunk_id,
		const char *pathname,
		const char *name,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval,
		bool is_in_live_session,
		const char *root_shm_path,
		const char *shm_path)
{
	struct lttng_consumer_channel *channel = NULL;
	struct lttng_trace_chunk *trace_chunk = NULL;

	if (chunk_id) {
		trace_chunk = lttng_trace_chunk_registry_find_chunk(
				consumer_data.chunk_registry, session_id,
				*chunk_id);
		if (!trace_chunk) {
			ERR("Failed to find trace chunk reference during creation of channel");
			goto end;
		}
	}

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	channel->is_live = is_in_live_session;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		assert(0);
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	if (root_shm_path) {
		strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path));
		channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0';
	}
	if (shm_path) {
		strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path));
		channel->shm_path[sizeof(channel->shm_path) - 1] = '\0';
	}

	lttng_ht_node_init_u64(&channel->node, channel->key);
	lttng_ht_node_init_u64(&channel->channels_by_session_id_ht_node,
			channel->session_id);

	channel->wait_fd = -1;
	CDS_INIT_LIST_HEAD(&channel->streams.head);

	if (trace_chunk) {
		int ret = lttng_consumer_channel_set_trace_chunk(channel,
				trace_chunk);
		if (ret) {
			goto error;
		}
	}

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	lttng_trace_chunk_put(trace_chunk);
	return channel;
error:
	consumer_del_channel(channel);
	channel = NULL;
	goto end;
}

/*
 * Add a channel to the global list protected by a mutex.
 *
 * Always return 0 indicating success.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);

	/*
	 * This gives us a guarantee that the channel we are about to add to the
	 * channel hash table will be unique. See this function comment on why
	 * we need to steal the channel key at this stage.
	 */
	steal_channel_key(channel->key);

	rcu_read_lock();
	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);
	lttng_ht_add_u64(consumer_data.channels_by_session_id_ht,
			&channel->channels_by_session_id_ht_node);
	rcu_read_unlock();
	channel->is_published = true;

	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return 0;
}

/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht, int *nb_inactive_fd)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	*nb_inactive_fd = 0;
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 *
		 * We track the number of inactive FDs because they still need to be
		 * closed by the polling thread after a wakeup on the data_pipe or
		 * metadata_pipe.
		 */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			(*nb_inactive_fd)++;
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;

	(*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe);
	(*pollfd)[i + 1].events = POLLIN | POLLPRI;
	return i;
}

/*
 * Poll on the should_quit pipe and the command socket. Return -1 on
 * error, 1 if should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return 1;
	}
	return 0;
}

/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}

/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;
	unsigned int trace_chunks_left;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);
	lttng_ht_destroy(consumer_data.channels_by_session_id_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);

	/*
	 * Trace chunks in the registry may still exist if the session
	 * daemon has encountered an internal error and could not
	 * tear down its sessions and/or trace chunks properly.
	 *
	 * Release the session daemon's implicit reference to any remaining
	 * trace chunk and print an error if any trace chunk was found. Note
	 * that there are _no_ legitimate cases for trace chunks to be left,
	 * it is a leak. However, it can happen following a crash of the
	 * session daemon and not emptying the registry would cause an assertion
	 * to fail.
	 */
	trace_chunks_left = lttng_trace_chunk_registry_put_each_chunk(
			consumer_data.chunk_registry);
	if (trace_chunks_left) {
		ERR("%u trace chunks are leaked by lttng-consumerd. "
				"This can be caused by an internal error of the session daemon.",
				trace_chunks_left);
	}
	/* Run all callbacks freeing each chunk. */
	rcu_barrier();
	lttng_trace_chunk_registry_destroy(consumer_data.chunk_registry);
}

/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	CMM_STORE_SHARED(consumer_quit, 1);
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}

/*
 * Flush pending writes to trace output disk file.
 */
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int ret;
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
	if (ret && ret != -ENOSYS) {
		errno = ret;
		PERROR("posix_fadvise on fd %i", outfd);
	}
}

/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible for doing the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx, bool locked_by_caller),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ctx->consumer_wakeup_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_wakeup_pipe) {
		goto error_wakeup_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ctx->channel_monitor_pipe = -1;

	return ctx;

error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
error_wakeup_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}

/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	if (!ctx) {
		return;
	}

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
	utils_close_pipe(ctx->consumer_should_quit);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}

/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * not to clutter the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}

	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}

/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream and the channel lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written.
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_stream *stream,
		const struct lttng_buffer_view *buffer,
		unsigned long padding)
{
	ssize_t ret = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;
	const size_t subbuf_content_size = buffer->size - padding;
	size_t write_len;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();
	assert(stream->net_seq_idx != (uint64_t) -1ULL ||
			stream->trace_chunk);

	/* Check whether the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = subbuf_content_size;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;

		/* Write metadata stream id before payload */
		if (stream->metadata_flag) {
			ret = write_relayd_metadata_id(outfd, stream, padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}
		}

		write_len = subbuf_content_size;
	} else {
		/* No streaming; we have to write the full padding. */
		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + buffer->size) >
				stream->chan->tracefile_size) {
			ret = consumer_stream_rotate_output_files(stream);
			if (ret) {
				goto end;
			}
			outfd = stream->out_fd;
			orig_offset = 0;
		}
		stream->tracefile_size_current += buffer->size;
		write_len = buffer->size;
	}

	/*
	 * This call guarantees that len or less is returned. It's impossible to
	 * receive a ret value that is bigger than len.
	 */
	ret = lttng_write(outfd, buffer->data, write_len);
	DBG("Consumer mmap write() ret %zd (len %zu)", ret, write_len);
	if (ret < 0 || ((size_t) ret != write_len)) {
		/*
		 * Report error to caller if nothing was written else at least send the
		 * amount written.
		 */
		if (ret < 0) {
			ret = -errno;
		}
		relayd_hang_up = 1;

		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Consumer mmap write detected relayd hang up");
		} else {
			/* Unhandled error, print it and stop function right now. */
			PERROR("Error in write mmap (ret %zd != write_len %zu)", ret,
					write_len);
		}
		goto write_error;
	}
	stream->output_written += ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, write_len,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += write_len;
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return ret;
}

/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check whether the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			written = -ret;
			goto end;
		}
	}
	splice_pipe = stream->splice_pipe;

	/* Write metadata stream id before payload */
	if (relayd) {
		unsigned long total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			ret = write_relayd_metadata_id(splice_pipe[1], stream,
					padding);
			if (ret < 0) {
				written = ret;
				relayd_hang_up = 1;
				goto write_error;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret < 0) {
			written = ret;
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}
		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = consumer_stream_rotate_output_files(stream);
			if (ret < 0) {
				written = ret;
				goto end;
			}
			outfd = stream->out_fd;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			PERROR("Error in relay splice");
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file (out_fd: %d), ret %zd",
				outfd, ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			relayd_hang_up = 1;
			goto write_error;
		} else if (ret_splice > len) {
			/*
			 * We don't expect this code path to be executed but you never know
			 * so this is an extra protection against a buggy splice().
			 */
			ret = errno;
			written += ret_splice;
			PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
					len);
			goto splice_error;
		} else {
			/* All good, update current len and continue. */
			len -= ret_splice;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	if (!relayd) {
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}

/*
 * Sample the snapshot positions for a specific fd.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_sample_snapshot_positions(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_sample_snapshot_positions(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Get the consumed position (free-running counter position in bytes).
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_consumed_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_consumed_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

void lttng_consumer_close_all_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_all_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}

/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_consumer_channel *channel = NULL;
	bool free_channel = false;

	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);
	/*
	 * Note that this assumes that a stream's channel is never changed and
	 * that the stream's lock doesn't need to be taken to sample its
	 * channel.
	 */
	channel = stream->chan;
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&stream->lock);
	if (channel->metadata_cache) {
		/* Only applicable to userspace consumers. */
		pthread_mutex_lock(&channel->metadata_cache->lock);
	}

	/* Remove any reference to that stream. */
	consumer_stream_delete(stream, ht);

	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&channel->refcount, 1)
			&& !uatomic_read(&channel->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_channel = true;
	}
	stream->chan = NULL;

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	channel->metadata_stream = NULL;

	if (channel->metadata_cache) {
		pthread_mutex_unlock(&channel->metadata_cache->lock);
	}
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_channel) {
		consumer_del_channel(channel);
	}

	lttng_trace_chunk_put(stream->trace_chunk);
	stream->trace_chunk = NULL;
	consumer_stream_free(stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
void consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this ht.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
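
/*
 * Note on lock ordering, as used by the two metadata stream helpers above:
 * consumer_data.lock is always taken first, then the channel lock (with the
 * channel's timer and metadata cache locks nested inside it), and the stream
 * lock last. Keeping additions and deletions on the same ordering is what
 * allows them to run concurrently with the metadata poll thread without
 * deadlocking.
 */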
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
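
/*
 * Both validate_endpoint_status_*() helpers above are invoked from their
 * respective poll threads when a NULL stream pointer is read from the
 * thread's pipe (see the poll loops below): any stream whose endpoint is no
 * longer CONSUMER_ENDPOINT_ACTIVE is torn down at that point, the metadata
 * variant also removing the stream from the thread's poll set first.
 */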
/*
 * Thread polls on metadata file descriptors and writes them to disk or to the
 * network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	DBG("Metadata main loop started");

	while (1) {
restart:
		health_code_update();
		health_poll_entry();
		DBG("Metadata poll wait");
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						if (pipe_len < 0) {
							PERROR("read metadata stream");
						}
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events,
								lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
						lttng_pipe_read_close(ctx->consumer_metadata_pipe);
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI | LPOLLHUP);
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx, false);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			} else if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx, false);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred thus we
						 * simply remove it from the poll set and free the
						 * stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file descriptors
				 * and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}
			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
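
/*
 * Illustrative sketch (assumption, not part of this file): the consumer
 * daemon binary is expected to spawn the poll thread above with the shared
 * lttng_consumer_local_data context as its argument, along the lines of:
 *
 *	pthread_t metadata_thread;
 *
 *	ret = pthread_create(&metadata_thread, NULL,
 *			consumer_thread_metadata_poll, (void *) ctx);
 *
 * The actual thread creation lives in the consumer daemon, not here.
 */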
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	/* 2 for the consumer_data_pipe and wake up pipe */
	const int nb_pipes_fd = 2;
	/* Number of FDs with CONSUMER_ENDPOINT_INACTIVE but still open. */
	int nb_inactive_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* Allocate for all fds */
			pollfd = zmalloc((consumer_data.stream_count + nb_pipes_fd) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			local_stream = zmalloc((consumer_data.stream_count + nb_pipes_fd) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht, &nb_inactive_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, consumer_cleanup the thread */
		if (nb_fd == 0 && nb_inactive_fd == 0 &&
				CMM_LOAD_SHARED(consumer_quit) == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + nb_pipes_fd);
		if (testpoint(consumerd_thread_data_poll)) {
			goto end;
		}
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + nb_pipes_fd, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		if (caa_unlikely(data_consumption_paused)) {
			DBG("Data consumption paused, sleeping...");
			sleep(1);
			goto restart;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Handle wakeup pipe. */
		if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
			char dummy;
			ssize_t pipe_readlen;

			pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy,
					sizeof(dummy));
			if (pipe_readlen < 0) {
				PERROR("Consumer data wakeup pipe");
			}
			/* We've been awakened to handle stream(s). */
			ctx->has_wakeup = 0;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx, false);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read a high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done ||
					local_stream[i]->has_data) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx, false);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}

end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
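
/*
 * Summary of one iteration of the data poll loop above: the local pollfd
 * array is rebuilt whenever consumer_data.need_update is set, then poll() is
 * issued on all stream FDs plus the data and wakeup pipes. Ready FDs are
 * handled in three passes: urgent (POLLPRI) reads first, normal
 * (POLLIN / hangup-flushed / has_data) reads second, and hangup/error
 * handling last, which is where streams that produced no data are deleted.
 */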
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			if (stream->metadata_flag) {
				/* Safe and protected by the stream lock. */
				lttng_ustconsumer_close_metadata(stream->chan);
			} else {
				/*
				 * Note: a mutex is taken internally within
				 * liblttng-ust-ctl to protect timer wakeup_fd
				 * use from concurrent close.
				 */
				lttng_ustconsumer_close_stream_wakeup(stream);
			}
			break;
		default:
			ERR("Unknown consumer_data type");
			break;
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;
	int ret;

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	DBG("Channel main loop started");

	while (1) {
restart:
		health_code_update();
		DBG("Channel poll wait");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						if (ret < 0) {
							ERR("Error reading channel pipe");
						}
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLERR | LPOLLHUP);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						/*
						 * This command should never be called if the channel
						 * has streams monitored by either the data or metadata
						 * thread. The consumer only notifies this thread with a
						 * channel del. command if it receives a destroy
						 * channel command from the session daemon that is sent
						 * if a command prior to the GET_CHANNEL failed.
						 */

						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							health_code_update();
							/* Destroy streams that might have been left in the stream list. */
							clean_channel_stream_list(chan);
							break;
						default:
							ERR("Unknown consumer_data type");
							break;
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						goto restart;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);

				/*
				 * This will close the wait fd for each stream associated to
				 * this channel AND monitored by the data/metadata thread thus
				 * will be cleaned by the right thread.
				 */
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

	/* All is OK */
	err = 0;
end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
error_testpoint:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	ret = lttng_consumer_poll_socket(sockpoll);
	if (ret) {
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * structure to poll for incoming data on communication socket; avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	if (testpoint(consumerd_thread_sessiond)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		if (ret > 0) {
			/* should exit */
			err = 0;
		}
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret) {
		if (ret > 0) {
			/* should exit */
			err = 0;
		}
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			if (ret > 0) {
				/* should exit */
				err = 0;
			}
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;
			goto end;
		}
		if (CMM_LOAD_SHARED(consumer_quit)) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("Received command on sock");
	}
	/* All is OK */
	err = 0;

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * is dead.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_all_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	CMM_STORE_SHARED(consumer_quit, 1);

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set so to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
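
/*
 * Shutdown sequence of the sessiond poll thread above: once the command
 * socket is closed or consumer_quit is observed, all metadata is closed,
 * consumer_quit is published, and the data, channel and health threads are
 * woken up through their respective pipes so that each thread can observe
 * the quit state and exit on its own.
 */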
static int post_consume(struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer,
		struct lttng_consumer_local_data *ctx)
{
	size_t i;
	int ret = 0;
	const size_t count = lttng_dynamic_array_get_count(
			&stream->read_subbuffer_ops.post_consume_cbs);

	for (i = 0; i < count; i++) {
		const post_consume_cb op = *(post_consume_cb *) lttng_dynamic_array_get_element(
				&stream->read_subbuffer_ops.post_consume_cbs,
				i);

		ret = op(stream, subbuffer, ctx);
		if (ret) {
			goto end;
		}
	}
end:
	return ret;
}
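
/*
 * post_consume() runs every callback registered in the stream's
 * read_subbuffer_ops.post_consume_cbs array, in registration order, and
 * stops at the first callback returning a non-zero value. Which callbacks
 * end up registered is decided by the per-domain consumer code, not here.
 */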
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx,
		bool locked_by_caller)
{
	ssize_t ret, written_bytes = 0;
	int rotation_ret;
	struct stream_subbuffer subbuffer = {};

	if (!locked_by_caller) {
		stream->read_subbuffer_ops.lock(stream);
	}

	if (stream->read_subbuffer_ops.on_wake_up) {
		ret = stream->read_subbuffer_ops.on_wake_up(stream);
		if (ret) {
			goto end;
		}
	}

	/*
	 * If the stream was flagged to be ready for rotation before we extract
	 * the next packet, rotate it now.
	 */
	if (stream->rotate_ready) {
		DBG("Rotate stream before consuming data");
		ret = lttng_consumer_rotate_stream(ctx, stream);
		if (ret < 0) {
			ERR("Stream rotation error before consuming data");
			goto end;
		}
	}

	ret = stream->read_subbuffer_ops.get_next_subbuffer(stream, &subbuffer);
	if (ret) {
		if (ret == -ENODATA) {
			/* Not an error. */
			ret = 0;
			goto sleep_stream;
		}
		goto end;
	}

	ret = stream->read_subbuffer_ops.pre_consume_subbuffer(
			stream, &subbuffer);
	if (ret) {
		goto error_put_subbuf;
	}

	written_bytes = stream->read_subbuffer_ops.consume_subbuffer(
			ctx, stream, &subbuffer);
	if (written_bytes <= 0) {
		ERR("Error consuming subbuffer: (%zd)", written_bytes);
		ret = (int) written_bytes;
		goto error_put_subbuf;
	}

	ret = stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuffer);
	if (ret) {
		goto end;
	}

	ret = post_consume(stream, &subbuffer, ctx);
	if (ret) {
		goto end;
	}

	/*
	 * After extracting the packet, we check if the stream is now ready to
	 * be rotated and perform the action immediately.
	 *
	 * Don't overwrite `ret` as callers expect the number of bytes
	 * consumed to be returned on success.
	 */
	rotation_ret = lttng_consumer_stream_is_rotate_ready(stream);
	if (rotation_ret == 1) {
		rotation_ret = lttng_consumer_rotate_stream(ctx, stream);
		if (rotation_ret < 0) {
			ret = rotation_ret;
			ERR("Stream rotation error after consuming data");
			goto end;
		}
	} else if (rotation_ret < 0) {
		ret = rotation_ret;
		ERR("Failed to check if stream was ready to rotate after consuming data");
		goto end;
	}

sleep_stream:
	if (stream->read_subbuffer_ops.on_sleep) {
		stream->read_subbuffer_ops.on_sleep(stream, ctx);
	}

	ret = written_bytes;
end:
	if (!locked_by_caller) {
		stream->read_subbuffer_ops.unlock(stream);
	}

	return ret;
error_put_subbuf:
	(void) stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuffer);
	goto end;
}
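
/*
 * Order of operations in lttng_consumer_read_subbuffer() above: optional
 * on_wake_up hook, pre-read rotation if the stream was already flagged
 * ready, get_next_subbuffer, pre_consume_subbuffer, consume_subbuffer,
 * put_next_subbuffer, post_consume callbacks, post-read rotation check and
 * finally the optional on_sleep hook. On success, the number of bytes
 * written by consume_subbuffer is returned, not the intermediate codes.
 */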
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
int lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channel_ht) {
		goto error;
	}

	consumer_data.channels_by_session_id_ht =
			lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channels_by_session_id_ht) {
		goto error;
	}

	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.relayd_ht) {
		goto error;
	}

	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_list_ht) {
		goto error;
	}

	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_per_chan_id_ht) {
		goto error;
	}

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!data_ht) {
		goto error;
	}

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		goto error;
	}

	consumer_data.chunk_registry = lttng_trace_chunk_registry_create();
	if (!consumer_data.chunk_registry) {
		goto error;
	}

	return 0;

error:
	return -1;
}
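
/*
 * Illustrative usage sketch (assumed caller, not shown in this file): the
 * consumer daemon must call lttng_consumer_init() once at startup, before
 * any channel, stream or relayd object can be handled, e.g.:
 *
 *	if (lttng_consumer_init() < 0) {
 *		ERR("Failed to initialize consumer hash tables");
 *		exit(EXIT_FAILURE);
 *	}
 */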
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
void consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		relayd->sessiond_session_id = sessiond_id;
		relayd_created = 1;

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		/* Needing to exit in the middle of a command: error. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. In any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);
	/*
	 * We gave the ownership of the fd to the relayd structure. Set the
	 * fd to -1 so we don't call close() on it in the error path below.
	 */
	fd = -1;

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If the object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}
}
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A rcu read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check if for a given session id there is still data needed to be extracted
 * from the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		goto data_not_pending;
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		pthread_mutex_lock(&stream->lock);

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		pthread_mutex_unlock(&stream->lock);
	}

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		if (ret < 0) {
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			/* Communication error with the relayd so no data pending. */
			goto data_not_pending;
		}

		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}

			if (ret == 1) {
				pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
				goto data_pending;
			} else if (ret < 0) {
				ERR("Relayd data pending failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
				lttng_consumer_cleanup_relayd(relayd);
				pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
				goto data_not_pending;
			}
		}

		/* Send end command for data pending. */
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd end data pending failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
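
/*
 * Typical usage, mirroring the command-handling paths earlier in this file:
 * reply to the session daemon right after processing a command and treat a
 * negative return as a dead command socket, e.g.:
 *
 *	ret = consumer_send_status_msg(sock, ret_code);
 *	if (ret < 0) {
 *		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
 *	}
 */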
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	memset(&msg, 0, sizeof(msg));
	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t nb_packets_per_stream,
		uint64_t max_sb_size)
{
	unsigned long start_pos;

	if (!nb_packets_per_stream) {
		return consumed_pos;	/* Grab everything */
	}
	start_pos = produced_pos - offset_align_floor(produced_pos, max_sb_size);
	start_pos -= max_sb_size * nb_packets_per_stream;
	if ((long) (start_pos - consumed_pos) < 0) {
		return consumed_pos;	/* Grab everything */
	}
	return start_pos;
}
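
/*
 * Worked example (power-of-two sub-buffers assumed): with
 * max_sb_size = 4096, produced_pos = 10000 and consumed_pos = 0, the
 * produced position aligns down to 8192. With nb_packets_per_stream = 1 the
 * start position becomes 8192 - 4096 = 4096, so only the last complete
 * packet (plus the one in progress) is kept. With nb_packets_per_stream = 2
 * the start position becomes 0, and since that is not before consumed_pos,
 * everything available is consumed. Whenever the computed start would fall
 * before consumed_pos, consumed_pos itself is returned instead.
 */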
/* Stream lock must be held by the caller. */
static int sample_stream_positions(struct lttng_consumer_stream *stream,
		unsigned long *produced, unsigned long *consumed)
{
	int ret;

	ASSERT_LOCKED(stream->lock);

	ret = lttng_consumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		ERR("Failed to sample snapshot positions");
		goto end;
	}

	ret = lttng_consumer_get_produced_snapshot(stream, produced);
	if (ret < 0) {
		ERR("Failed to sample produced position");
		goto end;
	}

	ret = lttng_consumer_get_consumed_snapshot(stream, consumed);
	if (ret < 0) {
		ERR("Failed to sample consumed position");
	}

end:
	return ret;
}
/*
 * Sample the rotate position for all the streams of a channel. If a stream
 * is already at the rotate position (produced == consumed), we flag it as
 * ready for rotation. The rotation of ready streams occurs after we have
 * replied to the session daemon that we have finished sampling the positions.
 * Must be called with RCU read-side lock held to ensure existence of channel.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_rotate_channel(struct lttng_consumer_channel *channel,
		uint64_t key, uint64_t relayd_id, uint32_t metadata,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;
	struct lttng_dynamic_array stream_rotation_positions;
	uint64_t next_chunk_id, stream_count = 0;
	enum lttng_trace_chunk_status chunk_status;
	const bool is_local_trace = relayd_id == -1ULL;
	struct consumer_relayd_sock_pair *relayd = NULL;
	bool rotating_to_new_chunk = true;
	/* Array of `struct lttng_consumer_stream *` */
	struct lttng_dynamic_pointer_array streams_packet_to_open;
	size_t stream_idx;

	DBG("Consumer sample rotate position for channel %" PRIu64, key);

	lttng_dynamic_array_init(&stream_rotation_positions,
			sizeof(struct relayd_stream_rotation_position), NULL);
	lttng_dynamic_pointer_array_init(&streams_packet_to_open, NULL);

	rcu_read_lock();

	pthread_mutex_lock(&channel->lock);
	assert(channel->trace_chunk);
	chunk_status = lttng_trace_chunk_get_id(channel->trace_chunk,
			&next_chunk_id);
	if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
		ret = -1;
		goto end_unlock_channel;
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key, &iter.iter,
			stream, node_channel_id.node) {
		unsigned long produced_pos = 0, consumed_pos = 0;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		if (stream->trace_chunk == stream->chan->trace_chunk) {
			rotating_to_new_chunk = false;
		}

		/*
		 * Do not flush a packet when rotating from a NULL trace
		 * chunk. The stream has no means to output data, and the prior
		 * rotation which rotated to NULL performed that side-effect
		 * already. No new data can be produced when a stream has no
		 * associated trace chunk (e.g. a stop followed by a rotate).
		 */
		if (stream->trace_chunk) {
			bool flush_active;

			if (stream->metadata_flag) {
				/*
				 * Don't produce an empty metadata packet,
				 * simply close the current one.
				 *
				 * Metadata is regenerated on every trace chunk
				 * switch; there is no concern that no data was
				 * produced.
				 */
				flush_active = true;
			} else {
				/*
				 * Only flush an empty packet if the "packet
				 * open" could not be performed on transition
				 * to a new trace chunk and no packets were
				 * consumed within the chunk's lifetime.
				 */
				if (stream->opened_packet_in_current_trace_chunk) {
					flush_active = true;
				} else {
					/*
					 * Stream could have been full at the
					 * time of rotation, but then have had
					 * no activity at all.
					 *
					 * It is important to flush a packet
					 * to prevent 0-length files from being
					 * produced as most viewers choke on
					 * them.
					 *
					 * Unfortunately viewers will not be
					 * able to know that tracing was active
					 * for this stream during this trace
					 * chunk's lifetime.
					 */
					ret = sample_stream_positions(stream, &produced_pos, &consumed_pos);
					if (ret) {
						goto end_unlock_stream;
					}

					/*
					 * Don't flush an empty packet if data
					 * was produced; it will be consumed
					 * before the rotation completes.
					 */
					flush_active = produced_pos != consumed_pos;
					if (!flush_active) {
						enum lttng_trace_chunk_status chunk_status;
						const char *trace_chunk_name;
						uint64_t trace_chunk_id;

						chunk_status = lttng_trace_chunk_get_name(
								stream->trace_chunk,
								&trace_chunk_name, NULL);
						if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_NONE) {
							trace_chunk_name = "none";
						}

						/*
						 * Consumer trace chunks are
						 * never anonymous.
						 */
						chunk_status = lttng_trace_chunk_get_id(
								stream->trace_chunk,
								&trace_chunk_id);
						assert(chunk_status ==
								LTTNG_TRACE_CHUNK_STATUS_OK);

						DBG("Unable to open packet for stream during trace chunk's lifetime. "
								"Flushing an empty packet to prevent an empty file from being created: "
								"stream id = %" PRIu64 ", trace chunk name = `%s`, trace chunk id = %" PRIu64,
								stream->key, trace_chunk_name, trace_chunk_id);
					}
				}
			}

			/*
			 * Close the current packet before sampling the
			 * ring buffer positions.
			 */
			ret = consumer_stream_flush_buffer(stream, flush_active);
			if (ret < 0) {
				ERR("Failed to flush stream %" PRIu64 " during channel rotation",
						stream->key);
				goto end_unlock_stream;
			}
		}

		ret = lttng_consumer_take_snapshot(stream);
		if (ret < 0 && ret != -ENODATA && ret != -EAGAIN) {
			ERR("Failed to sample snapshot position during channel rotation");
			goto end_unlock_stream;
		}
		if (!ret) {
			ret = lttng_consumer_get_produced_snapshot(stream,
					&produced_pos);
			if (ret < 0) {
				ERR("Failed to sample produced position during channel rotation");
				goto end_unlock_stream;
			}

			ret = lttng_consumer_get_consumed_snapshot(stream,
					&consumed_pos);
			if (ret < 0) {
				ERR("Failed to sample consumed position during channel rotation");
				goto end_unlock_stream;
			}
		}
		/*
		 * Align produced position on the start-of-packet boundary of the first
		 * packet going into the next trace chunk.
		 */
		produced_pos = ALIGN_FLOOR(produced_pos, stream->max_sb_size);
		if (consumed_pos == produced_pos) {
			DBG("Set rotate ready for stream %" PRIu64 " produced = %lu consumed = %lu",
					stream->key, produced_pos, consumed_pos);
			stream->rotate_ready = true;
		} else {
			DBG("Different consumed and produced positions "
					"for stream %" PRIu64 " produced = %lu consumed = %lu",
					stream->key, produced_pos, consumed_pos);
		}
		/*
		 * The rotation position is based on the packet_seq_num of the
		 * packet following the last packet that was consumed for this
		 * stream, incremented by the offset between produced and
		 * consumed positions. This rotation position is a lower bound
		 * (inclusive) at which the next trace chunk starts. Since it
		 * is a lower bound, it is OK if the packet_seq_num does not
		 * correspond exactly to the same packet identified by the
		 * consumed_pos, which can happen in overwrite mode.
		 */
		if (stream->sequence_number_unavailable) {
			/*
			 * Rotation should never be performed on a session which
			 * interacts with a pre-2.8 lttng-modules, which does
			 * not implement packet sequence number.
			 */
			ERR("Failure to rotate stream %" PRIu64 ": sequence number unavailable",
					stream->key);
			ret = -1;
			goto end_unlock_stream;
		}
		stream->rotate_position = stream->last_sequence_number + 1 +
				((produced_pos - consumed_pos) / stream->max_sb_size);
		DBG("Set rotation position for stream %" PRIu64 " at position %" PRIu64,
				stream->key, stream->rotate_position);

		if (!is_local_trace) {
			/*
			 * The relay daemon control protocol expects a rotation
			 * position as "the sequence number of the first packet
			 * _after_ the current trace chunk".
			 */
			const struct relayd_stream_rotation_position position = {
				.stream_id = stream->relayd_stream_id,
				.rotate_at_seq_num = stream->rotate_position,
			};

			ret = lttng_dynamic_array_add_element(
					&stream_rotation_positions,
					&position);
			if (ret) {
				ERR("Failed to allocate stream rotation position");
				goto end_unlock_stream;
			}
			stream_count++;
		}

		stream->opened_packet_in_current_trace_chunk = false;

		if (rotating_to_new_chunk && !stream->metadata_flag) {
			/*
			 * Attempt to flush an empty packet as close to the
			 * rotation point as possible. In the event where a
			 * stream remains inactive after the rotation point,
			 * this ensures that the new trace chunk has a
			 * beginning timestamp set at the beginning of the
			 * trace chunk instead of only creating an empty
			 * packet when the trace chunk is stopped.
			 *
			 * This indicates to the viewers that the stream
			 * was being recorded, but more importantly it
			 * allows viewers to determine a usable trace
			 * intersection.
			 *
			 * This presents a problem in the case where the
			 * ring-buffer is completely full.
			 *
			 * Consider the following scenario:
			 *   - The consumption of data is slow (slow network,
			 *     for instance),
			 *   - The ring buffer is full,
			 *   - A rotation is initiated,
			 *     - The flush below does nothing (no space left to
			 *       open a new packet),
			 *   - The other streams rotate very soon, and new
			 *     data is produced in the new chunk,
			 *   - This stream completes its rotation long after the
			 *     rotation was initiated,
			 *   - The session is stopped before any event can be
			 *     produced in this stream's buffers.
			 *
			 * The resulting trace chunk will have a single packet
			 * temporally at the end of the trace chunk for this
			 * stream making the stream intersection more narrow
			 * than it should be.
			 *
			 * To work-around this, an empty flush is performed
			 * after the first consumption of a packet during a
			 * rotation if open_packet fails. The idea is that
			 * consuming a packet frees enough space to switch
			 * packets in this scenario and allows the tracer to
			 * "stamp" the beginning of the new trace chunk at the
			 * earliest possible point.
			 *
			 * The packet open is performed after the channel
			 * rotation to ensure that no attempt to open a packet
			 * is performed in a stream that has no active trace
			 * chunk.
			 */
			ret = lttng_dynamic_pointer_array_add_pointer(
					&streams_packet_to_open, stream);
			if (ret) {
				PERROR("Failed to add a stream pointer to array of streams in which to open a packet");
				ret = -1;
				goto end_unlock_stream;
			}
		}

		pthread_mutex_unlock(&stream->lock);
	}
	stream = NULL;

	if (!is_local_trace) {
		relayd = consumer_find_relayd(relayd_id);
		if (!relayd) {
			ERR("Failed to find relayd %" PRIu64, relayd_id);
			ret = -1;
			goto end_unlock_channel;
		}

		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_rotate_streams(&relayd->control_sock, stream_count,
				rotating_to_new_chunk ? &next_chunk_id : NULL,
				(const struct relayd_stream_rotation_position *)
						stream_rotation_positions.buffer.data);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd rotate stream failed. Cleaning up relayd %" PRIu64,
					relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end_unlock_channel;
		}
	}

	for (stream_idx = 0;
			stream_idx < lttng_dynamic_pointer_array_get_count(
				&streams_packet_to_open);
			stream_idx++) {
		enum consumer_stream_open_packet_status status;

		stream = lttng_dynamic_pointer_array_get_pointer(
				&streams_packet_to_open, stream_idx);

		pthread_mutex_lock(&stream->lock);
		status = consumer_stream_open_packet(stream);
		pthread_mutex_unlock(&stream->lock);
		switch (status) {
		case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
			DBG("Opened a packet after a rotation: stream id = %" PRIu64
					", channel name = %s, session id = %" PRIu64,
					stream->key, stream->chan->name,
					stream->chan->session_id);
			break;
		case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
			/*
			 * Can't open a packet as there is no space left
			 * in the buffer. A new packet will be opened
			 * once one has been consumed.
			 */
			DBG("No space left to open a packet after a rotation: stream id = %" PRIu64
					", channel name = %s, session id = %" PRIu64,
					stream->key, stream->chan->name,
					stream->chan->session_id);
			break;
		case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
			/* Logged by callee. */
			ret = -1;
			goto end_unlock_channel;
		default:
			break;
		}
	}

	pthread_mutex_unlock(&channel->lock);
	ret = 0;
	goto end;

end_unlock_stream:
	pthread_mutex_unlock(&stream->lock);
end_unlock_channel:
	pthread_mutex_unlock(&channel->lock);
end:
	rcu_read_unlock();
	lttng_dynamic_array_reset(&stream_rotation_positions);
	lttng_dynamic_pointer_array_reset(&streams_packet_to_open);
	return ret;
}
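
/*
 * lttng_consumer_rotate_channel() above proceeds in three phases: first,
 * every stream of the channel is flushed (when appropriate) and its rotate
 * position is sampled, flagging streams whose consumed position already
 * equals the produced position as rotate-ready; second, for network traces,
 * the sampled positions are sent to the relay daemon in a single
 * relayd_rotate_streams() call; third, an empty packet is opened in each
 * data stream rotating to a new trace chunk so the chunk gets a usable
 * beginning timestamp even if the stream stays inactive afterwards.
 */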
int consumer_clear_buffer(struct lttng_consumer_stream *stream)
{
	int ret;
	unsigned long consumed_pos_before, consumed_pos_after;

	ret = lttng_consumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		ERR("Taking snapshot positions");
		goto end;
	}

	ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos_before);
	if (ret < 0) {
		ERR("Consumed snapshot position");
		goto end;
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = kernctl_buffer_clear(stream->wait_fd);
		if (ret < 0) {
			ERR("Failed to clear kernel stream (ret = %d)", ret);
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_clear_buffer(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		goto end;
	}

	ret = lttng_consumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		ERR("Taking snapshot positions");
		goto end;
	}
	ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos_after);
	if (ret < 0) {
		ERR("Consumed snapshot position");
		goto end;
	}
	DBG("clear: before: %lu after: %lu", consumed_pos_before, consumed_pos_after);
end:
	return ret;
}
int consumer_clear_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	ret = consumer_stream_flush_buffer(stream, 1);
	if (ret < 0) {
		ERR("Failed to flush stream %" PRIu64 " during channel clear",
				stream->key);
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	ret = consumer_clear_buffer(stream);
	if (ret < 0) {
		ERR("Failed to clear stream %" PRIu64 " during channel clear",
				stream->key);
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	ret = LTTCOMM_CONSUMERD_SUCCESS;
error:
	return ret;
}
int consumer_clear_unmonitored_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	pthread_mutex_lock(&channel->lock);
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();
		pthread_mutex_lock(&stream->lock);
		ret = consumer_clear_stream(stream);
		if (ret) {
			goto error_unlock;
		}
		pthread_mutex_unlock(&stream->lock);
	}
	pthread_mutex_unlock(&channel->lock);
	rcu_read_unlock();
	return 0;

error_unlock:
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&channel->lock);
	rcu_read_unlock();
	return ret;
}
/*
 * Check if a stream is ready to be rotated after extracting it.
 *
 * Return 1 if it is ready for rotation, 0 if it is not, a negative value on
 * error. Stream lock must be held.
 */
int lttng_consumer_stream_is_rotate_ready(struct lttng_consumer_stream *stream)
{
	DBG("Check is rotate ready for stream %" PRIu64
			" ready %u rotate_position %" PRIu64
			" last_sequence_number %" PRIu64,
			stream->key, stream->rotate_ready,
			stream->rotate_position, stream->last_sequence_number);
	if (stream->rotate_ready) {
		return 1;
	}

	/*
	 * If packet seq num is unavailable, it means we are interacting
	 * with a pre-2.8 lttng-modules which does not implement the
	 * sequence number. Rotation should never be used by sessiond in this
	 * scenario.
	 */
	if (stream->sequence_number_unavailable) {
		ERR("Internal error: rotation used on stream %" PRIu64
				" with unavailable sequence number",
				stream->key);
		return -1;
	}

	if (stream->rotate_position == -1ULL ||
			stream->last_sequence_number == -1ULL) {
		return 0;
	}

	/*
	 * Rotate position not reached yet. The stream rotate position is
	 * the position of the next packet belonging to the next trace chunk,
	 * but consumerd considers rotation ready when reaching the last
	 * packet of the current chunk, hence the "rotate_position - 1".
	 */

	DBG("Check is rotate ready for stream %" PRIu64
			" last_sequence_number %" PRIu64
			" rotate_position %" PRIu64,
			stream->key, stream->last_sequence_number,
			stream->rotate_position);
	if (stream->last_sequence_number >= stream->rotate_position - 1) {
		return 1;
	}

	return 0;
}
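
/*
 * Example: if the rotate position of a stream was sampled as 42, the first
 * packet of the next trace chunk has sequence number 42, so the stream is
 * reported rotate-ready as soon as last_sequence_number reaches 41
 * (rotate_position - 1), i.e. once the last packet of the current chunk has
 * been consumed.
 */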
/*
 * Reset the state for a stream after a rotation occurred.
 */
void lttng_consumer_reset_stream_rotate_state(struct lttng_consumer_stream *stream)
{
	DBG("lttng_consumer_reset_stream_rotate_state for stream %" PRIu64,
			stream->key);
	stream->rotate_position = -1ULL;
	stream->rotate_ready = false;
}
/*
 * Perform the rotation of a local stream file.
 */
static int rotate_local_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	int ret = 0;

	DBG("Rotate local stream: stream key %" PRIu64 ", channel key %" PRIu64,
			stream->key, stream->chan->key);
	stream->tracefile_size_current = 0;
	stream->tracefile_count_current = 0;

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("Failed to close stream out_fd of channel \"%s\"",
					stream->chan->name);
		}
		stream->out_fd = -1;
	}

	if (stream->index_file) {
		lttng_index_file_put(stream->index_file);
		stream->index_file = NULL;
	}

	if (!stream->trace_chunk) {
		goto end;
	}

	ret = consumer_stream_create_output_files(stream, true);
end:
	return ret;
}
/*
 * Performs the stream rotation for the rotate session feature if needed.
 * It must be called with the channel and stream locks held.
 *
 * Return 0 on success, a negative number on error.
 */
int lttng_consumer_rotate_stream(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	int ret;

	DBG("Consumer rotate stream %" PRIu64, stream->key);

	/*
	 * Update the stream's 'current' chunk to the session's (channel)
	 * now-current chunk.
	 */
	lttng_trace_chunk_put(stream->trace_chunk);
	if (stream->chan->trace_chunk == stream->trace_chunk) {
		/*
		 * A channel can be rotated and not have a "next" chunk
		 * to transition to. In that case, the channel's "current chunk"
		 * has not been closed yet, but it has not been updated to
		 * a "next" trace chunk either. Hence, the stream, like its
		 * parent channel, becomes part of no chunk and can't output
		 * anything until a new trace chunk is created.
		 */
		stream->trace_chunk = NULL;
	} else if (stream->chan->trace_chunk &&
			!lttng_trace_chunk_get(stream->chan->trace_chunk)) {
		ERR("Failed to acquire a reference to channel's trace chunk during stream rotation");
		ret = -1;
		goto error;
	} else {
		/*
		 * Update the stream's trace chunk to its parent channel's
		 * current trace chunk.
		 */
		stream->trace_chunk = stream->chan->trace_chunk;
	}

	if (stream->net_seq_idx == (uint64_t) -1ULL) {
		ret = rotate_local_stream(ctx, stream);
		if (ret < 0) {
			ERR("Failed to rotate stream, ret = %i", ret);
			goto error;
		}
	}

	if (stream->metadata_flag && stream->trace_chunk) {
		/*
		 * If the stream has transitioned to a new trace
		 * chunk, the metadata should be re-dumped to the
		 * newest chunk.
		 *
		 * However, it is possible for a stream to transition to
		 * a "no-chunk" state. This can happen if a rotation
		 * occurs on an inactive session. In such cases, the metadata
		 * regeneration will happen when the next trace chunk is
		 * created.
		 */
		ret = consumer_metadata_stream_dump(stream);
		if (ret) {
			goto error;
		}
	}
	lttng_consumer_reset_stream_rotate_state(stream);

	ret = 0;

error:
	return ret;
}
/*
 * Rotate all the ready streams now.
 *
 * This is especially important for low throughput streams that have already
 * been consumed; we cannot wait for their next packet to perform the
 * rotation.
 * Needs to be called with the RCU read-side lock held to ensure existence of
 * the channel.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_rotate_ready_streams(struct lttng_consumer_channel *channel,
		uint64_t key, struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();

	DBG("Consumer rotate ready streams in channel %" PRIu64, key);

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key, &iter.iter,
			stream, node_channel_id.node) {
		health_code_update();

		pthread_mutex_lock(&stream->chan->lock);
		pthread_mutex_lock(&stream->lock);

		if (!stream->rotate_ready) {
			pthread_mutex_unlock(&stream->lock);
			pthread_mutex_unlock(&stream->chan->lock);
			continue;
		}
		DBG("Consumer rotate ready stream %" PRIu64, stream->key);

		ret = lttng_consumer_rotate_stream(ctx, stream);
		pthread_mutex_unlock(&stream->lock);
		pthread_mutex_unlock(&stream->chan->lock);
		if (ret) {
			goto end;
		}
	}

	ret = 0;

end:
	rcu_read_unlock();
	return ret;
}
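
/*
 * Handle the "init" command: record the session daemon's UUID in the
 * consumer context. Fails with LTTCOMM_CONSUMERD_ALREADY_SET if a UUID was
 * already received.
 */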
enum lttcomm_return_code lttng_consumer_init_command(
		struct lttng_consumer_local_data *ctx,
		const lttng_uuid sessiond_uuid)
{
	enum lttcomm_return_code ret;
	char uuid_str[LTTNG_UUID_STR_LEN];

	if (ctx->sessiond_uuid.is_set) {
		ret = LTTCOMM_CONSUMERD_ALREADY_SET;
		goto end;
	}

	ctx->sessiond_uuid.is_set = true;
	memcpy(ctx->sessiond_uuid.value, sessiond_uuid, sizeof(lttng_uuid));
	ret = LTTCOMM_CONSUMERD_SUCCESS;
	lttng_uuid_to_str(sessiond_uuid, uuid_str);
	DBG("Received session daemon UUID: %s", uuid_str);
end:
	return ret;
}
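
/*
 * Create a trace chunk with the given id and creation timestamp, publish it
 * in the consumer daemon's chunk registry and set it as the current trace
 * chunk of every channel of the session. When a relay daemon id is provided,
 * the chunk creation is also propagated to the relay daemon. On failure, the
 * chunk creation is rolled back by issuing a close of the new chunk.
 */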
enum lttcomm_return_code lttng_consumer_create_trace_chunk(
		const uint64_t *relayd_id, uint64_t session_id,
		uint64_t chunk_id,
		time_t chunk_creation_timestamp,
		const char *chunk_override_name,
		const struct lttng_credentials *credentials,
		struct lttng_directory_handle *chunk_directory_handle)
{
	int ret;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_trace_chunk *created_chunk = NULL, *published_chunk = NULL;
	enum lttng_trace_chunk_status chunk_status;
	char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)];
	char creation_timestamp_buffer[ISO8601_STR_LEN];
	const char *relayd_id_str = "(none)";
	const char *creation_timestamp_str;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (relayd_id) {
		/* Only used for logging purposes. */
		ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer),
				"%" PRIu64, *relayd_id);
		if (ret > 0 && ret < sizeof(relayd_id_buffer)) {
			relayd_id_str = relayd_id_buffer;
		} else {
			relayd_id_str = "(formatting error)";
		}
	}

	/* Local protocol error. */
	assert(chunk_creation_timestamp);
	ret = time_to_iso8601_str(chunk_creation_timestamp,
			creation_timestamp_buffer,
			sizeof(creation_timestamp_buffer));
	creation_timestamp_str = !ret ? creation_timestamp_buffer :
			"(formatting error)";

	DBG("Consumer create trace chunk command: relay_id = %s"
			", session_id = %" PRIu64 ", chunk_id = %" PRIu64
			", chunk_override_name = %s"
			", chunk_creation_timestamp = %s",
			relayd_id_str, session_id, chunk_id,
			chunk_override_name ? : "(none)",
			creation_timestamp_str);

	/*
	 * The trace chunk registry, as used by the consumer daemon, implicitly
	 * owns the trace chunks. This is only needed in the consumer since
	 * the consumer has no notion of a session beyond session IDs being
	 * used to identify other objects.
	 *
	 * The lttng_trace_chunk_registry_publish() call below provides a
	 * reference which is not released; it implicitly becomes the session
	 * daemon's reference to the chunk in the consumer daemon.
	 *
	 * The lifetime of trace chunks in the consumer daemon is managed by
	 * the session daemon through the LTTNG_CONSUMER_CREATE_TRACE_CHUNK
	 * and LTTNG_CONSUMER_DESTROY_TRACE_CHUNK commands.
	 */
	created_chunk = lttng_trace_chunk_create(chunk_id,
			chunk_creation_timestamp, NULL);
	if (!created_chunk) {
		ERR("Failed to create trace chunk");
		ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
		goto error;
	}

	if (chunk_override_name) {
		chunk_status = lttng_trace_chunk_override_name(created_chunk,
				chunk_override_name);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			goto error;
		}
	}

	if (chunk_directory_handle) {
		chunk_status = lttng_trace_chunk_set_credentials(created_chunk,
				credentials);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ERR("Failed to set trace chunk credentials");
			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			goto error;
		}
		/*
		 * The consumer daemon has no ownership of the chunk output
		 * directory.
		 */
		chunk_status = lttng_trace_chunk_set_as_user(created_chunk,
				chunk_directory_handle);
		chunk_directory_handle = NULL;
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ERR("Failed to set trace chunk's directory handle");
			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			goto error;
		}
	}

	published_chunk = lttng_trace_chunk_registry_publish_chunk(
			consumer_data.chunk_registry, session_id,
			created_chunk);
	lttng_trace_chunk_put(created_chunk);
	created_chunk = NULL;
	if (!published_chunk) {
		ERR("Failed to publish trace chunk");
		ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
		goto error;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(consumer_data.channels_by_session_id_ht->ht,
			consumer_data.channels_by_session_id_ht->hash_fct(
					&session_id, lttng_ht_seed),
			consumer_data.channels_by_session_id_ht->match_fct,
			&session_id, &iter.iter, channel,
			channels_by_session_id_ht_node.node) {
		ret = lttng_consumer_channel_set_trace_chunk(channel,
				published_chunk);
		if (ret) {
			/*
			 * Roll-back the creation of this chunk.
			 *
			 * This is important since the session daemon will
			 * assume that the creation of this chunk failed and
			 * will never ask for it to be closed, resulting
			 * in a leak and an inconsistent state for some
			 * channels.
			 */
			enum lttcomm_return_code close_ret;
			char path[LTTNG_PATH_MAX];

			DBG("Failed to set new trace chunk on existing channels, rolling back");
			close_ret = lttng_consumer_close_trace_chunk(relayd_id,
					session_id, chunk_id,
					chunk_creation_timestamp, NULL,
					path);
			if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
				ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64 ", chunk_id = %" PRIu64,
						session_id, chunk_id);
			}

			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			break;
		}
	}

	if (relayd_id) {
		struct consumer_relayd_sock_pair *relayd;

		relayd = consumer_find_relayd(*relayd_id);
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			ret = relayd_create_trace_chunk(
					&relayd->control_sock, published_chunk);
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		} else {
			ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64, *relayd_id);
		}

		if (!relayd || ret) {
			enum lttcomm_return_code close_ret;
			char path[LTTNG_PATH_MAX];

			close_ret = lttng_consumer_close_trace_chunk(relayd_id,
					session_id,
					chunk_id,
					chunk_creation_timestamp,
					NULL, path);
			if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
				ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64
						", chunk_id = %" PRIu64,
						session_id, chunk_id);
			}

			ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
			goto error_unlock;
		}
	}
error_unlock:
	rcu_read_unlock();
error:
	/* Release the reference returned by the "publish" operation. */
	lttng_trace_chunk_put(published_chunk);
	lttng_trace_chunk_put(created_chunk);
	return ret_code;
}
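
/*
 * Close a trace chunk: set its close timestamp and (optional) close command,
 * detach it from the channels that still reference it and, when a relay
 * daemon id is provided, propagate the closure to the relay daemon.
 */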
enum lttcomm_return_code lttng_consumer_close_trace_chunk(
		const uint64_t *relayd_id, uint64_t session_id,
		uint64_t chunk_id, time_t chunk_close_timestamp,
		const enum lttng_trace_chunk_command_type *close_command,
		char *path)
{
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_trace_chunk *chunk;
	char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)];
	const char *relayd_id_str = "(none)";
	const char *close_command_name = "none";
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;
	enum lttng_trace_chunk_status chunk_status;

	if (relayd_id) {
		int ret;

		/* Only used for logging purposes. */
		ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer),
				"%" PRIu64, *relayd_id);
		if (ret > 0 && ret < sizeof(relayd_id_buffer)) {
			relayd_id_str = relayd_id_buffer;
		} else {
			relayd_id_str = "(formatting error)";
		}
	}
	if (close_command) {
		close_command_name = lttng_trace_chunk_command_type_get_name(
				*close_command);
	}

	DBG("Consumer close trace chunk command: relayd_id = %s"
			", session_id = %" PRIu64 ", chunk_id = %" PRIu64
			", close command = %s",
			relayd_id_str, session_id, chunk_id,
			close_command_name);

	chunk = lttng_trace_chunk_registry_find_chunk(
			consumer_data.chunk_registry, session_id, chunk_id);
	if (!chunk) {
		ERR("Failed to find chunk: session_id = %" PRIu64
				", chunk_id = %" PRIu64,
				session_id, chunk_id);
		ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK;
		goto end;
	}

	chunk_status = lttng_trace_chunk_set_close_timestamp(chunk,
			chunk_close_timestamp);
	if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
		ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
		goto end;
	}

	if (close_command) {
		chunk_status = lttng_trace_chunk_set_close_command(
				chunk, *close_command);
		if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
			goto end;
		}
	}

	/*
	 * chunk is now invalid to access as we no longer hold a reference to
	 * it; it is only kept around to compare it (by address) to the
	 * current chunk found in the session's channels.
	 */
	rcu_read_lock();
	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter,
			channel, node.node) {
		int ret;

		/*
		 * Only change the channel's chunk to NULL if it still
		 * references the chunk being closed. The channel may
		 * reference a newer chunk in the case of a session
		 * rotation. When a session rotation occurs, the "next"
		 * chunk is created before the "current" chunk is closed.
		 */
		if (channel->trace_chunk != chunk) {
			continue;
		}
		ret = lttng_consumer_channel_set_trace_chunk(channel, NULL);
		if (ret) {
			/*
			 * Attempt to close the chunk on as many channels as
			 * possible.
			 */
			ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
		}
	}

	if (relayd_id) {
		int ret;
		struct consumer_relayd_sock_pair *relayd;

		relayd = consumer_find_relayd(*relayd_id);
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			ret = relayd_close_trace_chunk(
					&relayd->control_sock, chunk,
					path);
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		} else {
			ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64,
					*relayd_id);
		}

		if (!relayd || ret) {
			ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
			goto error_unlock;
		}
	}
error_unlock:
	rcu_read_unlock();
end:
	/*
	 * Release the reference returned by the "find" operation and
	 * the session daemon's implicit reference to the chunk.
	 */
	lttng_trace_chunk_put(chunk);
	lttng_trace_chunk_put(chunk);

	return ret_code;
}
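
/*
 * Check whether a trace chunk exists, first in the consumer daemon's local
 * chunk registry and then, for network sessions, on the relay daemon.
 */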
enum lttcomm_return_code lttng_consumer_trace_chunk_exists(
		const uint64_t *relayd_id, uint64_t session_id,
		uint64_t chunk_id)
{
	int ret;
	enum lttcomm_return_code ret_code;
	char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)];
	const char *relayd_id_str = "(none)";
	const bool is_local_trace = !relayd_id;
	struct consumer_relayd_sock_pair *relayd = NULL;
	bool chunk_exists_local, chunk_exists_remote;

	if (relayd_id) {
		/* Only used for logging purposes. */
		ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer),
				"%" PRIu64, *relayd_id);
		if (ret > 0 && ret < sizeof(relayd_id_buffer)) {
			relayd_id_str = relayd_id_buffer;
		} else {
			relayd_id_str = "(formatting error)";
		}
	}

	DBG("Consumer trace chunk exists command: relayd_id = %s"
			", chunk_id = %" PRIu64, relayd_id_str,
			chunk_id);
	ret = lttng_trace_chunk_registry_chunk_exists(
			consumer_data.chunk_registry, session_id,
			chunk_id, &chunk_exists_local);
	if (ret) {
		/* Internal error. */
		ERR("Failed to query the existence of a trace chunk");
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto end;
	}
	DBG("Trace chunk %s locally",
			chunk_exists_local ? "exists" : "does not exist");
	if (chunk_exists_local) {
		ret_code = LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_LOCAL;
		goto end;
	} else if (is_local_trace) {
		ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK;
		goto end;
	}

	rcu_read_lock();
	relayd = consumer_find_relayd(*relayd_id);
	if (!relayd) {
		ERR("Failed to find relayd %" PRIu64, *relayd_id);
		ret_code = LTTCOMM_CONSUMERD_INVALID_PARAMETERS;
		goto end_rcu_unlock;
	}
	DBG("Looking up existence of trace chunk on relay daemon");
	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	ret = relayd_trace_chunk_exists(&relayd->control_sock, chunk_id,
			&chunk_exists_remote);
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	if (ret < 0) {
		ERR("Failed to look-up the existence of trace chunk on relay daemon");
		ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		goto end_rcu_unlock;
	}

	ret_code = chunk_exists_remote ?
			LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_REMOTE :
			LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK;
	DBG("Trace chunk %s on relay daemon",
			chunk_exists_remote ? "exists" : "does not exist");

end_rcu_unlock:
	rcu_read_unlock();
end:
	return ret_code;
}
int consumer_clear_monitored_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	int ret;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		ret = consumer_clear_stream(stream);
		if (ret) {
			goto error_unlock;
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
	return LTTCOMM_CONSUMERD_SUCCESS;

error_unlock:
	pthread_mutex_unlock(&stream->lock);
	rcu_read_unlock();
	return ret;
}
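
/*
 * Clear a channel's ring buffers. Metadata channels are left untouched;
 * data channels are dispatched to the monitored or unmonitored clear
 * routine depending on the channel's monitor flag.
 */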
int lttng_consumer_clear_channel(struct lttng_consumer_channel *channel)
{
	int ret;

	DBG("Consumer clear channel %" PRIu64, channel->key);

	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
		/*
		 * Nothing to do for the metadata channel/stream.
		 * The snapshot mechanism already takes care of metadata
		 * handling/generation, and monitored channels only need to
		 * have their data stream cleared.
		 */
		ret = LTTCOMM_CONSUMERD_SUCCESS;
		goto end;
	}

	if (!channel->monitor) {
		ret = consumer_clear_unmonitored_channel(channel);
	} else {
		ret = consumer_clear_monitored_channel(channel);
	}
end:
	return ret;
}
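
/*
 * Open a packet in every stream of a channel. Metadata channels are
 * rejected. Streams that are being torn down are skipped, and the inability
 * to open a packet because no space is left in the buffer is not treated as
 * an error.
 */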
enum lttcomm_return_code lttng_consumer_open_channel_packets(
		struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream;
	enum lttcomm_return_code ret = LTTCOMM_CONSUMERD_SUCCESS;

	if (channel->metadata_stream) {
		ERR("Open channel packets command attempted on a metadata channel");
		ret = LTTCOMM_CONSUMERD_INVALID_PARAMETERS;
		goto end;
	}

	rcu_read_lock();
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		enum consumer_stream_open_packet_status status;

		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		status = consumer_stream_open_packet(stream);
		switch (status) {
		case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
			DBG("Opened a packet in \"open channel packets\" command: stream id = %" PRIu64
					", channel name = %s, session id = %" PRIu64,
					stream->key, stream->chan->name,
					stream->chan->session_id);
			stream->opened_packet_in_current_trace_chunk = true;
			break;
		case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
			DBG("No space left to open a packet in \"open channel packets\" command: stream id = %" PRIu64
					", channel name = %s, session id = %" PRIu64,
					stream->key, stream->chan->name,
					stream->chan->session_id);
			break;
		case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
			/*
			 * Only unexpected internal errors can lead to this
			 * failing. Report an unknown error.
			 */
			ERR("Failed to flush empty buffer in \"open channel packets\" command: stream id = %" PRIu64
					", channel id = %" PRIu64
					", channel name = %s"
					", session id = %" PRIu64,
					stream->key, channel->key,
					channel->name, channel->session_id);
			ret = LTTCOMM_CONSUMERD_UNKNOWN_ERROR;
			goto error_unlock;
		default:
			abort();
		}

	next:
		pthread_mutex_unlock(&stream->lock);
	}

end_rcu_unlock:
	rcu_read_unlock();
end:
	return ret;

error_unlock:
	pthread_mutex_unlock(&stream->lock);
	goto end_rcu_unlock;