/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/time.h>
#include <common/compat/poll.h>
#include <common/compat/endian.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer/consumer-timer.h>
#include <common/consumer/consumer.h>
#include <common/consumer/consumer-stream.h>
#include <common/consumer/consumer-testpoint.h>
#include <common/align.h>
#include <common/consumer/consumer-metadata-cache.h>
#include <common/trace-chunk.h>
#include <common/trace-chunk-registry.h>
#include <common/string-utils/format.h>
#include <common/dynamic-array.h>
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};
struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
/* Flag used to temporarily pause data consumption from testpoints. */
int data_consumption_paused;
/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by the consumer_thread_receive_fds when it notices that all fds
 * have hung up. Also updated by the signal handler (consumer_should_exit()).
 * Read by the polling threads.
 */
/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}

	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
/*
 * Cleanup the stream list of a channel. Those streams are not yet globally
 * visible.
 */
static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so we
		 * have a guarantee that this call will succeed. Setting the monitor
		 * mode to 0 so we don't lock nor try to delete the stream from the
		 * global hash table.
		 */
		stream->monitor = 0;
		consumer_stream_destroy(stream, NULL);
	}
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
}
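
/*
 * Illustrative sketch (not part of the original file): the effect of the
 * key-stealing trick above. After steal_stream_key(), a lookup by the old
 * key no longer matches, but hash table iteration still visits the renamed
 * node, so the owning thread can still tear the stream down. The key value
 * 42 is only an assumption for the example.
 *
 *	steal_stream_key(42, data_ht);
 *	assert(!find_stream(42, data_ht));      // lookup misses from now on
 *	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
 *		// the stolen stream is still reachable here, with key == -1ULL
 *	}
 */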
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
/*
 * There is a possibility that the consumer does not have enough time between
 * the close of the channel on the session daemon and the cleanup in here thus
 * once we have a channel add with an existing key, we know for sure that this
 * channel will eventually get cleaned up by all streams being closed.
 *
 * This function just nullifies the already existing channel key.
 */
static void steal_channel_key(uint64_t key)
{
	struct lttng_consumer_channel *channel;

	channel = consumer_find_channel(key);
	if (channel) {
		channel->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this channel when iterating over the hash table. Just change the
		 * node key.
		 */
		channel->node.key = (uint64_t) -1ULL;
	}
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_free_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
	}
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call RCU since we don't want the
	 * socket fds to be reassigned thus potentially creating bad state of the
	 * relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing to this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	pthread_mutex_destroy(&relayd->ctrl_sock_mutex);
	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Destroy streams that might have been left in the stream list. */
	clean_channel_stream_list(channel);

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}
	if (channel->monitor_timer_enabled == 1) {
		consumer_timer_monitor_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
	}

	lttng_trace_chunk_put(channel->trace_chunk);
	channel->trace_chunk = NULL;

	if (channel->is_published) {
		int ret;

		iter.iter.node = &channel->node.node;
		ret = lttng_ht_del(consumer_data.channel_ht, &iter);

		iter.iter.node = &channel->channels_by_session_id_ht_node.node;
		ret = lttng_ht_del(consumer_data.channels_by_session_id_ht,
				&iter);
	}

	channel->is_deleted = true;
	call_rcu(&channel->node.head, free_channel_rcu);

	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state has changed.
 */
void lttng_consumer_cleanup_relayd(struct consumer_relayd_sock_pair *relayd)
{
	uint64_t netidx;

	DBG("Cleaning up relayd object ID %" PRIu64, relayd->net_seq_idx);

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * has changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	notify_thread_lttng_pipe(relayd->ctx->consumer_data_pipe);
	notify_thread_lttng_pipe(relayd->ctx->consumer_metadata_pipe);
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
/*
 * Completely destroy a stream from every visible data structure and the given
 * hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
void consumer_stream_update_channel_attributes(
		struct lttng_consumer_stream *stream,
		struct lttng_consumer_channel *channel)
{
	stream->channel_read_only_attributes.tracefile_size =
			channel->tracefile_size;
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		const char *channel_name,
		uint64_t relayd_id,
		uint64_t session_id,
		struct lttng_trace_chunk *trace_chunk,
		int cpu,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		goto end;
	}

	if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
		ERR("Failed to acquire trace chunk reference during the creation of a stream");
		goto error;
	}

	stream->key = stream_key;
	stream->trace_chunk = trace_chunk;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_file = NULL;
	stream->last_sequence_number = -1ULL;
	stream->rotate_position = -1ULL;
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->metadata_timer_lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	return stream;

error:
	lttng_trace_chunk_put(stream->trace_chunk);
	free(stream);
end:
	return NULL;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
void consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
static struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
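
/*
 * Illustrative sketch (not part of the original file): typical pairing of
 * consumer_allocate_relayd_sock_pair() with add_relayd(). The RCU read-side
 * lock must be held when touching consumer_data.relayd_ht, as required by
 * add_relayd()'s comment above. Error handling is deliberately minimal.
 *
 *	struct consumer_relayd_sock_pair *relayd;
 *
 *	relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
 *	if (relayd) {
 *		rcu_read_lock();
 *		(void) add_relayd(relayd);
 *		rcu_read_unlock();
 *	}
 */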
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}
/*
 * Find a relayd and send the stream.
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream->net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size,
				stream->chan->tracefile_count,
				stream->trace_chunk);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd add stream failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	return ret;
}
/*
 * Find a relayd and send the streams sent message.
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			ERR("Relayd streams sent failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
			lttng_consumer_cleanup_relayd(relayd);
			goto end;
		}
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	ret = 0;
	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	return ret;
}
/*
 * Find a relayd and close the stream.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
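
/*
 * Illustrative sketch (not part of the original file): as the comment above
 * explains, data_hdr.net_seq_num carries the value of next_net_seq_num
 * *before* the post-send increment. Hence, when comparing against the last
 * sequence number seen by the relayd, one must be subtracted from the next
 * value. The variable name is only an assumption for the example.
 *
 *	uint64_t last_sent = stream->next_net_seq_num - 1;
 *	// last_sent is the sequence number the relayd should have received
 */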
/*
 * Trigger a dump of the metadata content. Following/during the successful
 * completion of this call, the metadata poll thread will start receiving
 * metadata packets to consume.
 *
 * The caller must hold the channel and stream locks.
 */
static int consumer_metadata_stream_dump(struct lttng_consumer_stream *stream)
{
	int ret;

	ASSERT_LOCKED(stream->chan->lock);
	ASSERT_LOCKED(stream->lock);
	assert(stream->metadata_flag);
	assert(stream->chan->trace_chunk);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * Reset the position of what has been read from the
		 * metadata cache to 0 so we can dump it again.
		 */
		ret = kernctl_metadata_cache_dump(stream->wait_fd);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Reset the position pushed from the metadata cache so it
		 * will write from the beginning on the next push.
		 */
		stream->ust_metadata_pushed = 0;
		ret = consumer_metadata_wakeup_pipe(stream->chan);
		break;
	default:
		ERR("Unknown consumer_data type");
		abort();
	}
	if (ret < 0) {
		ERR("Failed to dump the metadata cache");
	}
	return ret;
}
int lttng_consumer_channel_set_trace_chunk(
		struct lttng_consumer_channel *channel,
		struct lttng_trace_chunk *new_trace_chunk)
{
	pthread_mutex_lock(&channel->lock);
	if (channel->is_deleted) {
		/*
		 * The channel has been logically deleted and should no longer
		 * be used. It has released its reference to its current trace
		 * chunk and should not acquire a new one.
		 *
		 * Return success as there is nothing for the caller to do.
		 */
		goto end;
	}

	/*
	 * The acquisition of the reference cannot fail (barring
	 * a severe internal error) since a reference to the published
	 * chunk is already held by the caller.
	 */
	if (new_trace_chunk) {
		const bool acquired_reference = lttng_trace_chunk_get(
				new_trace_chunk);

		assert(acquired_reference);
	}

	lttng_trace_chunk_put(channel->trace_chunk);
	channel->trace_chunk = new_trace_chunk;
end:
	pthread_mutex_unlock(&channel->lock);
	return 0;
}
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const uint64_t *chunk_id,
		const char *pathname,
		const char *name,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval,
		const char *root_shm_path,
		const char *shm_path)
{
	struct lttng_consumer_channel *channel = NULL;
	struct lttng_trace_chunk *trace_chunk = NULL;

	if (chunk_id) {
		trace_chunk = lttng_trace_chunk_registry_find_chunk(
				consumer_data.chunk_registry, session_id,
				*chunk_id);
		if (!trace_chunk) {
			ERR("Failed to find trace chunk reference during creation of channel");
			goto end;
		}
	}

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		break;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	if (root_shm_path) {
		strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path));
		channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0';
	}
	if (shm_path) {
		strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path));
		channel->shm_path[sizeof(channel->shm_path) - 1] = '\0';
	}

	lttng_ht_node_init_u64(&channel->node, channel->key);
	lttng_ht_node_init_u64(&channel->channels_by_session_id_ht_node,
			channel->session_id);

	channel->wait_fd = -1;
	CDS_INIT_LIST_HEAD(&channel->streams.head);

	if (trace_chunk) {
		int ret = lttng_consumer_channel_set_trace_chunk(channel,
				trace_chunk);
		if (ret) {
			goto error;
		}
	}

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	lttng_trace_chunk_put(trace_chunk);
	return channel;
error:
	consumer_del_channel(channel);
	channel = NULL;
	goto end;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * Always return 0 indicating success.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);

	/*
	 * This gives us a guarantee that the channel we are about to add to the
	 * channel hash table will be unique. See this function comment on why
	 * we need to steal the channel key at this stage.
	 */
	steal_channel_key(channel->key);

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);
	lttng_ht_add_u64(consumer_data.channels_by_session_id_ht,
			&channel->channels_by_session_id_ht_node);
	channel->is_published = true;

	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return 0;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht, int *nb_inactive_fd)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	*nb_inactive_fd = 0;
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 *
		 * We track the number of inactive FDs because they still need to be
		 * closed by the polling thread after a wakeup on the data_pipe or
		 * metadata_pipe.
		 */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			(*nb_inactive_fd)++;
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;

	(*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe);
	(*pollfd)[i + 1].events = POLLIN | POLLPRI;
	return i;
}
/*
 * Poll on the should_quit pipe and the command socket; return -1 on
 * error, 1 if should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return 1;
	}
	return 0;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;
	unsigned int trace_chunks_left;

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	lttng_ht_destroy(consumer_data.channel_ht);
	lttng_ht_destroy(consumer_data.channels_by_session_id_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);

	/*
	 * Trace chunks in the registry may still exist if the session
	 * daemon has encountered an internal error and could not
	 * tear down its sessions and/or trace chunks properly.
	 *
	 * Release the session daemon's implicit reference to any remaining
	 * trace chunk and print an error if any trace chunk was found. Note
	 * that there are _no_ legitimate cases for trace chunks to be left,
	 * it is a leak. However, it can happen following a crash of the
	 * session daemon and not emptying the registry would cause an assertion
	 * to hit.
	 */
	trace_chunks_left = lttng_trace_chunk_registry_put_each_chunk(
			consumer_data.chunk_registry);
	if (trace_chunks_left) {
		ERR("%u trace chunks are leaked by lttng-consumerd. "
				"This can be caused by an internal error of the session daemon.",
				trace_chunks_left);
	}
	/* Run all callbacks freeing each chunk. */
	lttng_trace_chunk_registry_destroy(consumer_data.chunk_registry);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	CMM_STORE_SHARED(consumer_quit, 1);
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
/*
 * Flush pending writes to trace output disk file.
 */
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int ret;
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
	if (ret && ret != -ENOSYS) {
		errno = ret;
		PERROR("posix_fadvise on fd %i", outfd);
	}
}
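
/*
 * Illustrative sketch (not part of the original file): the write-back hinting
 * pattern used above, reduced to its core. The first call waits for previously
 * dirtied pages to reach disk, the second tells the kernel the pages will not
 * be re-read soon so they can be dropped from the page cache. fd, off and len
 * are assumptions for the example.
 *
 *	sync_file_range(fd, off, len, SYNC_FILE_RANGE_WAIT_BEFORE
 *			| SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER);
 *	(void) posix_fadvise(fd, off, len, POSIX_FADV_DONTNEED);
 */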
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument; this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ctx->consumer_wakeup_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_wakeup_pipe) {
		goto error_wakeup_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ctx->channel_monitor_pipe = -1;

	return ctx;

error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
error_wakeup_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
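
/*
 * Illustrative sketch (not part of the original file): creating a context
 * with a stub data callback, as a kernel consumer might. The callback name is
 * an assumption for the example; real consumers pass their own handlers for
 * buffer readiness, channel/stream reception and stream updates.
 *
 *	static ssize_t on_buffer_ready(struct lttng_consumer_stream *stream,
 *			struct lttng_consumer_local_data *ctx)
 *	{
 *		return 0;
 *	}
 *
 *	struct lttng_consumer_local_data *ctx = lttng_consumer_create(
 *			LTTNG_CONSUMER_KERNEL, on_buffer_ready,
 *			NULL, NULL, NULL);
 *	if (!ctx) {
 *		// handle allocation or pipe creation failure
 *	}
 */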
/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}

	lttng_ht_destroy(ht);
}

/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}

	lttng_ht_destroy(ht);
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
	utils_close_pipe(ctx->consumer_should_quit);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}

	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream and the channel lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written.
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	unsigned long mmap_offset;
	void *mmap_base;
	ssize_t ret = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();
	assert(stream->net_seq_idx != (uint64_t) -1ULL ||
			stream->trace_chunk);

	/* Flag that the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret < 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			ret = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		goto end;
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;

		/* Write metadata stream id before payload */
		if (stream->metadata_flag) {
			ret = write_relayd_metadata_id(outfd, stream, padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = consumer_stream_rotate_output_files(stream);
			if (ret) {
				goto end;
			}
			outfd = stream->out_fd;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		if (index) {
			index->offset = htobe64(stream->out_fd_offset);
		}
	}

	/*
	 * This call guarantees that len or less is returned. It's impossible to
	 * receive a ret value that is bigger than len.
	 */
	ret = lttng_write(outfd, mmap_base + mmap_offset, len);
	DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
	if (ret < 0 || ((size_t) ret != len)) {
		/*
		 * Report error to caller if nothing was written else at least send the
		 * amount written.
		 */
		relayd_hang_up = 1;

		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Consumer mmap write detected relayd hang up");
		} else {
			/* Unhandled error, print it and stop function right now. */
			PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
		}
		goto write_error;
	}
	stream->output_written += ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, len,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += len;
		lttng_consumer_sync_trace_file(stream, orig_offset);
	}

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			written = -EPIPE;
			goto end;
		}
	}
	splice_pipe = stream->splice_pipe;

	/* Write metadata stream id before payload */
	if (relayd) {
		unsigned long total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			if (stream->reset_metadata_flag) {
				ret = relayd_reset_metadata(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->metadata_version);
				if (ret < 0) {
					relayd_hang_up = 1;
					goto write_error;
				}
				stream->reset_metadata_flag = 0;
			}
			ret = write_relayd_metadata_id(splice_pipe[1], stream,
					padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		if (stream->metadata_flag && stream->reset_metadata_flag) {
			ret = utils_truncate_stream_file(stream->out_fd, 0);
			if (ret < 0) {
				ERR("Reset metadata file");
				goto end;
			}
			stream->reset_metadata_flag = 0;
		}
		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = consumer_stream_rotate_output_files(stream);
			if (ret < 0) {
				written = ret;
				goto end;
			}
			outfd = stream->out_fd;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			PERROR("Error in relay splice");
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file (out_fd: %d), ret %zd",
				outfd, ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = -ret;
			PERROR("Error in file splice");
			goto splice_error;
		} else if (ret_splice > len) {
			/*
			 * We don't expect this code path to be executed but you never know
			 * so this is an extra protection against a buggy splice().
			 */
			ret = errno;
			written += ret_splice;
			PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
					len);
			goto splice_error;
		} else {
			/* All good, update current len and continue. */
			len -= ret_splice;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx);
		lttng_consumer_cleanup_relayd(relayd);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Sample the snapshot positions for a specific fd.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_sample_snapshot_positions(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_sample_snapshot_positions(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
/*
 * Get the consumed position (free-running counter position in bytes).
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_consumed_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_consumed_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
void lttng_consumer_close_all_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_all_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_consumer_channel *channel = NULL;
	bool free_channel = false;

	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);
	/*
	 * Note that this assumes that a stream's channel is never changed and
	 * that the stream's lock doesn't need to be taken to sample its
	 * channel.
	 */
	channel = stream->chan;
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&stream->lock);
	if (channel->metadata_cache) {
		/* Only applicable to userspace consumers. */
		pthread_mutex_lock(&channel->metadata_cache->lock);
	}

	/* Remove any reference to that stream. */
	consumer_stream_delete(stream, ht);

	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&channel->refcount, 1)
			&& !uatomic_read(&channel->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_channel = true;
	}
	stream->chan = NULL;

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	channel->metadata_stream = NULL;

	if (channel->metadata_cache) {
		pthread_mutex_unlock(&channel->metadata_cache->lock);
	}
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_channel) {
		consumer_del_channel(channel);
	}

	lttng_trace_chunk_put(stream->trace_chunk);
	stream->trace_chunk = NULL;
	consumer_stream_free(stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
void consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
/*
 * Thread that polls on the metadata file descriptors and writes them to disk
 * or to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
restart:
		health_code_update();
		health_poll_entry();
		DBG("Metadata poll wait");
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						if (pipe_len < 0) {
							PERROR("read metadata stream");
						}
						/*
						 * Remove the pipe from the poll set and continue
						 * the loop since there might be data to consume.
						 */
						lttng_poll_del(&events,
								lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
						lttng_pipe_read_close(ctx->consumer_metadata_pipe);
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI | LPOLLHUP);
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue
					 * the loop since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			} else if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if
						 * we get a negative len, it means an error
						 * occurred thus we simply remove it from the poll
						 * set and free the stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}
			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
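/*
 * The metadata poll thread thus has two event sources: the
 * consumer_metadata_pipe, on which other threads send either a stream pointer
 * to start watching or NULL to signal a state change, and the wait fd of each
 * registered metadata stream. Data is drained through ctx->on_buffer_ready();
 * a stream whose fd hangs up is flushed one last time (UST only) and then
 * deleted from metadata_ht.
 */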
/*
 * This thread polls the fds in the set to consume the data and write it to a
 * tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	/* 2 for the consumer_data_pipe and wake up pipe */
	const int nb_pipes_fd = 2;
	/* Number of FDs with CONSUMER_ENDPOINT_INACTIVE but still open. */
	int nb_inactive_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * The fds set has been updated, we need to update our
		 * local array as well.
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* Allocate for all fds */
			pollfd = zmalloc((consumer_data.stream_count + nb_pipes_fd) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			local_stream = zmalloc((consumer_data.stream_count + nb_pipes_fd) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht, &nb_inactive_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, consumer_cleanup the thread */
		if (nb_fd == 0 && nb_inactive_fd == 0 &&
				CMM_LOAD_SHARED(consumer_quit) == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + nb_pipes_fd);
		if (testpoint(consumerd_thread_data_poll)) {
			goto end;
		}
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + nb_pipes_fd, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		if (caa_unlikely(data_consumption_paused)) {
			DBG("Data consumption paused, sleeping...");
			sleep(1);
			continue;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Handle wakeup pipe. */
		if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
			char dummy;
			ssize_t pipe_readlen;

			pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy,
					sizeof(dummy));
			if (pipe_readlen < 0) {
				PERROR("Consumer data wakeup pipe");
			}
			/* We've been awakened to handle stream(s). */
			ctx->has_wakeup = 0;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done ||
					local_stream[i]->has_data) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
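/*
 * Layout of the pollfd array rebuilt by update_poll_array() above: entries
 * 0..nb_fd-1 are the per-stream wait fds (mirrored in local_stream[]), entry
 * nb_fd is the consumer_data_pipe and entry nb_fd + 1 is the
 * consumer_wakeup_pipe. The thread drains POLLPRI (urgent) sub-buffers in a
 * first pass and only then services POLLIN, the intent being that
 * high-priority data is not starved by bulk reads.
 */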
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			if (stream->metadata_flag) {
				/* Safe and protected by the stream lock. */
				lttng_ustconsumer_close_metadata(stream->chan);
			} else {
				/*
				 * Note: a mutex is taken internally within
				 * liblttng-ust-ctl to protect timer wakeup_fd
				 * use from concurrent close.
				 */
				lttng_ustconsumer_close_stream_wakeup(stream);
			}
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
restart:
		health_code_update();
		DBG("Channel poll wait");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						if (ret < 0) {
							ERR("Error reading channel pipe");
						}
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLERR | LPOLLHUP);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						/*
						 * This command should never be called if the
						 * channel has streams monitored by either the data
						 * or metadata thread. The consumer only notifies
						 * this thread with a channel del command if it
						 * receives a destroy channel command from the
						 * session daemon, which sends it when a command
						 * prior to GET_CHANNEL failed.
						 */
						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							health_code_update();
							/* Destroy streams that might have been left in the stream list. */
							clean_channel_stream_list(chan);
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion
						 * even if streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						break;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue
						 * the loop since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue
					 * the loop since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);

				/*
				 * This will close the wait fd for each stream associated to
				 * this channel AND monitored by the data/metadata thread thus
				 * will be cleaned by the right thread.
				 */
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}
end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
error_testpoint:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
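/*
 * To recap the channel pipe protocol handled above: read_channel_pipe()
 * yields a (channel, key, action) triple where the action is
 * CONSUMER_CHANNEL_ADD to start watching a channel's wait fd,
 * CONSUMER_CHANNEL_DEL to tear down a channel that never got its streams
 * monitored, or CONSUMER_CHANNEL_QUIT to stop reading from the pipe. Watched
 * channels live in the thread-local channel_ht keyed by wait fd; a hang-up or
 * error on that fd triggers consumer_close_channel_streams() and the release
 * of this thread's reference on the channel.
 */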
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	ret = lttng_consumer_poll_socket(sockpoll);