/*
 * Copyright (C) 2013 Julien Desfossez <jdesfossez@efficios.com>
 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
 * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/futex.h>
#include <urcu/rculist.h>
#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/endian.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/fd-tracker/utils.h>
#include <common/fs-handle.h>
#include <common/futex.h>
#include <common/index/index.h>
#include <common/sessiond-comm/inet.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/uri.h>
#include <common/utils.h>
#include <lttng/lttng.h>
50 #include "connection.h"
51 #include "ctf-trace.h"
52 #include "health-relayd.h"
54 #include "lttng-relayd.h"
57 #include "testpoint.h"
59 #include "viewer-session.h"
60 #include "viewer-stream.h"
#define SESSION_BUF_DEFAULT_COUNT	16
static struct lttng_uri *live_uri;
/*
 * This pipe is used to inform the worker thread that a command is queued and
 * ready to be processed.
 */
static int live_conn_pipe[2] = { -1, -1 };
/* Shared between threads */
static int live_dispatch_thread_exit;

static pthread_t live_listener_thread;
static pthread_t live_dispatcher_thread;
static pthread_t live_worker_thread;
/*
 * Relay command queue.
 *
 * The live_thread_listener and live_thread_dispatcher communicate with this
 * queue.
 */
static struct relay_conn_queue viewer_conn_queue;
static uint64_t last_relay_viewer_session_id;
static pthread_mutex_t last_relay_viewer_session_id_lock =
		PTHREAD_MUTEX_INITIALIZER;
/*
 * Cleanup the daemon
 */
static
void cleanup_relayd_live(void)
{
	uri_free(live_uri);
}
/*
 * Receive a request buffer using a given socket, destination allocated buffer
 * of length size.
 *
 * Return the size of the received message or else a negative value on error
 * with errno being set by recvmsg() syscall.
 */
static
ssize_t recv_request(struct lttcomm_sock *sock, void *buf, size_t size)
{
	ssize_t ret;

	ret = sock->ops->recvmsg(sock, buf, size, 0);
	if (ret < 0 || ret != size) {
		if (ret == 0) {
			/* Orderly shutdown. Not necessary to print an error. */
			DBG("Socket %d did an orderly shutdown", sock->fd);
		} else {
			ERR("Relay failed to receive request.");
		}
		ret = -1;
	}

	return ret;
}
/*
 * Send a response buffer using a given socket, source allocated buffer of
 * length size.
 *
 * Return the size of the sent message or else a negative value on error with
 * errno being set by sendmsg() syscall.
 */
static
ssize_t send_response(struct lttcomm_sock *sock, void *buf, size_t size)
{
	ssize_t ret;

	ret = sock->ops->sendmsg(sock, buf, size, 0);
	if (ret < 0) {
		ERR("Relayd failed to send response.");
	}

	return ret;
}
/*
 * Atomically check if new streams got added in one of the sessions attached
 * and reset the flag to 0.
 *
 * Returns 1 if new streams got added, 0 if nothing changed, a negative value
 * on error.
 */
static
int check_new_streams(struct relay_connection *conn)
{
	struct relay_session *session;
	unsigned long current_val;
	int ret = 0;

	if (!conn->viewer_session) {
		goto end;
	}

	rcu_read_lock();
	cds_list_for_each_entry_rcu(session,
			&conn->viewer_session->session_list,
			viewer_session_node) {
		if (!session_get(session)) {
			continue;
		}
		current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
		ret = current_val;
		session_put(session);
		if (ret == 1) {
			goto end_unlock;
		}
	}
end_unlock:
	rcu_read_unlock();
end:
	return ret;
}
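
/*
 * Note on the cmpxchg above: uatomic_cmpxchg() returns the previous
 * value of session->new_streams, so a result of 1 means this caller is
 * the one that consumed the "new streams" notification; a concurrent
 * checker racing on the same session observes 0 and will not report
 * the same streams a second time.
 */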
/*
 * Send viewer streams to the given socket. The ignore_sent_flag indicates if
 * this function should ignore the sent flag or not.
 *
 * Return 0 on success or else a negative value.
 */
static
ssize_t send_viewer_streams(struct lttcomm_sock *sock,
		uint64_t session_id, unsigned int ignore_sent_flag)
{
	ssize_t ret;
	struct lttng_ht_iter iter;
	struct relay_viewer_stream *vstream;

	rcu_read_lock();

	cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, vstream,
			stream_n.node) {
		struct ctf_trace *ctf_trace;
		struct lttng_viewer_stream send_stream = {};

		health_code_update();

		if (!viewer_stream_get(vstream)) {
			continue;
		}

		pthread_mutex_lock(&vstream->stream->lock);
		/* Ignore if not the same session. */
		if (vstream->stream->trace->session->id != session_id ||
				(!ignore_sent_flag && vstream->sent_flag)) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			continue;
		}

		ctf_trace = vstream->stream->trace;
		send_stream.id = htobe64(vstream->stream->stream_handle);
		send_stream.ctf_trace_id = htobe64(ctf_trace->id);
		send_stream.metadata_flag = htobe32(
				vstream->stream->is_metadata);
		if (lttng_strncpy(send_stream.path_name, vstream->path_name,
				sizeof(send_stream.path_name))) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			ret = -1;	/* Error. */
			goto end_unlock;
		}
		if (lttng_strncpy(send_stream.channel_name,
				vstream->channel_name,
				sizeof(send_stream.channel_name))) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			ret = -1;	/* Error. */
			goto end_unlock;
		}

		DBG("Sending stream %" PRIu64 " to viewer",
				vstream->stream->stream_handle);
		vstream->sent_flag = 1;
		pthread_mutex_unlock(&vstream->stream->lock);

		ret = send_response(sock, &send_stream, sizeof(send_stream));
		viewer_stream_put(vstream);
		if (ret < 0) {
			goto end_unlock;
		}
	}

	ret = 0;

end_unlock:
	rcu_read_unlock();
	return ret;
}
/*
 * Create every viewer stream possible for the given session with the seek
 * type. Three counters *can* be returned, which are in order the total amount
 * of viewer streams of the session, the number of unsent streams and the
 * number of streams created. Those counters can be NULL and thus will be
 * ignored.
 *
 * session must be locked to ensure that we see either none or all initial
 * streams for a session, but no intermediate state.
 *
 * Return 0 on success or else a negative value.
 */
static int make_viewer_streams(struct relay_session *relay_session,
		struct relay_viewer_session *viewer_session,
		enum lttng_viewer_seek seek_t,
		uint32_t *nb_total,
		uint32_t *nb_unsent,
		uint32_t *nb_created,
		bool *closed)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ctf_trace *ctf_trace;
	struct relay_stream *relay_stream = NULL;

	assert(relay_session);
	ASSERT_LOCKED(relay_session->lock);

	if (relay_session->connection_closed) {
		*closed = true;
	}

	/*
	 * Create viewer streams for relay streams that are ready to be
	 * used for the given session id only.
	 */
	rcu_read_lock();
	cds_lfht_for_each_entry (relay_session->ctf_traces_ht->ht, &iter.iter,
			ctf_trace, node.node) {
		bool trace_has_metadata_stream = false;

		health_code_update();

		if (!ctf_trace_get(ctf_trace)) {
			continue;
		}

		/*
		 * Iterate over all the streams of the trace to see if we have a
		 * metadata stream.
		 */
		cds_list_for_each_entry_rcu(relay_stream,
				&ctf_trace->stream_list, stream_node)
		{
			bool is_metadata_stream;

			pthread_mutex_lock(&relay_stream->lock);
			is_metadata_stream = relay_stream->is_metadata;
			pthread_mutex_unlock(&relay_stream->lock);

			if (is_metadata_stream) {
				trace_has_metadata_stream = true;
				break;
			}
		}

		relay_stream = NULL;

		/*
		 * If there is no metadata stream in this trace at the moment
		 * and we never sent one to the viewer, skip the trace. We
		 * accept that the viewer will not see this trace at all.
		 */
		if (!trace_has_metadata_stream &&
				!ctf_trace->metadata_stream_sent_to_viewer) {
			ctf_trace_put(ctf_trace);
			continue;
		}

		cds_list_for_each_entry_rcu(relay_stream,
				&ctf_trace->stream_list, stream_node)
		{
			struct relay_viewer_stream *viewer_stream;

			if (!stream_get(relay_stream)) {
				continue;
			}

			pthread_mutex_lock(&relay_stream->lock);
			/*
			 * stream published is protected by the session lock.
			 */
			if (!relay_stream->published) {
				goto next;
			}
			viewer_stream = viewer_stream_get_by_id(
					relay_stream->stream_handle);
			if (!viewer_stream) {
				struct lttng_trace_chunk *viewer_stream_trace_chunk = NULL;

				/*
				 * Save that we sent the metadata stream to the
				 * viewer. So that we know what trace the viewer
				 * is aware of.
				 */
				if (relay_stream->is_metadata) {
					ctf_trace->metadata_stream_sent_to_viewer = true;
				}

				/*
				 * If a rotation is ongoing, use a copy of the
				 * relay stream's chunk to ensure the stream
				 * files exist.
				 *
				 * Otherwise, the viewer session's current trace
				 * chunk can be used safely.
				 */
				if ((relay_stream->ongoing_rotation.is_set ||
						relay_session->ongoing_rotation) &&
						relay_stream->trace_chunk) {
					viewer_stream_trace_chunk = lttng_trace_chunk_copy(
							relay_stream->trace_chunk);
					if (!viewer_stream_trace_chunk) {
						ret = -1;
						ctf_trace_put(ctf_trace);
						goto error_unlock;
					}
				} else {
					bool reference_acquired;

					/*
					 * Transition the viewer session into the newest trace chunk available.
					 */
					if (!lttng_trace_chunk_ids_equal(viewer_session->current_trace_chunk,
							relay_stream->trace_chunk)) {
						ret = viewer_session_set_trace_chunk_copy(
								viewer_session,
								relay_stream->trace_chunk);
						if (ret) {
							ret = -1;
							ctf_trace_put(ctf_trace);
							goto error_unlock;
						}
					}

					reference_acquired = lttng_trace_chunk_get(
							viewer_session->current_trace_chunk);
					assert(reference_acquired);
					viewer_stream_trace_chunk =
							viewer_session->current_trace_chunk;
				}

				viewer_stream = viewer_stream_create(
						relay_stream,
						viewer_stream_trace_chunk,
						seek_t);
				lttng_trace_chunk_put(viewer_stream_trace_chunk);
				viewer_stream_trace_chunk = NULL;
				if (!viewer_stream) {
					ret = -1;
					ctf_trace_put(ctf_trace);
					goto error_unlock;
				}

				if (nb_created) {
					/* Update number of created stream counter. */
					(*nb_created)++;
				}
				/*
				 * Ensure a self-reference is preserved even
				 * after we have put our local reference.
				 */
				if (!viewer_stream_get(viewer_stream)) {
					ERR("Unable to get self-reference on viewer stream, logic error.");
					abort();
				}
			} else {
				if (!viewer_stream->sent_flag && nb_unsent) {
					/* Update number of unsent stream counter. */
					(*nb_unsent)++;
				}
			}
			/* Update number of total stream counter. */
			if (nb_total) {
				if (relay_stream->is_metadata) {
					if (!relay_stream->closed ||
							relay_stream->metadata_received >
								viewer_stream->metadata_sent) {
						(*nb_total)++;
					}
				} else {
					if (!relay_stream->closed ||
							!(((int64_t)(relay_stream->prev_data_seq -
								relay_stream->last_net_seq_num)) >= 0)) {
						(*nb_total)++;
					}
				}
			}
			/* Put local reference. */
			viewer_stream_put(viewer_stream);
		next:
			pthread_mutex_unlock(&relay_stream->lock);
			stream_put(relay_stream);
		}
		relay_stream = NULL;
		ctf_trace_put(ctf_trace);
	}

	ret = 0;

error_unlock:
	rcu_read_unlock();

	if (relay_stream) {
		pthread_mutex_unlock(&relay_stream->lock);
		stream_put(relay_stream);
	}

	return ret;
}
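
/*
 * Illustration of the counters above (hypothetical numbers): for a
 * session with 4 relay streams of which 3 already have viewer streams
 * and 1 of those was never sent, a call would report nb_total = 4,
 * nb_unsent = 1 and nb_created = 1. viewer_get_new_streams() relies on
 * exactly this to reply with nb_created + nb_unsent streams.
 */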
int relayd_live_stop(void)
{
	/* Stop dispatch thread */
	CMM_STORE_SHARED(live_dispatch_thread_exit, 1);

	futex_nto1_wake(&viewer_conn_queue.futex);

	return 0;
}
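
/*
 * The CMM_STORE_SHARED() above pairs with the CMM_LOAD_SHARED() in
 * thread_dispatcher: the dispatcher re-reads the exit flag after
 * futex_nto1_prepare(), so this wake-up cannot be lost even if it
 * races with the dispatcher going to sleep on the futex.
 */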
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static
int create_named_thread_poll_set(struct lttng_poll_event *events,
		int size, const char *name)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = fd_tracker_util_poll_create(the_fd_tracker,
			name, events, 1, LTTNG_CLOEXEC);
	if (ret) {
		PERROR("Failed to create \"%s\" poll file descriptor", name);
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
static
int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
static
int create_sock(void *data, int *out_fd)
{
	int ret;
	struct lttcomm_sock *sock = data;

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		goto end;
	}

	*out_fd = sock->fd;
end:
	return ret;
}
static
int close_sock(void *data, int *in_fd)
{
	struct lttcomm_sock *sock = data;

	return sock->ops->close(sock);
}
static int accept_sock(void *data, int *out_fd)
{
	int ret = 0;
	/* Socks is an array of in_sock, out_sock. */
	struct lttcomm_sock **socks = data;
	struct lttcomm_sock *in_sock = socks[0];

	socks[1] = in_sock->ops->accept(in_sock);
	if (!socks[1]) {
		ret = -1;
		goto end;
	}
	*out_fd = socks[1]->fd;
end:
	return ret;
}
static
struct lttcomm_sock *accept_live_sock(struct lttcomm_sock *listening_sock,
		const char *name)
{
	int out_fd, ret;
	struct lttcomm_sock *socks[2] = { listening_sock, NULL };
	struct lttcomm_sock *new_sock = NULL;

	ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &out_fd,
			(const char **) &name, 1, accept_sock, &socks);
	if (ret) {
		goto end;
	}
	new_sock = socks[1];
	DBG("%s accepted, socket %d", name, new_sock->fd);
end:
	return new_sock;
}
/*
 * Create and init socket from uri.
 */
static
struct lttcomm_sock *init_socket(struct lttng_uri *uri, const char *name)
{
	int ret, sock_fd;
	struct lttcomm_sock *sock = NULL;
	char uri_str[LTTNG_PATH_MAX];
	char *formated_name = NULL;

	sock = lttcomm_alloc_sock_from_uri(uri);
	if (sock == NULL) {
		ERR("Allocating socket");
		goto error;
	}

	/*
	 * Don't fail to create the socket if the name can't be built as it is
	 * only used for debugging purposes.
	 */
	ret = uri_to_str_url(uri, uri_str, sizeof(uri_str));
	uri_str[sizeof(uri_str) - 1] = '\0';
	if (ret >= 0) {
		ret = asprintf(&formated_name, "%s socket @ %s", name,
				uri_str);
		if (ret < 0) {
			formated_name = NULL;
		}
	}

	ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &sock_fd,
			(const char **) (formated_name ? &formated_name : NULL),
			1, create_sock, sock);
	if (ret) {
		PERROR("Failed to create \"%s\" socket",
				formated_name ?: "Unknown");
		goto error;
	}
	DBG("Listening on %s socket %d", name, sock->fd);

	ret = sock->ops->bind(sock);
	if (ret < 0) {
		PERROR("Failed to bind lttng-live socket");
		goto error;
	}

	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}

	free(formated_name);
	return sock;

error:
	if (sock) {
		lttcomm_destroy_sock(sock);
	}
	free(formated_name);
	return NULL;
}
/*
 * This thread manages the listening for new connections on the network
 */
static
void *thread_listener(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *live_control_sock;

	DBG("[thread] Relay live listener started");

	rcu_register_thread();
	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_LISTENER);

	health_code_update();

	live_control_sock = init_socket(live_uri, "Live listener");
	if (!live_control_sock) {
		goto error_sock_control;
	}

	/* Pass 2 as size here for the thread quit pipe and control sockets. */
	ret = create_named_thread_poll_set(&events, 2,
			"Live listener thread epoll");
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the control socket */
	ret = lttng_poll_add(&events, live_control_sock->fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	lttng_relay_notify_ready();

	if (testpoint(relayd_thread_live_listener)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		DBG("Listener accepting live viewers connections");

restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}
		nb_fd = ret;

		DBG("Relay new viewer connection received");
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			if (revents & LPOLLIN) {
				/*
				 * A new connection is requested, therefore a
				 * viewer connection is allocated in this
				 * thread, enqueued to a global queue and
				 * dequeued (and freed) in the worker thread.
				 */
				int val = 1;
				struct relay_connection *new_conn;
				struct lttcomm_sock *newsock;

				newsock = accept_live_sock(live_control_sock,
						"Live socket to client");
				if (!newsock) {
					PERROR("accepting control sock");
					goto error;
				}
				DBG("Relay viewer connection accepted socket %d", newsock->fd);

				ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
						sizeof(val));
				if (ret < 0) {
					PERROR("setsockopt inet");
					lttcomm_destroy_sock(newsock);
					goto error;
				}
				new_conn = connection_create(newsock, RELAY_CONNECTION_UNKNOWN);
				if (!new_conn) {
					lttcomm_destroy_sock(newsock);
					goto error;
				}
				/* Ownership assumed by the connection. */
				newsock = NULL;

				/* Enqueue request for the dispatcher thread. */
				cds_wfcq_enqueue(&viewer_conn_queue.head, &viewer_conn_queue.tail,
						&new_conn->qnode);

				/*
				 * Wake the dispatch queue futex.
				 * Implicit memory barrier with the
				 * exchange in cds_wfcq_enqueue.
				 */
				futex_nto1_wake(&viewer_conn_queue.futex);
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("socket poll error");
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
error_poll_add:
error_testpoint:
	(void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
error_create_poll:
	if (live_control_sock->fd >= 0) {
		int sock_fd = live_control_sock->fd;

		ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker,
				&sock_fd, 1, close_sock,
				live_control_sock);
		if (ret) {
			PERROR("close");
		}
		live_control_sock->fd = -1;
	}
	lttcomm_destroy_sock(live_control_sock);
error_sock_control:
	if (err) {
		health_error();
		DBG("Live viewer listener thread exited with error");
	}
	health_unregister(health_relayd);
	rcu_unregister_thread();
	DBG("Live viewer listener thread cleanup complete");
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
	}
	return NULL;
}
/*
 * This thread manages the dispatching of the requests to worker threads
 */
static
void *thread_dispatcher(void *data)
{
	int err = -1;
	ssize_t ret;
	struct cds_wfcq_node *node;
	struct relay_connection *conn = NULL;

	DBG("[thread] Live viewer relay dispatcher started");

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_DISPATCHER);

	if (testpoint(relayd_thread_live_dispatcher)) {
		goto error_testpoint;
	}

	health_code_update();

	for (;;) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&viewer_conn_queue.futex);

		if (CMM_LOAD_SHARED(live_dispatch_thread_exit)) {
			break;
		}

		do {
			health_code_update();

			/* Dequeue commands */
			node = cds_wfcq_dequeue_blocking(&viewer_conn_queue.head,
					&viewer_conn_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the live-viewer "
						"relay command queue");
				/* Continue thread execution */
				break;
			}
			conn = caa_container_of(node, struct relay_connection, qnode);
			DBG("Dispatching viewer request waiting on sock %d",
					conn->sock->fd);

			/*
			 * Inform worker thread of the new request. This
			 * call is blocking so we can be assured that
			 * the data will be read at some point in time
			 * or wait to the end of the world :)
			 */
			ret = lttng_write(live_conn_pipe[1], &conn, sizeof(conn));
			if (ret < 0) {
				PERROR("write conn pipe");
				connection_put(conn);
				goto error;
			}
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		health_poll_entry();
		futex_nto1_wait(&viewer_conn_queue.futex);
		health_poll_exit();
	}

	/* Normal exit, no error */
	err = 0;

error:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	DBG("Live viewer dispatch thread dying");
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
	}
	return NULL;
}
/*
 * Establish connection with the viewer and check the versions.
 *
 * Return 0 on success or else negative value.
 */
static
int viewer_connect(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_connect reply, msg;

	conn->version_check_done = 1;

	health_code_update();

	DBG("Viewer is establishing a connection to the relayd.");

	ret = recv_request(conn->sock, &msg, sizeof(msg));
	if (ret < 0) {
		goto end;
	}

	health_code_update();

	memset(&reply, 0, sizeof(reply));
	reply.major = RELAYD_VERSION_COMM_MAJOR;
	reply.minor = RELAYD_VERSION_COMM_MINOR;

	/* Major versions must be the same */
	if (reply.major != be32toh(msg.major)) {
		DBG("Incompatible major versions ([relayd] %u vs [client] %u)",
				reply.major, be32toh(msg.major));
		ret = -1;
		goto end;
	}

	conn->major = reply.major;
	/* We adapt to the lowest compatible version */
	if (reply.minor <= be32toh(msg.minor)) {
		conn->minor = reply.minor;
	} else {
		conn->minor = be32toh(msg.minor);
	}

	if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_COMMAND) {
		conn->type = RELAY_VIEWER_COMMAND;
	} else if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_NOTIFICATION) {
		conn->type = RELAY_VIEWER_NOTIFICATION;
	} else {
		ERR("Unknown connection type : %u", be32toh(msg.type));
		ret = -1;
		goto end;
	}

	reply.major = htobe32(reply.major);
	reply.minor = htobe32(reply.minor);
	if (conn->type == RELAY_VIEWER_COMMAND) {
		/*
		 * Increment outside of htobe64 macro, because the argument can
		 * be used more than once within the macro, and thus the
		 * operation may be undefined.
		 */
		pthread_mutex_lock(&last_relay_viewer_session_id_lock);
		last_relay_viewer_session_id++;
		pthread_mutex_unlock(&last_relay_viewer_session_id_lock);
		reply.viewer_session_id = htobe64(last_relay_viewer_session_id);
	}

	health_code_update();

	ret = send_response(conn->sock, &reply, sizeof(reply));
	if (ret < 0) {
		goto end;
	}

	health_code_update();

	DBG("Version check done using protocol %u.%u", conn->major, conn->minor);
	ret = 0;

end:
	return ret;
}
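
/*
 * Worked example of the negotiation above (hypothetical version
 * numbers): if this relayd implements protocol 2.11 and a client
 * connects announcing 2.4, the majors match (2), the relayd adopts the
 * lower minor and both ends speak 2.4 for the rest of the connection;
 * a client announcing major 1 is rejected outright.
 */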
/*
 * Send the viewer the list of current sessions.
 * We need to create a copy of the hash table content because otherwise
 * we cannot assume the number of entries stays the same between getting
 * the number of HT elements and iteration over the HT.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_list_sessions(struct relay_connection *conn)
{
	int ret = 0;
	struct lttng_viewer_list_sessions session_list;
	struct lttng_ht_iter iter;
	struct relay_session *session;
	struct lttng_viewer_session *send_session_buf = NULL;
	uint32_t buf_count = SESSION_BUF_DEFAULT_COUNT;
	uint32_t count = 0;

	DBG("List sessions received");

	send_session_buf = zmalloc(SESSION_BUF_DEFAULT_COUNT * sizeof(*send_session_buf));
	if (!send_session_buf) {
		return -1;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(sessions_ht->ht, &iter.iter, session,
			session_n.node) {
		struct lttng_viewer_session *send_session;

		health_code_update();

		pthread_mutex_lock(&session->lock);
		if (session->connection_closed) {
			/* Skip closed session */
			goto next_session;
		}

		if (count >= buf_count) {
			struct lttng_viewer_session *newbuf;
			uint32_t new_buf_count = buf_count << 1;

			newbuf = realloc(send_session_buf,
					new_buf_count * sizeof(*send_session_buf));
			if (!newbuf) {
				ret = -1;
				goto break_loop;
			}
			send_session_buf = newbuf;
			buf_count = new_buf_count;
		}
		send_session = &send_session_buf[count];
		if (lttng_strncpy(send_session->session_name,
				session->session_name,
				sizeof(send_session->session_name))) {
			ret = -1;
			goto break_loop;
		}
		if (lttng_strncpy(send_session->hostname, session->hostname,
				sizeof(send_session->hostname))) {
			ret = -1;
			goto break_loop;
		}
		send_session->id = htobe64(session->id);
		send_session->live_timer = htobe32(session->live_timer);
		if (session->viewer_attached) {
			send_session->clients = htobe32(1);
		} else {
			send_session->clients = htobe32(0);
		}
		send_session->streams = htobe32(session->stream_count);
		count++;
	next_session:
		pthread_mutex_unlock(&session->lock);
		continue;
	break_loop:
		pthread_mutex_unlock(&session->lock);
		break;
	}
	rcu_read_unlock();
	if (ret < 0) {
		goto end_free;
	}

	session_list.sessions_count = htobe32(count);

	health_code_update();

	ret = send_response(conn->sock, &session_list, sizeof(session_list));
	if (ret < 0) {
		goto end_free;
	}

	health_code_update();

	ret = send_response(conn->sock, send_session_buf,
			count * sizeof(*send_session_buf));
	if (ret < 0) {
		goto end_free;
	}
	health_code_update();

	ret = 0;
end_free:
	free(send_session_buf);
	return ret;
}
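
/*
 * Sizing note: with SESSION_BUF_DEFAULT_COUNT at 16, the reply buffer
 * doubles (16, 32, 64, ...) as sessions are appended, so listing n
 * sessions costs O(log n) reallocations, and the reply itself is sent
 * in two messages: the count header, then the session array.
 */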
/*
 * Send the viewer the list of new streams for the given session.
 */
static
int viewer_get_new_streams(struct relay_connection *conn)
{
	int ret, send_streams = 0;
	uint32_t nb_created = 0, nb_unsent = 0, nb_streams = 0, nb_total = 0;
	struct lttng_viewer_new_streams_request request;
	struct lttng_viewer_new_streams_response response;
	struct relay_session *session = NULL;
	uint64_t session_id;
	bool closed = false;

	assert(conn);

	DBG("Get new streams received");

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto error;
	}
	session_id = be64toh(request.session_id);

	health_code_update();

	memset(&response, 0, sizeof(response));

	session = session_get_by_id(session_id);
	if (!session) {
		DBG("Relay session %" PRIu64 " not found", session_id);
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
		goto send_reply;
	}

	if (!viewer_session_is_attached(conn->viewer_session, session)) {
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
		goto send_reply;
	}

	/*
	 * For any new stream, create it with LTTNG_VIEWER_SEEK_BEGINNING since
	 * at this point the client is already attached to the session. Any
	 * initial stream will have been created with the seek type at attach
	 * time (for now most readers use the LTTNG_VIEWER_SEEK_LAST on attach).
	 * Otherwise any event happening in a new stream between the attach and
	 * a call to viewer_get_new_streams will be "lost" (never received) from
	 * the viewer's point of view.
	 */
	pthread_mutex_lock(&session->lock);
	ret = make_viewer_streams(session,
			conn->viewer_session,
			LTTNG_VIEWER_SEEK_BEGINNING, &nb_total, &nb_unsent,
			&nb_created, &closed);
	if (ret < 0) {
		goto error_unlock_session;
	}
	send_streams = 1;
	response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_OK);

	/* Only send back the newly created streams with the unsent ones. */
	nb_streams = nb_created + nb_unsent;
	response.streams_count = htobe32(nb_streams);

	/*
	 * If the session is closed, HUP when there are no more streams
	 * with data.
	 */
	if (closed && nb_total == 0) {
		send_streams = 0;
		response.streams_count = 0;
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_HUP);
		goto send_reply_unlock;
	}
send_reply_unlock:
	pthread_mutex_unlock(&session->lock);

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
	if (ret < 0) {
		goto end_put_session;
	}
	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer
	 * knows what is happening.
	 */
	if (!send_streams || !nb_streams) {
		ret = 0;
		goto end_put_session;
	}

	/*
	 * Send stream and *DON'T* ignore the sent flag so every viewer
	 * streams that were not sent from that point will be sent to
	 * the viewer.
	 */
	ret = send_viewer_streams(conn->sock, session_id, 0);
	if (ret < 0) {
		goto end_put_session;
	}

end_put_session:
	if (session) {
		session_put(session);
	}
error:
	return ret;
error_unlock_session:
	pthread_mutex_unlock(&session->lock);
	session_put(session);
	return ret;
}
/*
 * Attach the viewer to a session.
 */
static
int viewer_attach_session(struct relay_connection *conn)
{
	int send_streams = 0;
	int ret;
	uint32_t nb_streams = 0;
	enum lttng_viewer_seek seek_type;
	struct lttng_viewer_attach_session_request request;
	struct lttng_viewer_attach_session_response response;
	struct relay_session *session = NULL;
	enum lttng_viewer_attach_return_code viewer_attach_status;
	bool closed = false;
	uint64_t session_id;

	assert(conn);

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto error;
	}

	session_id = be64toh(request.session_id);
	health_code_update();

	memset(&response, 0, sizeof(response));

	if (!conn->viewer_session) {
		DBG("Client trying to attach before creating a live viewer session");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_NO_SESSION);
		goto send_reply;
	}

	session = session_get_by_id(session_id);
	if (!session) {
		DBG("Relay session %" PRIu64 " not found", session_id);
		response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
		goto send_reply;
	}
	DBG("Attach session ID %" PRIu64 " received", session_id);

	pthread_mutex_lock(&session->lock);
	if (session->live_timer == 0) {
		DBG("Not live session");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_NOT_LIVE);
		goto send_reply;
	}

	send_streams = 1;
	viewer_attach_status = viewer_session_attach(conn->viewer_session,
			session);
	if (viewer_attach_status != LTTNG_VIEWER_ATTACH_OK) {
		response.status = htobe32(viewer_attach_status);
		goto send_reply;
	}

	switch (be32toh(request.seek)) {
	case LTTNG_VIEWER_SEEK_BEGINNING:
	case LTTNG_VIEWER_SEEK_LAST:
		response.status = htobe32(LTTNG_VIEWER_ATTACH_OK);
		seek_type = be32toh(request.seek);
		break;
	default:
		ERR("Wrong seek parameter");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_SEEK_ERR);
		send_streams = 0;
		goto send_reply;
	}

	ret = make_viewer_streams(session,
			conn->viewer_session, seek_type,
			&nb_streams, NULL, NULL, &closed);
	if (ret < 0) {
		goto end_put_session;
	}
	pthread_mutex_unlock(&session->lock);
	session_put(session);
	session = NULL;

	response.streams_count = htobe32(nb_streams);
	/*
	 * If the session is closed when the viewer is attaching, it
	 * means some of the streams may have been concurrently removed,
	 * so we don't allow the viewer to attach, even if there are
	 * streams available.
	 */
	if (closed) {
		send_streams = 0;
		response.streams_count = 0;
		response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
		goto send_reply;
	}

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
	if (ret < 0) {
		goto end_put_session;
	}
	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer
	 * knows what is happening.
	 */
	if (!send_streams || !nb_streams) {
		ret = 0;
		goto end_put_session;
	}

	/* Send stream and ignore the sent flag. */
	ret = send_viewer_streams(conn->sock, session_id, 1);
	if (ret < 0) {
		goto end_put_session;
	}

end_put_session:
	if (session) {
		pthread_mutex_unlock(&session->lock);
		session_put(session);
	}
error:
	return ret;
}
/*
 * Open the index file if needed for the given vstream.
 *
 * If an index file is successfully opened, the vstream will set it as its
 * current index file.
 *
 * Return 0 on success, a negative value on error (-ENOENT if not ready yet).
 *
 * Called with rstream lock held.
 */
static int try_open_index(struct relay_viewer_stream *vstream,
		struct relay_stream *rstream)
{
	int ret = 0;
	const uint32_t connection_major = rstream->trace->session->major;
	const uint32_t connection_minor = rstream->trace->session->minor;
	enum lttng_trace_chunk_status chunk_status;

	if (vstream->index_file) {
		goto end;
	}

	/*
	 * First time, we open the index file and at least one index is ready.
	 */
	if (rstream->index_received_seqcount == 0 ||
			!vstream->stream_file.trace_chunk) {
		ret = -ENOENT;
		goto end;
	}

	chunk_status = lttng_index_file_create_from_trace_chunk_read_only(
			vstream->stream_file.trace_chunk, rstream->path_name,
			rstream->channel_name, rstream->tracefile_size,
			vstream->current_tracefile_id,
			lttng_to_index_major(connection_major, connection_minor),
			lttng_to_index_minor(connection_major, connection_minor),
			true, &vstream->index_file);
	if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
		if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE) {
			ret = -ENOENT;
		} else {
			ret = -1;
		}
	}

end:
	return ret;
}
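
/*
 * Callers map the -ENOENT above to a viewer status rather than an
 * error: viewer_get_next_index() replies LTTNG_VIEWER_INDEX_RETRY when
 * the relay stream is still open (no index written yet) and
 * LTTNG_VIEWER_INDEX_HUP when it is closed.
 */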
/*
 * Check the status of the index for the given stream. This function
 * updates the index structure if needed and can put (close) the vstream
 * in the HUP situation.
 *
 * Return 0 means that we can proceed with the index. A value of 1 means
 * that the index has been updated and is ready to be sent to the
 * client. A negative value indicates an error that can't be handled.
 *
 * Called with rstream lock held.
 */
static int check_index_status(struct relay_viewer_stream *vstream,
		struct relay_stream *rstream, struct ctf_trace *trace,
		struct lttng_viewer_index *index)
{
	int ret;

	DBG("Check index status: index_received_seqcount %" PRIu64 " "
			"index_sent_seqcount %" PRIu64 " "
			"for stream %" PRIu64,
			rstream->index_received_seqcount,
			vstream->index_sent_seqcount,
			vstream->stream->stream_handle);
	if ((trace->session->connection_closed || rstream->closed)
			&& rstream->index_received_seqcount
				== vstream->index_sent_seqcount) {
		/*
		 * Last index sent and session connection or relay
		 * stream are closed.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
		goto hup;
	} else if (rstream->beacon_ts_end != -1ULL &&
			(rstream->index_received_seqcount == 0 ||
			(vstream->index_sent_seqcount != 0 &&
			rstream->index_received_seqcount
				<= vstream->index_sent_seqcount))) {
		/*
		 * We've received a synchronization beacon and the last index
		 * available has been sent, the index for now is inactive.
		 *
		 * In this case, we have received a beacon which allows us to
		 * inform the client of a time interval during which we can
		 * guarantee that there are no events to read (and never will
		 * be).
		 *
		 * The sent seqcount can grow higher than receive seqcount on
		 * clear because the rotation performed by clear will push
		 * the index_sent_seqcount ahead (see
		 * viewer_stream_sync_tracefile_array_tail) and skip over
		 * packet sequence numbers.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_INACTIVE);
		index->timestamp_end = htobe64(rstream->beacon_ts_end);
		index->stream_id = htobe64(rstream->ctf_stream_id);
		DBG("Check index status: inactive with beacon, for stream %" PRIu64,
				vstream->stream->stream_handle);
		goto index_ready;
	} else if (rstream->index_received_seqcount == 0 ||
			(vstream->index_sent_seqcount != 0 &&
			rstream->index_received_seqcount
				<= vstream->index_sent_seqcount)) {
		/*
		 * This checks whether received <= sent seqcount. In
		 * this case, we have not received a beacon. Therefore,
		 * we can only ask the client to retry later.
		 *
		 * The sent seqcount can grow higher than receive seqcount on
		 * clear because the rotation performed by clear will push
		 * the index_sent_seqcount ahead (see
		 * viewer_stream_sync_tracefile_array_tail) and skip over
		 * packet sequence numbers.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
		DBG("Check index status: retry for stream %" PRIu64,
				vstream->stream->stream_handle);
		goto index_ready;
	} else if (!tracefile_array_seq_in_file(rstream->tfa,
			vstream->current_tracefile_id,
			vstream->index_sent_seqcount)) {
		/*
		 * The next index we want to send cannot be read either
		 * because we need to perform a rotation, or due to
		 * the producer having overwritten its trace file.
		 */
		DBG("Viewer stream %" PRIu64 " rotation",
				vstream->stream->stream_handle);
		ret = viewer_stream_rotate(vstream);
		if (ret == 1) {
			/* EOF across entire stream. */
			index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
			goto hup;
		}
		/*
		 * If we have been pushed due to overwrite, it
		 * necessarily means there is data that can be read in
		 * the stream. If we rotated because we reached the end
		 * of a tracefile, it means the following tracefile
		 * needs to contain at least one index, else we would
		 * have already returned LTTNG_VIEWER_INDEX_RETRY to the
		 * viewer. The updated index_sent_seqcount needs to
		 * point to a readable index entry now.
		 *
		 * In the case where we "rotate" on a single file, we
		 * can end up in a case where the requested index is
		 * still unavailable.
		 */
		if (rstream->tracefile_count == 1 &&
				!tracefile_array_seq_in_file(
					rstream->tfa,
					vstream->current_tracefile_id,
					vstream->index_sent_seqcount)) {
			index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
			DBG("Check index status: retry: "
					"tracefile array sequence number %" PRIu64
					" not in file for stream %" PRIu64,
					vstream->index_sent_seqcount,
					vstream->stream->stream_handle);
			goto index_ready;
		}
		assert(tracefile_array_seq_in_file(rstream->tfa,
				vstream->current_tracefile_id,
				vstream->index_sent_seqcount));
	}
	/* ret == 0 means successful so we continue. */
	ret = 0;
	return ret;

hup:
	viewer_stream_put(vstream);
index_ready:
	return 1;
}
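
/*
 * Illustration with hypothetical seqcounts: received == sent == 5 on a
 * closed stream yields HUP (nothing more will ever come); the same
 * counts on a live stream yield INACTIVE when a beacon was received
 * (the viewer learns the quiescence interval) and RETRY otherwise;
 * received == 7 with sent == 5 falls through and the next index can be
 * read and sent.
 */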
static
void viewer_stream_rotate_to_trace_chunk(struct relay_viewer_stream *vstream,
		struct lttng_trace_chunk *new_trace_chunk)
{
	lttng_trace_chunk_put(vstream->stream_file.trace_chunk);

	if (new_trace_chunk) {
		const bool acquired_reference = lttng_trace_chunk_get(
				new_trace_chunk);

		assert(acquired_reference);
	}

	vstream->stream_file.trace_chunk = new_trace_chunk;
	viewer_stream_sync_tracefile_array_tail(vstream);
	viewer_stream_close_files(vstream);
}
/*
 * Send the next index for a stream.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_get_next_index(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_get_next_index request_index;
	struct lttng_viewer_index viewer_index;
	struct ctf_packet_index packet_index;
	struct relay_viewer_stream *vstream = NULL;
	struct relay_stream *rstream = NULL;
	struct ctf_trace *ctf_trace = NULL;
	struct relay_viewer_stream *metadata_viewer_stream = NULL;

	assert(conn);

	DBG("Viewer get next index");

	memset(&viewer_index, 0, sizeof(viewer_index));
	health_code_update();

	ret = recv_request(conn->sock, &request_index, sizeof(request_index));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	vstream = viewer_stream_get_by_id(be64toh(request_index.stream_id));
	if (!vstream) {
		DBG("Client requested index of unknown stream id %" PRIu64,
				(uint64_t) be64toh(request_index.stream_id));
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	}

	/* Use back. ref. Protected by refcounts. */
	rstream = vstream->stream;
	ctf_trace = rstream->trace;

	/* metadata_viewer_stream may be NULL. */
	metadata_viewer_stream =
			ctf_trace_get_viewer_metadata_stream(ctf_trace);

	pthread_mutex_lock(&rstream->lock);

	/*
	 * The viewer should not ask for index on metadata stream.
	 */
	if (rstream->is_metadata) {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
		goto send_reply;
	}

	if (rstream->ongoing_rotation.is_set) {
		/* Rotation is ongoing, try again later. */
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
		goto send_reply;
	}

	if (rstream->trace->session->ongoing_rotation) {
		/* Rotation is ongoing, try again later. */
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
		goto send_reply;
	}

	/*
	 * Transition the viewer session into the newest trace chunk available.
	 */
	if (!lttng_trace_chunk_ids_equal(
			conn->viewer_session->current_trace_chunk,
			rstream->trace_chunk)) {
		DBG("Relay stream and viewer chunk ids differ");

		ret = viewer_session_set_trace_chunk_copy(
				conn->viewer_session,
				rstream->trace_chunk);
		if (ret) {
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
			goto send_reply;
		}
	}

	/*
	 * Transition the viewer stream into the latest trace chunk available.
	 *
	 * Note that the stream must _not_ rotate in one precise condition:
	 * the relay stream has rotated to a NULL trace chunk and the viewer
	 * stream is consuming the trace chunk that was active just before
	 * that rotation to NULL.
	 *
	 * This allows clients to consume all the packets of a trace chunk
	 * after a session's destruction.
	 */
	if (conn->viewer_session->current_trace_chunk != vstream->stream_file.trace_chunk &&
			!(rstream->completed_rotation_count == vstream->last_seen_rotation_count + 1 && !rstream->trace_chunk)) {
		DBG("Viewer session and viewer stream chunk differ: "
				"vsession chunk %p vstream chunk %p",
				conn->viewer_session->current_trace_chunk,
				vstream->stream_file.trace_chunk);
		viewer_stream_rotate_to_trace_chunk(vstream,
				conn->viewer_session->current_trace_chunk);
		vstream->last_seen_rotation_count =
				rstream->completed_rotation_count;
	}

	ret = check_index_status(vstream, rstream, ctf_trace, &viewer_index);
	if (ret < 0) {
		goto error_put;
	} else if (ret == 1) {
		/*
		 * We have no index to send and check_index_status has populated
		 * viewer_index's status.
		 */
		goto send_reply;
	}
	/* At this point, ret is 0 thus we will be able to read the index. */

	/* Try to open an index if one is needed for that stream. */
	ret = try_open_index(vstream, rstream);
	if (ret == -ENOENT) {
		if (rstream->closed) {
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
			goto send_reply;
		} else {
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
			goto send_reply;
		}
	}
	if (ret < 0) {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	}

	/*
	 * vstream->stream_fd may be NULL if it has been closed by
	 * tracefile rotation, or if we are at the beginning of the
	 * stream. We open the data stream file here to protect against
	 * overwrite caused by tracefile rotation (in association with
	 * unlink performed before overwrite).
	 */
	if (!vstream->stream_file.handle) {
		char file_path[LTTNG_PATH_MAX];
		enum lttng_trace_chunk_status status;
		struct fs_handle *fs_handle;

		ret = utils_stream_file_path(rstream->path_name,
				rstream->channel_name, rstream->tracefile_size,
				vstream->current_tracefile_id, NULL, file_path,
				sizeof(file_path));
		if (ret < 0) {
			goto error_put;
		}

		/*
		 * It is possible that the file we are trying to open is
		 * missing if the stream has been closed (application exits with
		 * per-pid buffers) and a clear command has been performed.
		 */
		status = lttng_trace_chunk_open_fs_handle(
				vstream->stream_file.trace_chunk,
				file_path, O_RDONLY, 0, &fs_handle, true);
		if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			if (status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE &&
					rstream->closed) {
				viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
				goto send_reply;
			}
			PERROR("Failed to open trace file for viewer stream");
			goto error_put;
		}
		vstream->stream_file.handle = fs_handle;
	}

	ret = check_new_streams(conn);
	if (ret < 0) {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	} else if (ret == 1) {
		viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
	}

	ret = lttng_index_file_read(vstream->index_file, &packet_index);
	if (ret) {
		ERR("Relay error reading index file");
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	} else {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_OK);
		vstream->index_sent_seqcount++;
	}

	/*
	 * Indexes are stored in big endian, no need to switch before sending.
	 */
	DBG("Sending viewer index for stream %" PRIu64 " offset %" PRIu64,
			rstream->stream_handle,
			(uint64_t) be64toh(packet_index.offset));
	viewer_index.offset = packet_index.offset;
	viewer_index.packet_size = packet_index.packet_size;
	viewer_index.content_size = packet_index.content_size;
	viewer_index.timestamp_begin = packet_index.timestamp_begin;
	viewer_index.timestamp_end = packet_index.timestamp_end;
	viewer_index.events_discarded = packet_index.events_discarded;
	viewer_index.stream_id = packet_index.stream_id;

send_reply:
	if (rstream) {
		pthread_mutex_unlock(&rstream->lock);
	}

	if (metadata_viewer_stream) {
		pthread_mutex_lock(&metadata_viewer_stream->stream->lock);
		DBG("get next index metadata check: recv %" PRIu64
				" sent %" PRIu64,
				metadata_viewer_stream->stream->metadata_received,
				metadata_viewer_stream->metadata_sent);
		if (!metadata_viewer_stream->stream->metadata_received ||
				metadata_viewer_stream->stream->metadata_received >
					metadata_viewer_stream->metadata_sent) {
			viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
		}
		pthread_mutex_unlock(&metadata_viewer_stream->stream->lock);
	}

	viewer_index.flags = htobe32(viewer_index.flags);
	health_code_update();

	ret = send_response(conn->sock, &viewer_index, sizeof(viewer_index));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	if (vstream) {
		DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
				vstream->index_sent_seqcount,
				vstream->stream->stream_handle);
	}
end:
	if (metadata_viewer_stream) {
		viewer_stream_put(metadata_viewer_stream);
	}
	if (vstream) {
		viewer_stream_put(vstream);
	}
	return ret;

error_put:
	pthread_mutex_unlock(&rstream->lock);
	if (metadata_viewer_stream) {
		viewer_stream_put(metadata_viewer_stream);
	}
	viewer_stream_put(vstream);
	return ret;
}
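
/*
 * Note on flag handling above: viewer_index.flags is accumulated in
 * host byte order (NEW_STREAM, NEW_METADATA) and converted exactly
 * once with htobe32() right before the reply is sent, which is why the
 * metadata check must happen before that conversion.
 */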
/*
 * Send the next packet for a stream.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_get_packet(struct relay_connection *conn)
{
	int ret;
	off_t lseek_ret;
	char *reply = NULL;
	struct lttng_viewer_get_packet get_packet_info;
	struct lttng_viewer_trace_packet reply_header;
	struct relay_viewer_stream *vstream = NULL;
	uint32_t reply_size = sizeof(reply_header);
	uint32_t packet_data_len = 0;
	ssize_t read_len;
	uint64_t stream_id;

	DBG2("Relay get data packet");

	health_code_update();

	ret = recv_request(conn->sock, &get_packet_info,
			sizeof(get_packet_info));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	/* From this point on, the error label can be reached. */
	memset(&reply_header, 0, sizeof(reply_header));
	stream_id = (uint64_t) be64toh(get_packet_info.stream_id);

	vstream = viewer_stream_get_by_id(stream_id);
	if (!vstream) {
		DBG("Client requested packet of unknown stream id %" PRIu64,
				stream_id);
		reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
		goto send_reply_nolock;
	} else {
		packet_data_len = be32toh(get_packet_info.len);
		reply_size += packet_data_len;
	}

	reply = zmalloc(reply_size);
	if (!reply) {
		PERROR("packet reply zmalloc");
		reply_size = sizeof(reply_header);
		goto error;
	}

	pthread_mutex_lock(&vstream->stream->lock);
	lseek_ret = fs_handle_seek(vstream->stream_file.handle,
			be64toh(get_packet_info.offset), SEEK_SET);
	if (lseek_ret < 0) {
		PERROR("Failed to seek file system handle of viewer stream %" PRIu64
				" to offset %" PRIu64,
				stream_id,
				(uint64_t) be64toh(get_packet_info.offset));
		goto error;
	}
	read_len = fs_handle_read(vstream->stream_file.handle,
			reply + sizeof(reply_header), packet_data_len);
	if (read_len < packet_data_len) {
		PERROR("Failed to read from file system handle of viewer stream id %" PRIu64
				", offset: %" PRIu64,
				stream_id,
				(uint64_t) be64toh(get_packet_info.offset));
		goto error;
	}
	reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_OK);
	reply_header.len = htobe32(packet_data_len);
	goto send_reply;

error:
	/* No payload to send on error. */
	reply_size = sizeof(reply_header);
	reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);

send_reply:
	if (vstream) {
		pthread_mutex_unlock(&vstream->stream->lock);
	}
send_reply_nolock:

	health_code_update();

	if (reply) {
		memcpy(reply, &reply_header, sizeof(reply_header));
		ret = send_response(conn->sock, reply, reply_size);
	} else {
		/* No reply to send. */
		ret = send_response(conn->sock, &reply_header,
				reply_size);
	}

	health_code_update();
	if (ret < 0) {
		PERROR("sendmsg of packet data failed");
		goto end_free;
	}

	DBG("Sent %u bytes for stream %" PRIu64, reply_size, stream_id);

end_free:
	free(reply);
end:
	if (vstream) {
		viewer_stream_put(vstream);
	}
	return ret;
}
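
/*
 * Reply layout note: on success the header and the packet payload are
 * sent as a single buffer (the header immediately followed by
 * packet_data_len bytes read from the stream file); on error only the
 * header, carrying a GET_PACKET_ERR status, is sent.
 */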
/*
 * Send the session's metadata
 *
 * Return 0 on success else a negative value.
 */
static
int viewer_get_metadata(struct relay_connection *conn)
{
	int ret = 0;
	int fd = -1;
	ssize_t read_len;
	uint64_t len = 0;
	char *data = NULL;
	struct lttng_viewer_get_metadata request;
	struct lttng_viewer_metadata_packet reply;
	struct relay_viewer_stream *vstream = NULL;

	assert(conn);

	DBG("Relay get metadata");

	health_code_update();

	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	memset(&reply, 0, sizeof(reply));

	vstream = viewer_stream_get_by_id(be64toh(request.stream_id));
	if (!vstream) {
		/*
		 * The metadata stream can be closed by a CLOSE command
		 * just before we attach. It can also be closed by
		 * per-pid tracing during tracing. Therefore, it is
		 * possible that we cannot find this viewer stream.
		 * Reply back to the client with an error if we cannot
		 * find it.
		 */
		DBG("Client requested metadata of unknown stream id %" PRIu64,
				(uint64_t) be64toh(request.stream_id));
		reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
		goto send_reply;
	}
	pthread_mutex_lock(&vstream->stream->lock);
	if (!vstream->stream->is_metadata) {
		ERR("Invalid metadata stream");
		goto error;
	}

	if (vstream->metadata_sent >= vstream->stream->metadata_received) {
		/*
		 * The live viewers expect to receive a NO_NEW_METADATA
		 * status before a stream disappears, otherwise they abort the
		 * entire live connection when receiving an error status.
		 *
		 * Clear feature resets the metadata_sent to 0 until the
		 * same metadata is received again.
		 */
		reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
		/*
		 * The live viewer considers a closed 0 byte metadata stream as
		 * an error.
		 */
		if (vstream->metadata_sent > 0) {
			vstream->stream->no_new_metadata_notified = true;
			if (vstream->stream->closed) {
				/* Release ownership for the viewer metadata stream. */
				viewer_stream_put(vstream);
			}
		}
		goto send_reply;
	}

	if (vstream->stream->trace_chunk &&
			!lttng_trace_chunk_ids_equal(
				conn->viewer_session->current_trace_chunk,
				vstream->stream->trace_chunk)) {
		/* A rotation has occurred on the relay stream. */
		DBG("Metadata relay stream and viewer chunk ids differ");

		ret = viewer_session_set_trace_chunk_copy(
				conn->viewer_session,
				vstream->stream->trace_chunk);
		if (ret) {
			reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
			goto error;
		}
	}

	if (conn->viewer_session->current_trace_chunk !=
			vstream->stream_file.trace_chunk) {
		bool acquired_reference;

		DBG("Viewer session and viewer stream chunk differ: "
				"vsession chunk %p vstream chunk %p",
				conn->viewer_session->current_trace_chunk,
				vstream->stream_file.trace_chunk);
		lttng_trace_chunk_put(vstream->stream_file.trace_chunk);
		acquired_reference = lttng_trace_chunk_get(conn->viewer_session->current_trace_chunk);
		assert(acquired_reference);
		vstream->stream_file.trace_chunk =
				conn->viewer_session->current_trace_chunk;
		viewer_stream_close_files(vstream);
	}

	len = vstream->stream->metadata_received - vstream->metadata_sent;

	/*
	 * Either this is the first time the metadata file is read, or a
	 * rotation of the corresponding relay stream has occurred.
	 */
	if (!vstream->stream_file.handle && len > 0) {
		struct fs_handle *fs_handle;
		char file_path[LTTNG_PATH_MAX];
		enum lttng_trace_chunk_status status;
		struct relay_stream *rstream = vstream->stream;

		ret = utils_stream_file_path(rstream->path_name,
				rstream->channel_name, rstream->tracefile_size,
				vstream->current_tracefile_id, NULL, file_path,
				sizeof(file_path));
		if (ret < 0) {
			goto error;
		}

		/*
		 * It is possible that the metadata file we are trying to open is
		 * missing if the stream has been closed (application exits with
		 * per-pid buffers) and a clear command has been performed.
		 */
		status = lttng_trace_chunk_open_fs_handle(
				vstream->stream_file.trace_chunk,
				file_path, O_RDONLY, 0, &fs_handle, true);
		if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			if (status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE) {
				reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
				len = 0;
				if (vstream->stream->closed) {
					viewer_stream_put(vstream);
				}
				goto send_reply;
			}
			PERROR("Failed to open metadata file for viewer stream");
			goto error;
		}
		vstream->stream_file.handle = fs_handle;

		if (vstream->metadata_sent != 0) {
			/*
			 * The client does not expect to receive any metadata
			 * it has received and metadata files in successive
			 * chunks must be a strict superset of one another.
			 *
			 * Skip the first `metadata_sent` bytes to ensure
			 * they are not sent a second time to the client.
			 *
			 * Barring a block layer error or an internal error,
			 * this seek should not fail as
			 * `vstream->stream->metadata_received` is reset when
			 * a relay stream is rotated. If this is reached, it is
			 * safe to assume that
			 * `metadata_received` > `metadata_sent`.
			 */
			const off_t seek_ret = fs_handle_seek(fs_handle,
					vstream->metadata_sent, SEEK_SET);

			if (seek_ret < 0) {
				PERROR("Failed to seek metadata viewer stream file to `sent` position: pos = %" PRId64,
						vstream->metadata_sent);
				reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
				goto error;
			}
		}
	}

	reply.len = htobe64(len);
	data = zmalloc(len);
	if (!data) {
		PERROR("viewer metadata zmalloc");
		goto error;
	}

	fd = fs_handle_get_fd(vstream->stream_file.handle);
	if (fd < 0) {
		ERR("Failed to restore viewer stream file system handle");
		goto error;
	}
	read_len = lttng_read(fd, data, len);
	fs_handle_put_fd(vstream->stream_file.handle);
	fd = -1;
	if (read_len < len) {
		if (read_len < 0) {
			PERROR("Failed to read metadata file");
			goto error;
		} else {
			/*
			 * A clear has been performed which prevents the relay
			 * from sending `len` bytes of metadata.
			 *
			 * It is important not to send any metadata if we
			 * couldn't read all the available metadata in one shot:
			 * sending partial metadata can cause the client to
			 * attempt to parse an incomplete (incoherent) metadata
			 * stream, which would result in an error.
			 */
			const off_t seek_ret = fs_handle_seek(
					vstream->stream_file.handle, -read_len,
					SEEK_CUR);

			DBG("Failed to read metadata: requested = %" PRIu64 ", got = %zd",
					len, read_len);
			read_len = 0;
			len = 0;
			if (seek_ret < 0) {
				PERROR("Failed to restore metadata file position after partial read");
				ret = -1;
				goto error;
			}
		}
	}
	vstream->metadata_sent += read_len;
	reply.status = htobe32(LTTNG_VIEWER_METADATA_OK);

	goto send_reply;

error:
	reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);

send_reply:
	health_code_update();
	if (vstream) {
		pthread_mutex_unlock(&vstream->stream->lock);
	}
	ret = send_response(conn->sock, &reply, sizeof(reply));
	if (ret < 0) {
		goto end_free;
	}
	health_code_update();

	if (len > 0) {
		ret = send_response(conn->sock, data, len);
		if (ret < 0) {
			goto end_free;
		}
	}

	DBG("Sent %" PRIu64 " bytes of metadata for stream %" PRIu64, len,
			(uint64_t) be64toh(request.stream_id));

	DBG("Metadata sent");

end_free:
	free(data);
end:
	if (vstream) {
		viewer_stream_put(vstream);
	}
	return ret;
}
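
/*
 * Note on the partial-read path above: seeking back by read_len before
 * replying guarantees the skipped bytes are re-read from the same
 * offset on the client's next GET_METADATA request, once the cleared
 * metadata has been fully re-received by the relay.
 */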
/*
 * Create a viewer session.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_create_session(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_create_session_response resp;

	DBG("Viewer create session received");

	memset(&resp, 0, sizeof(resp));
	resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_OK);
	conn->viewer_session = viewer_session_create();
	if (!conn->viewer_session) {
		ERR("Allocation viewer session");
		resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_ERR);
		goto send_reply;
	}

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &resp, sizeof(resp));
	if (ret < 0) {
		goto end;
	}
	health_code_update();
	ret = 0;

end:
	return ret;
}
/*
 * Detach a viewer session.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_detach_session(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_detach_session_response response;
	struct lttng_viewer_detach_session_request request;
	struct relay_session *session = NULL;
	uint64_t viewer_session_to_close;

	DBG("Viewer detach session received");

	assert(conn);

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto end;
	}
	viewer_session_to_close = be64toh(request.session_id);

	if (!conn->viewer_session) {
		DBG("Client trying to detach before creating a live viewer session");
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
		goto send_reply;
	}

	health_code_update();

	memset(&response, 0, sizeof(response));
	DBG("Detaching from session ID %" PRIu64, viewer_session_to_close);

	session = session_get_by_id(be64toh(request.session_id));
	if (!session) {
		DBG("Relay session %" PRIu64 " not found",
				(uint64_t) be64toh(request.session_id));
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_UNK);
		goto send_reply;
	}

	ret = viewer_session_is_attached(conn->viewer_session, session);
	if (ret != 1) {
		DBG("Not attached to this session");
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
		goto send_reply_put;
	}

	viewer_session_close_one_session(conn->viewer_session, session);
	response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_OK);
	DBG("Session %" PRIu64 " detached.", viewer_session_to_close);

send_reply_put:
	session_put(session);

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
	if (ret < 0) {
		goto end;
	}
	health_code_update();
	ret = 0;

end:
	return ret;
}
/*
 * live_relay_unknown_command: send -1 if received unknown command
 */
static
void live_relay_unknown_command(struct relay_connection *conn)
{
	struct lttcomm_relayd_generic_reply reply;

	memset(&reply, 0, sizeof(reply));
	reply.ret_code = htobe32(LTTNG_ERR_UNK);
	(void) send_response(conn->sock, &reply, sizeof(reply));
}
/*
 * Process the commands received on the control socket
 */
static
int process_control(struct lttng_viewer_cmd *recv_hdr,
		struct relay_connection *conn)
{
	int ret = 0;
	uint32_t msg_value;

	msg_value = be32toh(recv_hdr->cmd);

	/*
	 * Make sure we've done the version check before any command other than
	 * a new client connection.
	 */
	if (msg_value != LTTNG_VIEWER_CONNECT && !conn->version_check_done) {
		ERR("Viewer conn value %" PRIu32 " before version check", msg_value);
		ret = -1;
		goto end;
	}

	switch (msg_value) {
	case LTTNG_VIEWER_CONNECT:
		ret = viewer_connect(conn);
		break;
	case LTTNG_VIEWER_LIST_SESSIONS:
		ret = viewer_list_sessions(conn);
		break;
	case LTTNG_VIEWER_ATTACH_SESSION:
		ret = viewer_attach_session(conn);
		break;
	case LTTNG_VIEWER_GET_NEXT_INDEX:
		ret = viewer_get_next_index(conn);
		break;
	case LTTNG_VIEWER_GET_PACKET:
		ret = viewer_get_packet(conn);
		break;
	case LTTNG_VIEWER_GET_METADATA:
		ret = viewer_get_metadata(conn);
		break;
	case LTTNG_VIEWER_GET_NEW_STREAMS:
		ret = viewer_get_new_streams(conn);
		break;
	case LTTNG_VIEWER_CREATE_SESSION:
		ret = viewer_create_session(conn);
		break;
	case LTTNG_VIEWER_DETACH_SESSION:
		ret = viewer_detach_session(conn);
		break;
	default:
		ERR("Received unknown viewer command (%u)",
				be32toh(recv_hdr->cmd));
		live_relay_unknown_command(conn);
		ret = -1;
		goto end;
	}

end:
	return ret;
}
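
/*
 * Error handling convention: each viewer_*() handler sends its own
 * protocol-level reply; a negative return from process_control() means
 * the connection itself is broken, and the worker thread responds by
 * removing the socket from its poll set and dropping the connection.
 */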
static
void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
{
	int ret;

	(void) lttng_poll_del(events, pollfd);

	ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker, &pollfd, 1,
			fd_tracker_util_close_fd, NULL);
	if (ret < 0) {
		ERR("Closing pollfd %d", pollfd);
	}
}
/*
 * This thread does the actual work
 */
static
void *thread_worker(void *data)
{
	int ret, err = -1;
	uint32_t nb_fd;
	struct lttng_poll_event events;
	struct lttng_ht *viewer_connections_ht;
	struct lttng_ht_iter iter;
	struct lttng_viewer_cmd recv_hdr;
	struct relay_connection *destroy_conn;

	DBG("[thread] Live viewer relay worker started");

	rcu_register_thread();

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_WORKER);

	if (testpoint(relayd_thread_live_worker)) {
		goto error_testpoint;
	}

	/* table of connections indexed on socket */
	viewer_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!viewer_connections_ht) {
		goto viewer_connections_ht_error;
	}

	ret = create_named_thread_poll_set(&events, 2,
			"Live viewer worker thread epoll");
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, live_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		int i;

		health_code_update();

		/* Infinite blocking call, waiting for transmission */
		DBG3("Relayd live viewer worker thread polling...");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				continue;
			}
			goto error;
		}
		nb_fd = ret;

		/*
		 * Process control. The control connection is prioritised so we don't
		 * starve it with high throughput tracing data on the data
		 * connection.
		 */
		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			uint32_t revents = LTTNG_POLL_GETEV(&events, i);
			int pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the relay conn pipe for new connection. */
			if (pollfd == live_conn_pipe[0]) {
				if (revents & LPOLLIN) {
					struct relay_connection *conn;

					ret = lttng_read(live_conn_pipe[0],
							&conn, sizeof(conn));
					if (ret < 0) {
						goto error;
					}
					ret = lttng_poll_add(&events,
							conn->sock->fd,
							LPOLLIN | LPOLLRDHUP);
					if (ret) {
						ERR("Failed to add new live connection file descriptor to poll set");
						goto error;
					}
					connection_ht_add(viewer_connections_ht, conn);
					DBG("Connection socket %d added to poll", conn->sock->fd);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Relay live pipe error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/* Connection activity. */
				struct relay_connection *conn;

				conn = connection_get_by_sock(viewer_connections_ht, pollfd);
				if (!conn) {
					continue;
				}

				if (revents & LPOLLIN) {
					ret = conn->sock->ops->recvmsg(conn->sock, &recv_hdr,
							sizeof(recv_hdr), 0);
					if (ret <= 0) {
						/* Connection closed. */
						cleanup_connection_pollfd(&events, pollfd);
						/* Put "create" ownership reference. */
						connection_put(conn);
						DBG("Viewer control conn closed with %d", pollfd);
					} else {
						ret = process_control(&recv_hdr, conn);
						if (ret < 0) {
							/* Clear the session on error. */
							cleanup_connection_pollfd(&events, pollfd);
							/* Put "create" ownership reference. */
							connection_put(conn);
							DBG("Viewer connection closed with %d", pollfd);
						}
					}
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					cleanup_connection_pollfd(&events, pollfd);
					/* Put "create" ownership reference. */
					connection_put(conn);
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					connection_put(conn);
					goto error;
				}

				/* Put local "get_by_sock" reference. */
				connection_put(conn);
			}
		}
	}

exit:
error:
	(void) fd_tracker_util_poll_clean(the_fd_tracker, &events);

	/* Cleanup remaining connection object. */
	rcu_read_lock();
	cds_lfht_for_each_entry(viewer_connections_ht->ht, &iter.iter,
			destroy_conn,
			sock_n.node) {
		health_code_update();
		connection_put(destroy_conn);
	}
	rcu_read_unlock();
error_poll_create:
	lttng_ht_destroy(viewer_connections_ht);
viewer_connections_ht_error:
	/* Close relay conn pipes */
	(void) fd_tracker_util_pipe_close(the_fd_tracker, live_conn_pipe);
	if (err) {
		DBG("Viewer worker thread exited with error");
	}
	DBG("Viewer worker thread cleanup complete");
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
	}
	rcu_unregister_thread();
	return NULL;
}
/*
 * Create the relay command pipe to wake thread_manage_apps.
 * Closed in cleanup().
 */
static int create_conn_pipe(void)
{
	return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
			"Live connection pipe", live_conn_pipe);
}
int relayd_live_join(void)
{
	int ret, retval = 0;
	void *status;

	ret = pthread_join(live_listener_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live listener");
		retval = -1;
	}

	ret = pthread_join(live_worker_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live worker");
		retval = -1;
	}

	ret = pthread_join(live_dispatcher_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live dispatcher");
		retval = -1;
	}

	cleanup_relayd_live();

	return retval;
}
int relayd_live_create(struct lttng_uri *uri)
{
	int ret = 0, retval = 0;
	void *status;
	int is_root;

	if (!uri) {
		retval = -1;
		goto exit_init_data;
	}
	live_uri = uri;

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (!is_root) {
		if (live_uri->port < 1024) {
			ERR("Need to be root to use ports < 1024");
			retval = -1;
			goto exit_init_data;
		}
	}

	/* Setup the thread apps communication pipe. */
	if (create_conn_pipe()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Init relay command queue. */
	cds_wfcq_init(&viewer_conn_queue.head, &viewer_conn_queue.tail);

	/* Set up max poll set size */
	if (lttng_poll_set_max_size()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the dispatcher thread */
	ret = pthread_create(&live_dispatcher_thread, default_pthread_attr(),
			thread_dispatcher, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create viewer dispatcher");
		retval = -1;
		goto exit_dispatcher_thread;
	}

	/* Setup the worker thread */
	ret = pthread_create(&live_worker_thread, default_pthread_attr(),
			thread_worker, NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create viewer worker");
		retval = -1;
		goto exit_worker_thread;
	}

	/* Setup the listener thread */
	ret = pthread_create(&live_listener_thread, default_pthread_attr(),
			thread_listener, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create viewer listener");
		retval = -1;
		goto exit_listener_thread;
	}

	/*
	 * All OK, started all threads.
	 */
	return retval;

	/*
	 * Join on the live_listener_thread should anything be added after
	 * the live_listener thread's creation.
	 */

exit_listener_thread:

	ret = pthread_join(live_worker_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live worker");
		retval = -1;
	}
exit_worker_thread:

	ret = pthread_join(live_dispatcher_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live dispatcher");
		retval = -1;
	}
exit_dispatcher_thread:

exit_init_data:
	cleanup_relayd_live();

	return retval;
}
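
/*
 * Usage sketch (as wired up in lttng-relayd's main, shown here only as
 * an illustration of the intended call order): relayd_live_create(uri)
 * spawns the three live threads, relayd_live_stop() signals the
 * dispatcher to exit, and relayd_live_join() reaps all three threads
 * and calls cleanup_relayd_live().
 */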