/*
 * Copyright (C) 2013 Julien Desfossez <jdesfossez@efficios.com>
 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
 * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/futex.h>
#include <urcu/rculist.h>
#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/endian.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/fd-tracker/utils.h>
#include <common/fs-handle.h>
#include <common/futex.h>
#include <common/index/index.h>
#include <common/sessiond-comm/inet.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/uri.h>
#include <common/utils.h>
#include <lttng/lttng.h>
#include "connection.h"
#include "ctf-trace.h"
#include "health-relayd.h"
#include "lttng-relayd.h"
#include "testpoint.h"
#include "viewer-session.h"
#include "viewer-stream.h"

#define SESSION_BUF_DEFAULT_COUNT 16
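/*
 * Note: this is the initial capacity (in entries) of the reply buffer used
 * by viewer_list_sessions() below; that function doubles the buffer
 * (buf_count << 1) whenever more sessions must be appended to the reply.
 */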

static struct lttng_uri *live_uri;

/*
 * This pipe is used to inform the worker thread that a command is queued and
 * ready to be processed.
 */
static int live_conn_pipe[2] = { -1, -1 };
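/*
 * Note: the dispatcher thread writes one `struct relay_connection *` at a
 * time to live_conn_pipe[1] (see thread_dispatcher()), and the worker thread
 * polls live_conn_pipe[0] and assumes ownership of each dequeued connection.
 */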

/* Shared between threads */
static int live_dispatch_thread_exit;

static pthread_t live_listener_thread;
static pthread_t live_dispatcher_thread;
static pthread_t live_worker_thread;

/*
 * Relay command queue.
 *
 * The live_thread_listener and live_thread_dispatcher communicate with this
 * queue.
 */
static struct relay_conn_queue viewer_conn_queue;

static uint64_t last_relay_viewer_session_id;
static pthread_mutex_t last_relay_viewer_session_id_lock =
		PTHREAD_MUTEX_INITIALIZER;
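/*
 * Note: this lock serializes the read-increment-write of
 * last_relay_viewer_session_id in viewer_connect() so that every viewer
 * command connection is handed a unique viewer session id.
 */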

static const char *lttng_viewer_next_index_return_code_str(
		enum lttng_viewer_next_index_return_code code)
{
	switch (code) {
	case LTTNG_VIEWER_INDEX_OK:
		return "INDEX_OK";
	case LTTNG_VIEWER_INDEX_RETRY:
		return "INDEX_RETRY";
	case LTTNG_VIEWER_INDEX_HUP:
		return "INDEX_HUP";
	case LTTNG_VIEWER_INDEX_ERR:
		return "INDEX_ERR";
	case LTTNG_VIEWER_INDEX_INACTIVE:
		return "INDEX_INACTIVE";
	case LTTNG_VIEWER_INDEX_EOF:
		return "INDEX_EOF";
	default:
		abort();
	}
}

static void cleanup_relayd_live(void)
{
	free(live_uri);
}

/*
 * Receive a request buffer using a given socket, destination allocated buffer
 * of length size.
 *
 * Return the size of the received message or else a negative value on error
 * with errno being set by recvmsg() syscall.
 */
static ssize_t recv_request(struct lttcomm_sock *sock, void *buf, size_t size)
{
	ssize_t ret;

	ret = sock->ops->recvmsg(sock, buf, size, 0);
	if (ret < 0 || ret != size) {
		if (ret == 0) {
			/* Orderly shutdown. Not necessary to print an error. */
			DBG("Socket %d did an orderly shutdown", sock->fd);
		} else {
			ERR("Relay failed to receive request.");
		}
		ret = -1;
	}

	return ret;
}

/*
 * Send a response buffer using a given socket, source allocated buffer of
 * length size.
 *
 * Return the size of the sent message or else a negative value on error with
 * errno being set by sendmsg() syscall.
 */
static ssize_t send_response(struct lttcomm_sock *sock, void *buf, size_t size)
{
	ssize_t ret;

	ret = sock->ops->sendmsg(sock, buf, size, 0);
	if (ret < 0) {
		ERR("Relayd failed to send response.");
	}

	return ret;
}

/*
 * Atomically check if new streams got added in one of the sessions attached
 * and reset the flag to 0.
 *
 * Returns 1 if new streams got added, 0 if nothing changed, a negative value
 * on error.
 */
static int check_new_streams(struct relay_connection *conn)
{
	struct relay_session *session;
	int ret = 0;

	if (!conn->viewer_session) {
		goto end;
	}

	rcu_read_lock();
	cds_list_for_each_entry_rcu(
			session, &conn->viewer_session->session_list,
			viewer_session_node)
	{
		if (!session_get(session)) {
			continue;
		}

		ret = uatomic_read(&session->new_streams);
		session_put(session);
		if (ret == 1) {
			break;
		}
	}
	rcu_read_unlock();

end:
	DBG("Viewer connection has%s new streams: socket_fd = %d",
			ret == 0 ? " no" : "", conn->sock->fd);
	return ret;
}

/*
 * Send viewer streams to the given socket. The ignore_sent_flag indicates if
 * this function should ignore the sent flag or not.
 *
 * Return 0 on success or else a negative value.
 */
static ssize_t send_viewer_streams(struct lttcomm_sock *sock,
		uint64_t session_id, unsigned int ignore_sent_flag)
{
	ssize_t ret;
	struct lttng_ht_iter iter;
	struct relay_viewer_stream *vstream;

	rcu_read_lock();
	cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, vstream,
			stream_n.node) {
		struct ctf_trace *ctf_trace;
		struct lttng_viewer_stream send_stream = {};

		health_code_update();

		if (!viewer_stream_get(vstream)) {
			continue;
		}

		pthread_mutex_lock(&vstream->stream->lock);
		/* Ignore if not the same session. */
		if (vstream->stream->trace->session->id != session_id ||
				(!ignore_sent_flag && vstream->sent_flag)) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			continue;
		}

		ctf_trace = vstream->stream->trace;
		send_stream.id = htobe64(vstream->stream->stream_handle);
		send_stream.ctf_trace_id = htobe64(ctf_trace->id);
		send_stream.metadata_flag = htobe32(
				vstream->stream->is_metadata);
		if (lttng_strncpy(send_stream.path_name, vstream->path_name,
				sizeof(send_stream.path_name))) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			ret = -1; /* Error. */
			goto end;
		}
		if (lttng_strncpy(send_stream.channel_name,
				vstream->channel_name,
				sizeof(send_stream.channel_name))) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			ret = -1; /* Error. */
			goto end;
		}

		DBG("Sending stream %" PRIu64 " to viewer",
				vstream->stream->stream_handle);
		vstream->sent_flag = 1;
		pthread_mutex_unlock(&vstream->stream->lock);

		ret = send_response(sock, &send_stream, sizeof(send_stream));
		viewer_stream_put(vstream);
		if (ret < 0) {
			goto end;
		}
	}

	ret = 0;

end:
	rcu_read_unlock();
	return ret;
}

/*
 * Create every viewer stream possible for the given session with the seek
 * type. Three counters *can* be returned, which are, in order, the total
 * number of viewer streams of the session, the number of unsent streams and
 * the number of streams created. Those counters can be NULL and thus will be
 * ignored.
 *
 * session must be locked to ensure that we see either none or all initial
 * streams for a session, but no intermediate state.
 *
 * Return 0 on success or else a negative value.
 */
static int make_viewer_streams(struct relay_session *relay_session,
		struct relay_viewer_session *viewer_session,
		enum lttng_viewer_seek seek_t,
		uint32_t *nb_total,
		uint32_t *nb_unsent,
		uint32_t *nb_created,
		bool *closed)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ctf_trace *ctf_trace;
	struct relay_stream *relay_stream = NULL;

	assert(relay_session);
	ASSERT_LOCKED(relay_session->lock);

	if (relay_session->connection_closed) {
		*closed = true;
	}

	/*
	 * Create viewer streams for relay streams that are ready to be
	 * used for the given session id only.
	 */
	rcu_read_lock();
	cds_lfht_for_each_entry (relay_session->ctf_traces_ht->ht, &iter.iter,
			ctf_trace, node.node) {
		bool trace_has_metadata_stream = false;

		health_code_update();

		if (!ctf_trace_get(ctf_trace)) {
			continue;
		}

		/*
		 * Iterate over all the streams of the trace to see if we have a
		 * metadata stream.
		 */
		cds_list_for_each_entry_rcu(relay_stream,
				&ctf_trace->stream_list, stream_node)
		{
			bool is_metadata_stream;

			pthread_mutex_lock(&relay_stream->lock);
			is_metadata_stream = relay_stream->is_metadata;
			pthread_mutex_unlock(&relay_stream->lock);

			if (is_metadata_stream) {
				trace_has_metadata_stream = true;
				break;
			}
		}

		relay_stream = NULL;

		/*
		 * If there is no metadata stream in this trace at the moment
		 * and we never sent one to the viewer, skip the trace. We
		 * accept that the viewer will not see this trace at all.
		 */
		if (!trace_has_metadata_stream &&
				!ctf_trace->metadata_stream_sent_to_viewer) {
			ctf_trace_put(ctf_trace);
			continue;
		}

		cds_list_for_each_entry_rcu(relay_stream,
				&ctf_trace->stream_list, stream_node)
		{
			struct relay_viewer_stream *viewer_stream;

			if (!stream_get(relay_stream)) {
				continue;
			}

			pthread_mutex_lock(&relay_stream->lock);
			/*
			 * stream published is protected by the session lock.
			 */
			if (!relay_stream->published) {
				goto next;
			}
			viewer_stream = viewer_stream_get_by_id(
					relay_stream->stream_handle);
			if (!viewer_stream) {
				struct lttng_trace_chunk *viewer_stream_trace_chunk = NULL;

				/*
				 * Save that we sent the metadata stream to the
				 * viewer. So that we know what trace the viewer
				 * is aware of.
				 */
				if (relay_stream->is_metadata) {
					ctf_trace->metadata_stream_sent_to_viewer = true;
				}

				/*
				 * If a rotation is ongoing, use a copy of the
				 * relay stream's chunk to ensure the stream
				 * files exist.
				 *
				 * Otherwise, the viewer session's current trace
				 * chunk can be used safely.
				 */
				if ((relay_stream->ongoing_rotation.is_set ||
						session_has_ongoing_rotation(relay_session)) &&
						relay_stream->trace_chunk) {
					viewer_stream_trace_chunk = lttng_trace_chunk_copy(
							relay_stream->trace_chunk);
					if (!viewer_stream_trace_chunk) {
						ret = -1;
						ctf_trace_put(ctf_trace);
						goto error_unlock;
					}
				} else {
					/*
					 * Transition the viewer session into the newest trace chunk available.
					 */
					if (!lttng_trace_chunk_ids_equal(viewer_session->current_trace_chunk,
							relay_stream->trace_chunk)) {
						ret = viewer_session_set_trace_chunk_copy(
								viewer_session,
								relay_stream->trace_chunk);
						if (ret) {
							ret = -1;
							ctf_trace_put(ctf_trace);
							goto error_unlock;
						}
					}

					if (relay_stream->trace_chunk) {
						/*
						 * If the corresponding relay
						 * stream's trace chunk is set,
						 * the viewer stream will be
						 * created under it.
						 *
						 * Note that a relay stream can
						 * have a NULL output trace
						 * chunk (for instance, after a
						 * clear against a stopped
						 * session).
						 */
						const bool reference_acquired = lttng_trace_chunk_get(
								viewer_session->current_trace_chunk);

						assert(reference_acquired);
						viewer_stream_trace_chunk =
								viewer_session->current_trace_chunk;
					}
				}

				viewer_stream = viewer_stream_create(
						relay_stream,
						viewer_stream_trace_chunk,
						seek_t);
				lttng_trace_chunk_put(viewer_stream_trace_chunk);
				viewer_stream_trace_chunk = NULL;
				if (!viewer_stream) {
					ret = -1;
					ctf_trace_put(ctf_trace);
					goto error_unlock;
				}

				if (nb_created) {
					/* Update number of created stream counter. */
					(*nb_created)++;
				}
				/*
				 * Ensure a self-reference is preserved even
				 * after we have put our local reference.
				 */
				if (!viewer_stream_get(viewer_stream)) {
					ERR("Unable to get self-reference on viewer stream, logic error.");
					abort();
				}
			} else {
				if (!viewer_stream->sent_flag && nb_unsent) {
					/* Update number of unsent stream counter. */
					(*nb_unsent)++;
				}
			}
			/* Update number of total stream counter. */
			if (nb_total) {
				if (relay_stream->is_metadata) {
					if (!relay_stream->closed ||
							relay_stream->metadata_received >
							viewer_stream->metadata_sent) {
						(*nb_total)++;
					}
				} else {
					if (!relay_stream->closed ||
							!(((int64_t)(relay_stream->prev_data_seq -
								relay_stream->last_net_seq_num)) >= 0)) {
						(*nb_total)++;
					}
				}
			}
			/* Put local reference. */
			viewer_stream_put(viewer_stream);
		next:
			pthread_mutex_unlock(&relay_stream->lock);
			stream_put(relay_stream);
		}
		relay_stream = NULL;
		ctf_trace_put(ctf_trace);
	}

	ret = 0;
	goto end;

error_unlock:
	pthread_mutex_unlock(&relay_stream->lock);
	stream_put(relay_stream);
end:
	rcu_read_unlock();
	return ret;
}
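/*
 * Note: callers use the optional counters differently. For example,
 * viewer_attach_session() only needs the total stream count:
 *
 *	ret = make_viewer_streams(session, conn->viewer_session, seek_type,
 *			&nb_streams, NULL, NULL, &closed);
 *
 * while viewer_get_new_streams() requests all three counters so it can
 * announce `nb_created + nb_unsent` streams to the viewer.
 */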

int relayd_live_stop(void)
{
	/* Stop dispatch thread */
	CMM_STORE_SHARED(live_dispatch_thread_exit, 1);
	futex_nto1_wake(&viewer_conn_queue.futex);
	return 0;
}
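/*
 * Note: the store/wake pair above mirrors the prepare/check sequence in
 * thread_dispatcher(): the dispatcher calls futex_nto1_prepare() and then
 * re-reads live_dispatch_thread_exit, so setting the flag before waking the
 * futex guarantees that the dispatcher observes the exit request.
 */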

/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static int create_named_thread_poll_set(struct lttng_poll_event *events,
		int size, const char *name)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = fd_tracker_util_poll_create(the_fd_tracker,
			name, events, 1, LTTNG_CLOEXEC);
	if (ret) {
		PERROR("Failed to create \"%s\" poll file descriptor", name);
		goto error;
	}

	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
static int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}

static int create_sock(void *data, int *out_fd)
{
	int ret;
	struct lttcomm_sock *sock = data;

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		goto end;
	}

	*out_fd = sock->fd;
end:
	return ret;
}

static int close_sock(void *data, int *in_fd)
{
	struct lttcomm_sock *sock = data;

	return sock->ops->close(sock);
}

static int accept_sock(void *data, int *out_fd)
{
	int ret = 0;
	/* Socks is an array of in_sock, out_sock. */
	struct lttcomm_sock **socks = data;
	struct lttcomm_sock *in_sock = socks[0];

	socks[1] = in_sock->ops->accept(in_sock);
	if (!socks[1]) {
		ret = -1;
		goto end;
	}
	*out_fd = socks[1]->fd;
end:
	return ret;
}

static struct lttcomm_sock *accept_live_sock(struct lttcomm_sock *listening_sock,
		const char *name)
{
	int out_fd, ret;
	struct lttcomm_sock *socks[2] = { listening_sock, NULL };
	struct lttcomm_sock *new_sock = NULL;

	ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &out_fd,
			(const char **) &name, 1, accept_sock, &socks);
	if (ret) {
		goto end;
	}
	new_sock = socks[1];
	DBG("%s accepted, socket %d", name, new_sock->fd);
end:
	return new_sock;
}

/*
 * Create and init socket from uri.
 */
static struct lttcomm_sock *init_socket(struct lttng_uri *uri, const char *name)
{
	int ret, sock_fd;
	struct lttcomm_sock *sock = NULL;
	char uri_str[LTTNG_PATH_MAX];
	char *formated_name = NULL;

	sock = lttcomm_alloc_sock_from_uri(uri);
	if (sock == NULL) {
		ERR("Allocating socket");
		goto error;
	}

	/*
	 * Don't fail to create the socket if the name can't be built as it is
	 * only used for debugging purposes.
	 */
	ret = uri_to_str_url(uri, uri_str, sizeof(uri_str));
	uri_str[sizeof(uri_str) - 1] = '\0';
	if (ret >= 0) {
		ret = asprintf(&formated_name, "%s socket @ %s", name,
				uri_str);
		if (ret < 0) {
			formated_name = NULL;
		}
	}

	ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &sock_fd,
			(const char **) (formated_name ? &formated_name : NULL),
			1, create_sock, sock);
	if (ret) {
		PERROR("Failed to create \"%s\" socket",
				formated_name ?: "Unknown");
		goto error;
	}
	DBG("Listening on %s socket %d", name, sock->fd);

	ret = sock->ops->bind(sock);
	if (ret < 0) {
		PERROR("Failed to bind lttng-live socket");
		goto error;
	}

	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}

	free(formated_name);
	return sock;

error:
	if (sock) {
		lttcomm_destroy_sock(sock);
	}
	free(formated_name);
	return NULL;
}
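/*
 * Note: the socket fd is created through fd_tracker_open_unsuspendable_fd()
 * so that the relay daemon's fd-tracker accounts for it; the "formated_name"
 * label is purely cosmetic and, as noted above, failing to build it is not
 * treated as an error.
 */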

/*
 * This thread manages the listening for new connections on the network
 */
static void *thread_listener(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *live_control_sock;

	DBG("[thread] Relay live listener started");

	rcu_register_thread();
	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_LISTENER);

	health_code_update();

	live_control_sock = init_socket(live_uri, "Live listener");
	if (!live_control_sock) {
		goto error_sock_control;
	}

	/* Pass 2 as size here for the thread quit pipe and control sockets. */
	ret = create_named_thread_poll_set(&events, 2,
			"Live listener thread epoll");
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the control socket */
	ret = lttng_poll_add(&events, live_control_sock->fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	lttng_relay_notify_ready();

	if (testpoint(relayd_thread_live_listener)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		DBG("Listener accepting live viewers connections");

restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}
		nb_fd = ret;

		DBG("Relay new viewer connection received");
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			if (revents & LPOLLIN) {
				/*
				 * A new connection is requested, therefore a
				 * viewer connection is allocated in this
				 * thread, enqueued to a global queue and
				 * dequeued (and freed) in the worker thread.
				 */
				int val = 1;
				struct relay_connection *new_conn;
				struct lttcomm_sock *newsock;

				newsock = accept_live_sock(live_control_sock,
						"Live socket to client");
				if (!newsock) {
					PERROR("accepting control sock");
					goto error;
				}
				DBG("Relay viewer connection accepted socket %d", newsock->fd);

				ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
						sizeof(val));
				if (ret < 0) {
					PERROR("setsockopt inet");
					lttcomm_destroy_sock(newsock);
					goto error;
				}
				new_conn = connection_create(newsock, RELAY_CONNECTION_UNKNOWN);
				if (!new_conn) {
					lttcomm_destroy_sock(newsock);
					goto error;
				}
				/* Ownership assumed by the connection. */
				newsock = NULL;

				/* Enqueue request for the dispatcher thread. */
				cds_wfcq_enqueue(&viewer_conn_queue.head, &viewer_conn_queue.tail,
						&new_conn->qnode);

				/*
				 * Wake the dispatch queue futex.
				 * Implicit memory barrier with the
				 * exchange in cds_wfcq_enqueue.
				 */
				futex_nto1_wake(&viewer_conn_queue.futex);
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("socket poll error");
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
error_poll_add:
error_testpoint:
	(void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
error_create_poll:
	if (live_control_sock->fd >= 0) {
		int sock_fd = live_control_sock->fd;

		ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker,
				&sock_fd, 1, close_sock,
				live_control_sock);
		if (ret) {
			PERROR("close");
		}
		live_control_sock->fd = -1;
	}
	lttcomm_destroy_sock(live_control_sock);
error_sock_control:
	if (err) {
		DBG("Live viewer listener thread exited with error");
	}
	health_unregister(health_relayd);
	rcu_unregister_thread();
	DBG("Live viewer listener thread cleanup complete");
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
	}
	return NULL;
}

/*
 * This thread manages the dispatching of the requests to worker threads
 */
static void *thread_dispatcher(void *data)
{
	int err = -1;
	ssize_t ret;
	struct cds_wfcq_node *node;
	struct relay_connection *conn = NULL;

	DBG("[thread] Live viewer relay dispatcher started");

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_DISPATCHER);

	if (testpoint(relayd_thread_live_dispatcher)) {
		goto error_testpoint;
	}

	health_code_update();

	while (1) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&viewer_conn_queue.futex);

		if (CMM_LOAD_SHARED(live_dispatch_thread_exit)) {
			break;
		}

		do {
			health_code_update();

			/* Dequeue commands */
			node = cds_wfcq_dequeue_blocking(&viewer_conn_queue.head,
					&viewer_conn_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the live-viewer "
						"relay command queue");
				/* Continue thread execution */
				break;
			}
			conn = caa_container_of(node, struct relay_connection, qnode);
			DBG("Dispatching viewer request waiting on sock %d",
					conn->sock->fd);

			/*
			 * Inform worker thread of the new request. This
			 * call is blocking so we can be assured that
			 * the data will be read at some point in time
			 * or wait to the end of the world :)
			 */
			ret = lttng_write(live_conn_pipe[1], &conn, sizeof(conn));
			if (ret < 0) {
				PERROR("write conn pipe");
				connection_put(conn);
				goto error;
			}
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		health_poll_entry();
		futex_nto1_wait(&viewer_conn_queue.futex);
		health_poll_exit();
	}

	/* Normal exit, no error */
	err = 0;

error:
error_testpoint:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	DBG("Live viewer dispatch thread dying");
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
	}
	return NULL;
}

/*
 * Establish connection with the viewer and check the versions.
 *
 * Return 0 on success or else negative value.
 */
static int viewer_connect(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_connect reply, msg;

	conn->version_check_done = 1;

	health_code_update();

	DBG("Viewer is establishing a connection to the relayd.");

	ret = recv_request(conn->sock, &msg, sizeof(msg));
	if (ret < 0) {
		goto end;
	}

	health_code_update();

	memset(&reply, 0, sizeof(reply));
	reply.major = RELAYD_VERSION_COMM_MAJOR;
	reply.minor = RELAYD_VERSION_COMM_MINOR;

	/* Major versions must be the same */
	if (reply.major != be32toh(msg.major)) {
		DBG("Incompatible major versions ([relayd] %u vs [client] %u)",
				reply.major, be32toh(msg.major));
		ret = -1;
		goto end;
	}

	conn->major = reply.major;
	/* We adapt to the lowest compatible version */
	if (reply.minor <= be32toh(msg.minor)) {
		conn->minor = reply.minor;
	} else {
		conn->minor = be32toh(msg.minor);
	}

	if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_COMMAND) {
		conn->type = RELAY_VIEWER_COMMAND;
	} else if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_NOTIFICATION) {
		conn->type = RELAY_VIEWER_NOTIFICATION;
	} else {
		ERR("Unknown connection type : %u", be32toh(msg.type));
		ret = -1;
		goto end;
	}

	reply.major = htobe32(reply.major);
	reply.minor = htobe32(reply.minor);
	if (conn->type == RELAY_VIEWER_COMMAND) {
		/*
		 * Increment outside of htobe64 macro, because the argument can
		 * be used more than once within the macro, and thus the
		 * operation may be undefined.
		 */
		pthread_mutex_lock(&last_relay_viewer_session_id_lock);
		last_relay_viewer_session_id++;
		pthread_mutex_unlock(&last_relay_viewer_session_id_lock);
		reply.viewer_session_id = htobe64(last_relay_viewer_session_id);
	}

	health_code_update();

	ret = send_response(conn->sock, &reply, sizeof(reply));
	if (ret < 0) {
		goto end;
	}

	health_code_update();

	DBG("Version check done using protocol %u.%u", conn->major, conn->minor);
	ret = 0;

end:
	return ret;
}
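/*
 * Worked example of the negotiation above (illustrative version numbers): if
 * the relay daemon implements protocol 2.10 and the client announces 2.4,
 * the major versions match (2) and the connection proceeds using minor 4,
 * the lowest of the two minors. A client announcing major 1 would be
 * rejected outright.
 */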

/*
 * Send the viewer the list of current sessions.
 * We need to create a copy of the hash table content because otherwise
 * we cannot assume the number of entries stays the same between getting
 * the number of HT elements and iteration over the HT.
 *
 * Return 0 on success or else a negative value.
 */
static int viewer_list_sessions(struct relay_connection *conn)
{
	int ret = 0;
	struct lttng_viewer_list_sessions session_list;
	struct lttng_ht_iter iter;
	struct relay_session *session;
	struct lttng_viewer_session *send_session_buf = NULL;
	uint32_t buf_count = SESSION_BUF_DEFAULT_COUNT;
	uint32_t count = 0;

	DBG("List sessions received");

	send_session_buf = zmalloc(SESSION_BUF_DEFAULT_COUNT * sizeof(*send_session_buf));
	if (!send_session_buf) {
		return -1;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(sessions_ht->ht, &iter.iter, session,
			session_n.node) {
		struct lttng_viewer_session *send_session;

		health_code_update();

		pthread_mutex_lock(&session->lock);
		if (session->connection_closed) {
			/* Skip closed session */
			goto next_session;
		}

		if (count >= buf_count) {
			struct lttng_viewer_session *newbuf;
			uint32_t new_buf_count = buf_count << 1;

			newbuf = realloc(send_session_buf,
					new_buf_count * sizeof(*send_session_buf));
			if (!newbuf) {
				ret = -1;
				goto break_loop;
			}
			send_session_buf = newbuf;
			buf_count = new_buf_count;
		}
		send_session = &send_session_buf[count];
		if (lttng_strncpy(send_session->session_name,
				session->session_name,
				sizeof(send_session->session_name))) {
			ret = -1;
			goto break_loop;
		}
		if (lttng_strncpy(send_session->hostname, session->hostname,
				sizeof(send_session->hostname))) {
			ret = -1;
			goto break_loop;
		}
		send_session->id = htobe64(session->id);
		send_session->live_timer = htobe32(session->live_timer);
		if (session->viewer_attached) {
			send_session->clients = htobe32(1);
		} else {
			send_session->clients = htobe32(0);
		}
		send_session->streams = htobe32(session->stream_count);
		count++;
	next_session:
		pthread_mutex_unlock(&session->lock);
		continue;
	break_loop:
		pthread_mutex_unlock(&session->lock);
		break;
	}
	rcu_read_unlock();
	if (ret < 0) {
		goto end;
	}

	session_list.sessions_count = htobe32(count);

	health_code_update();

	ret = send_response(conn->sock, &session_list, sizeof(session_list));
	if (ret < 0) {
		goto end;
	}

	health_code_update();

	ret = send_response(conn->sock, send_session_buf,
			count * sizeof(*send_session_buf));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	ret = 0;
end:
	free(send_session_buf);
	return ret;
}
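/*
 * Example of the buffer growth above: with SESSION_BUF_DEFAULT_COUNT = 16,
 * listing 40 sessions reallocates the reply buffer from 16 to 32 and then to
 * 64 entries (buf_count << 1 on each growth), before sending
 * 40 * sizeof(struct lttng_viewer_session) bytes of payload.
 */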

/*
 * Send the viewer the list of new streams for a session.
 */
static int viewer_get_new_streams(struct relay_connection *conn)
{
	int ret, send_streams = 0;
	uint32_t nb_created = 0, nb_unsent = 0, nb_streams = 0, nb_total = 0;
	struct lttng_viewer_new_streams_request request;
	struct lttng_viewer_new_streams_response response;
	struct relay_session *session = NULL;
	uint64_t session_id;
	bool closed = false;

	assert(conn);

	DBG("Get new streams received");

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto error;
	}
	session_id = be64toh(request.session_id);

	health_code_update();

	memset(&response, 0, sizeof(response));

	session = session_get_by_id(session_id);
	if (!session) {
		DBG("Relay session %" PRIu64 " not found", session_id);
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
		goto send_reply;
	}

	if (!viewer_session_is_attached(conn->viewer_session, session)) {
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
		goto send_reply;
	}

	/*
	 * For any new stream, create it with LTTNG_VIEWER_SEEK_BEGINNING since
	 * at this point the client is already attached to the session. Any
	 * initial stream will have been created with the seek type at attach
	 * time (for now most readers use the LTTNG_VIEWER_SEEK_LAST on attach).
	 * Otherwise any event happening in a new stream between the attach and
	 * a call to viewer_get_new_streams will be "lost" (never received) from
	 * the viewer's point of view.
	 */
	pthread_mutex_lock(&session->lock);
	/*
	 * If a session rotation is ongoing, do not attempt to open any
	 * stream, because the chunk can be in an intermediate state
	 * due to directory renaming.
	 */
	if (session_has_ongoing_rotation(session)) {
		DBG("Relay session %" PRIu64 " rotation ongoing", session_id);
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_NO_NEW);
		goto send_reply_unlock;
	}

	ret = make_viewer_streams(session,
			conn->viewer_session,
			LTTNG_VIEWER_SEEK_BEGINNING, &nb_total, &nb_unsent,
			&nb_created, &closed);
	if (ret < 0) {
		/*
		 * This is caused by an internal error; propagate the negative
		 * 'ret' to close the connection.
		 */
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
		goto send_reply_unlock;
	}
	send_streams = 1;
	uatomic_set(&session->new_streams, 0);

	response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_OK);

	/* Only send back the newly created streams with the unsent ones. */
	nb_streams = nb_created + nb_unsent;
	response.streams_count = htobe32(nb_streams);

	/*
	 * If the session is closed, HUP when there are no more streams
	 * with data.
	 */
	if (closed && nb_total == 0) {
		send_streams = 0;
		response.streams_count = 0;
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_HUP);
		goto send_reply_unlock;
	}

send_reply_unlock:
	pthread_mutex_unlock(&session->lock);

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
	if (ret < 0) {
		goto end_put_session;
	}
	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer
	 * knows what is happening.
	 */
	if (!send_streams || !nb_streams) {
		ret = 0;
		goto end_put_session;
	}

	/*
	 * Send stream and *DON'T* ignore the sent flag so every viewer
	 * streams that were not sent from that point will be sent to
	 * the viewer.
	 */
	ret = send_viewer_streams(conn->sock, session_id, 0);
	if (ret < 0) {
		goto end_put_session;
	}

end_put_session:
	if (session) {
		session_put(session);
	}
error:
	return ret;
}
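/*
 * Note: this command is the incremental counterpart of
 * viewer_attach_session(): it passes ignore_sent_flag = 0 to
 * send_viewer_streams() so only streams whose sent_flag is still clear are
 * announced, whereas attach passes 1 and (re)sends every stream of the
 * session.
 */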

/*
 * Attach the viewer to a session and send it the list of its streams.
 */
static int viewer_attach_session(struct relay_connection *conn)
{
	int send_streams = 0;
	ssize_t ret;
	uint32_t nb_streams = 0;
	enum lttng_viewer_seek seek_type;
	struct lttng_viewer_attach_session_request request;
	struct lttng_viewer_attach_session_response response;
	struct relay_session *session = NULL;
	enum lttng_viewer_attach_return_code viewer_attach_status;
	bool closed = false;
	uint64_t session_id;

	assert(conn);

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto error;
	}

	session_id = be64toh(request.session_id);
	health_code_update();

	memset(&response, 0, sizeof(response));

	if (!conn->viewer_session) {
		DBG("Client trying to attach before creating a live viewer session");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_NO_SESSION);
		goto send_reply;
	}

	session = session_get_by_id(session_id);
	if (!session) {
		DBG("Relay session %" PRIu64 " not found", session_id);
		response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
		goto send_reply;
	}
	DBG("Attach session ID %" PRIu64 " received", session_id);

	pthread_mutex_lock(&session->lock);
	if (session->live_timer == 0) {
		DBG("Not live session");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_NOT_LIVE);
		goto send_reply;
	}

	send_streams = 1;
	viewer_attach_status = viewer_session_attach(conn->viewer_session,
			session);
	if (viewer_attach_status != LTTNG_VIEWER_ATTACH_OK) {
		response.status = htobe32(viewer_attach_status);
		goto send_reply;
	}

	switch (be32toh(request.seek)) {
	case LTTNG_VIEWER_SEEK_BEGINNING:
	case LTTNG_VIEWER_SEEK_LAST:
		response.status = htobe32(LTTNG_VIEWER_ATTACH_OK);
		seek_type = be32toh(request.seek);
		break;
	default:
		ERR("Wrong seek parameter");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_SEEK_ERR);
		send_streams = 0;
		goto send_reply;
	}

	/*
	 * If a session rotation is ongoing, do not attempt to open any
	 * stream, because the chunk can be in an intermediate state
	 * due to directory renaming.
	 */
	if (session_has_ongoing_rotation(session)) {
		DBG("Relay session %" PRIu64 " rotation ongoing", session_id);
		send_streams = 0;
		goto send_reply;
	}

	ret = make_viewer_streams(session,
			conn->viewer_session, seek_type,
			&nb_streams, NULL, NULL, &closed);
	if (ret < 0) {
		goto end_put_session;
	}
	pthread_mutex_unlock(&session->lock);
	session_put(session);
	session = NULL;

	response.streams_count = htobe32(nb_streams);
	/*
	 * If the session is closed when the viewer is attaching, it
	 * means some of the streams may have been concurrently removed,
	 * so we don't allow the viewer to attach, even if there are
	 * streams available.
	 */
	if (closed) {
		send_streams = 0;
		response.streams_count = 0;
		response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
		goto send_reply;
	}

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
	if (ret < 0) {
		goto end_put_session;
	}
	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer
	 * knows what is happening.
	 */
	if (!send_streams || !nb_streams) {
		ret = 0;
		goto end_put_session;
	}

	/* Send stream and ignore the sent flag. */
	ret = send_viewer_streams(conn->sock, session_id, 1);
	if (ret < 0) {
		goto end_put_session;
	}

end_put_session:
	if (session) {
		pthread_mutex_unlock(&session->lock);
		session_put(session);
	}
error:
	return ret;
}

/*
 * Open the index file if needed for the given vstream.
 *
 * If an index file is successfully opened, the vstream will set it as its
 * current index file.
 *
 * Return 0 on success, a negative value on error (-ENOENT if not ready yet).
 *
 * Called with rstream lock held.
 */
static int try_open_index(struct relay_viewer_stream *vstream,
		struct relay_stream *rstream)
{
	int ret = 0;
	const uint32_t connection_major = rstream->trace->session->major;
	const uint32_t connection_minor = rstream->trace->session->minor;
	enum lttng_trace_chunk_status chunk_status;

	if (vstream->index_file) {
		goto end;
	}

	/*
	 * First time, we open the index file and at least one index is ready.
	 */
	if (rstream->index_received_seqcount == 0 ||
			!vstream->stream_file.trace_chunk) {
		ret = -ENOENT;
		goto end;
	}

	chunk_status = lttng_index_file_create_from_trace_chunk_read_only(
			vstream->stream_file.trace_chunk, rstream->path_name,
			rstream->channel_name, rstream->tracefile_size,
			vstream->current_tracefile_id,
			lttng_to_index_major(connection_major, connection_minor),
			lttng_to_index_minor(connection_major, connection_minor),
			true, &vstream->index_file);
	if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
		if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE) {
			ret = -ENOENT;
		} else {
			ret = -1;
		}
	}

end:
	return ret;
}
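/*
 * Note: the -ENOENT return is not fatal; viewer_get_next_index() maps it to
 * LTTNG_VIEWER_INDEX_HUP when the relay stream is closed and to
 * LTTNG_VIEWER_INDEX_RETRY otherwise, letting the client poll again later.
 */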

/*
 * Check the status of the index for the given stream. This function
 * updates the index structure if needed and can put (close) the vstream
 * in the HUP situation.
 *
 * Return 0 means that we can proceed with the index. A value of 1 means
 * that the index has been updated and is ready to be sent to the
 * client. A negative value indicates an error that can't be handled.
 *
 * Called with rstream lock held.
 */
static int check_index_status(struct relay_viewer_stream *vstream,
		struct relay_stream *rstream, struct ctf_trace *trace,
		struct lttng_viewer_index *index)
{
	int ret;

	DBG("Check index status: index_received_seqcount %" PRIu64 " "
			"index_sent_seqcount %" PRIu64 " "
			"for stream %" PRIu64,
			rstream->index_received_seqcount,
			vstream->index_sent_seqcount,
			vstream->stream->stream_handle);
	if ((trace->session->connection_closed || rstream->closed)
			&& rstream->index_received_seqcount
				== vstream->index_sent_seqcount) {
		/*
		 * Last index sent and session connection or relay
		 * stream are closed.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
		goto hup;
	} else if (rstream->beacon_ts_end != -1ULL &&
			(rstream->index_received_seqcount == 0 ||
			(vstream->index_sent_seqcount != 0 &&
			rstream->index_received_seqcount
				<= vstream->index_sent_seqcount))) {
		/*
		 * We've received a synchronization beacon and the last index
		 * available has been sent, the index for now is inactive.
		 *
		 * In this case, we have received a beacon which allows us to
		 * inform the client of a time interval during which we can
		 * guarantee that there are no events to read (and never will
		 * be).
		 *
		 * The sent seqcount can grow higher than receive seqcount on
		 * clear because the rotation performed by clear will push
		 * the index_sent_seqcount ahead (see
		 * viewer_stream_sync_tracefile_array_tail) and skip over
		 * packet sequence numbers.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_INACTIVE);
		index->timestamp_end = htobe64(rstream->beacon_ts_end);
		index->stream_id = htobe64(rstream->ctf_stream_id);
		DBG("Check index status: inactive with beacon, for stream %" PRIu64,
				vstream->stream->stream_handle);
		goto index_ready;
	} else if (rstream->index_received_seqcount == 0 ||
			(vstream->index_sent_seqcount != 0 &&
			rstream->index_received_seqcount
				<= vstream->index_sent_seqcount)) {
		/*
		 * This checks whether received <= sent seqcount. In
		 * this case, we have not received a beacon. Therefore,
		 * we can only ask the client to retry later.
		 *
		 * The sent seqcount can grow higher than receive seqcount on
		 * clear because the rotation performed by clear will push
		 * the index_sent_seqcount ahead (see
		 * viewer_stream_sync_tracefile_array_tail) and skip over
		 * packet sequence numbers.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
		DBG("Check index status: retry for stream %" PRIu64,
				vstream->stream->stream_handle);
		goto index_ready;
	} else if (!tracefile_array_seq_in_file(rstream->tfa,
			vstream->current_tracefile_id,
			vstream->index_sent_seqcount)) {
		/*
		 * The next index we want to send cannot be read either
		 * because we need to perform a rotation, or due to
		 * the producer having overwritten its trace file.
		 */
		DBG("Viewer stream %" PRIu64 " rotation",
				vstream->stream->stream_handle);
		ret = viewer_stream_rotate(vstream);
		if (ret == 1) {
			/* EOF across entire stream. */
			index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
			goto hup;
		}
		/*
		 * If we have been pushed due to overwrite, it
		 * necessarily means there is data that can be read in
		 * the stream. If we rotated because we reached the end
		 * of a tracefile, it means the following tracefile
		 * needs to contain at least one index, else we would
		 * have already returned LTTNG_VIEWER_INDEX_RETRY to the
		 * viewer. The updated index_sent_seqcount needs to
		 * point to a readable index entry now.
		 *
		 * In the case where we "rotate" on a single file, we
		 * can end up in a case where the requested index is
		 * still unavailable.
		 */
		if (rstream->tracefile_count == 1 &&
				!tracefile_array_seq_in_file(
					rstream->tfa,
					vstream->current_tracefile_id,
					vstream->index_sent_seqcount)) {
			index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
			DBG("Check index status: retry: "
					"tracefile array sequence number %" PRIu64
					" not in file for stream %" PRIu64,
					vstream->index_sent_seqcount,
					vstream->stream->stream_handle);
			goto index_ready;
		}
		assert(tracefile_array_seq_in_file(rstream->tfa,
				vstream->current_tracefile_id,
				vstream->index_sent_seqcount));
	}
	/* ret == 0 means successful so we continue. */
	ret = 0;
	return ret;

hup:
	viewer_stream_put(vstream);
index_ready:
	return 1;
}

static void viewer_stream_rotate_to_trace_chunk(struct relay_viewer_stream *vstream,
		struct lttng_trace_chunk *new_trace_chunk)
{
	lttng_trace_chunk_put(vstream->stream_file.trace_chunk);

	if (new_trace_chunk) {
		const bool acquired_reference = lttng_trace_chunk_get(
				new_trace_chunk);

		assert(acquired_reference);
	}

	vstream->stream_file.trace_chunk = new_trace_chunk;
	viewer_stream_sync_tracefile_array_tail(vstream);
	viewer_stream_close_files(vstream);
}

/*
 * Send the next index for a stream.
 *
 * Return 0 on success or else a negative value.
 */
static int viewer_get_next_index(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_get_next_index request_index;
	struct lttng_viewer_index viewer_index;
	struct ctf_packet_index packet_index;
	struct relay_viewer_stream *vstream = NULL;
	struct relay_stream *rstream = NULL;
	struct ctf_trace *ctf_trace = NULL;
	struct relay_viewer_stream *metadata_viewer_stream = NULL;
	bool attached_sessions_have_new_streams = false;

	assert(conn);

	DBG("Viewer get next index");

	memset(&viewer_index, 0, sizeof(viewer_index));
	health_code_update();

	ret = recv_request(conn->sock, &request_index, sizeof(request_index));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	vstream = viewer_stream_get_by_id(be64toh(request_index.stream_id));
	if (!vstream) {
		DBG("Client requested index of unknown stream id %" PRIu64,
				(uint64_t) be64toh(request_index.stream_id));
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	}

	/* Use back. ref. Protected by refcounts. */
	rstream = vstream->stream;
	ctf_trace = rstream->trace;

	/* metadata_viewer_stream may be NULL. */
	metadata_viewer_stream =
			ctf_trace_get_viewer_metadata_stream(ctf_trace);

	/*
	 * Hold the session lock to protect against concurrent changes
	 * to the chunk files (e.g. rename done by clear), which are
	 * protected by the session ongoing rotation state. Those are
	 * synchronized with the session lock.
	 */
	pthread_mutex_lock(&rstream->trace->session->lock);
	pthread_mutex_lock(&rstream->lock);

	/*
	 * The viewer should not ask for index on metadata stream.
	 */
	if (rstream->is_metadata) {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
		goto send_reply;
	}

	ret = check_new_streams(conn);
	if (ret < 0) {
		viewer_index.status = LTTNG_VIEWER_INDEX_ERR;
		ERR("Error checking for new streams in the attached sessions, returning status=%s",
				lttng_viewer_next_index_return_code_str(
					(enum lttng_viewer_next_index_return_code) viewer_index.status));
		goto send_reply;
	} else if (ret == 1) {
		attached_sessions_have_new_streams = true;
	}

	if (rstream->ongoing_rotation.is_set) {
		/* Rotation is ongoing, try again later. */
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
		goto send_reply;
	}

	if (session_has_ongoing_rotation(rstream->trace->session)) {
		/* Rotation is ongoing, try again later. */
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
		goto send_reply;
	}

	/*
	 * Transition the viewer session into the newest trace chunk available.
	 */
	if (!lttng_trace_chunk_ids_equal(
			conn->viewer_session->current_trace_chunk,
			rstream->trace_chunk)) {
		DBG("Relay stream and viewer chunk ids differ");

		ret = viewer_session_set_trace_chunk_copy(
				conn->viewer_session,
				rstream->trace_chunk);
		if (ret) {
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
			goto send_reply;
		}
	}

	/*
	 * Transition the viewer stream into the latest trace chunk available.
	 *
	 * Note that the stream must _not_ rotate in one precise condition:
	 * the relay stream has rotated to a NULL trace chunk and the viewer
	 * stream is consuming the trace chunk that was active just before
	 * that rotation to NULL.
	 *
	 * This allows clients to consume all the packets of a trace chunk
	 * after a session's destruction.
	 */
	if (!lttng_trace_chunk_ids_equal(conn->viewer_session->current_trace_chunk, vstream->stream_file.trace_chunk) &&
			!(rstream->completed_rotation_count == vstream->last_seen_rotation_count + 1 && !rstream->trace_chunk)) {
		DBG("Viewer session and viewer stream chunk IDs differ: "
				"vsession chunk %p vstream chunk %p",
				conn->viewer_session->current_trace_chunk,
				vstream->stream_file.trace_chunk);
		viewer_stream_rotate_to_trace_chunk(vstream,
				conn->viewer_session->current_trace_chunk);
		vstream->last_seen_rotation_count =
				rstream->completed_rotation_count;
	}

	ret = check_index_status(vstream, rstream, ctf_trace, &viewer_index);
	if (ret < 0) {
		goto error_put;
	} else if (ret == 1) {
		/*
		 * We have no index to send and check_index_status has populated
		 * viewer_index's status.
		 */
		goto send_reply;
	}
	/* At this point, ret is 0 thus we will be able to read the index. */

	/* Try to open an index if one is needed for that stream. */
	ret = try_open_index(vstream, rstream);
	if (ret == -ENOENT) {
		if (rstream->closed) {
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
			goto send_reply;
		} else {
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
			goto send_reply;
		}
	}
	if (ret < 0) {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	}

	/*
	 * vstream->stream_fd may be NULL if it has been closed by
	 * tracefile rotation, or if we are at the beginning of the
	 * stream. We open the data stream file here to protect against
	 * overwrite caused by tracefile rotation (in association with
	 * unlink performed before overwrite).
	 */
	if (!vstream->stream_file.handle) {
		char file_path[LTTNG_PATH_MAX];
		enum lttng_trace_chunk_status status;
		struct fs_handle *fs_handle;

		ret = utils_stream_file_path(rstream->path_name,
				rstream->channel_name, rstream->tracefile_size,
				vstream->current_tracefile_id, NULL, file_path,
				sizeof(file_path));
		if (ret < 0) {
			goto error_put;
		}

		/*
		 * It is possible that the file we are trying to open is
		 * missing if the stream has been closed (application exits with
		 * per-pid buffers) and a clear command has been performed.
		 */
		status = lttng_trace_chunk_open_fs_handle(
				vstream->stream_file.trace_chunk,
				file_path, O_RDONLY, 0, &fs_handle, true);
		if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			if (status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE &&
					rstream->closed) {
				viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
				goto send_reply;
			}
			PERROR("Failed to open trace file for viewer stream");
			goto error_put;
		}
		vstream->stream_file.handle = fs_handle;
	}

	ret = lttng_index_file_read(vstream->index_file, &packet_index);
	if (ret) {
		ERR("Relay error reading index file");
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	} else {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_OK);
		vstream->index_sent_seqcount++;
	}

	/*
	 * Indexes are stored in big endian, no need to switch before sending.
	 */
	DBG("Sending viewer index for stream %" PRIu64 " offset %" PRIu64,
			rstream->stream_handle,
			(uint64_t) be64toh(packet_index.offset));
	viewer_index.offset = packet_index.offset;
	viewer_index.packet_size = packet_index.packet_size;
	viewer_index.content_size = packet_index.content_size;
	viewer_index.timestamp_begin = packet_index.timestamp_begin;
	viewer_index.timestamp_end = packet_index.timestamp_end;
	viewer_index.events_discarded = packet_index.events_discarded;
	viewer_index.stream_id = packet_index.stream_id;

send_reply:
	if (vstream) {
		pthread_mutex_unlock(&rstream->lock);
		pthread_mutex_unlock(&rstream->trace->session->lock);
	}

	if (metadata_viewer_stream) {
		pthread_mutex_lock(&metadata_viewer_stream->stream->lock);
		DBG("get next index metadata check: recv %" PRIu64 " sent %" PRIu64,
				metadata_viewer_stream->stream->metadata_received,
				metadata_viewer_stream->metadata_sent);
		if (!metadata_viewer_stream->stream->metadata_received ||
				metadata_viewer_stream->stream->metadata_received >
				metadata_viewer_stream->metadata_sent) {
			viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
		}
		pthread_mutex_unlock(&metadata_viewer_stream->stream->lock);
	}

	if (attached_sessions_have_new_streams) {
		viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
	}

	viewer_index.flags = htobe32(viewer_index.flags);
	health_code_update();

	ret = send_response(conn->sock, &viewer_index, sizeof(viewer_index));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	if (vstream) {
		DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
				vstream->index_sent_seqcount,
				vstream->stream->stream_handle);
	}
end:
	if (metadata_viewer_stream) {
		viewer_stream_put(metadata_viewer_stream);
	}
	if (vstream) {
		viewer_stream_put(vstream);
	}
	return ret;

error_put:
	pthread_mutex_unlock(&rstream->lock);
	pthread_mutex_unlock(&rstream->trace->session->lock);
	if (metadata_viewer_stream) {
		viewer_stream_put(metadata_viewer_stream);
	}
	viewer_stream_put(vstream);
	return ret;
}

/*
 * Send the requested trace packet for a stream.
 *
 * Return 0 on success or else a negative value.
 */
static int viewer_get_packet(struct relay_connection *conn)
{
	int ret;
	off_t lseek_ret;
	char *reply = NULL;
	struct lttng_viewer_get_packet get_packet_info;
	struct lttng_viewer_trace_packet reply_header;
	struct relay_viewer_stream *vstream = NULL;
	uint32_t reply_size = sizeof(reply_header);
	uint32_t packet_data_len = 0;
	ssize_t read_len;
	uint64_t stream_id;

	DBG2("Relay get data packet");

	health_code_update();

	ret = recv_request(conn->sock, &get_packet_info,
			sizeof(get_packet_info));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	/* From this point on, the error label can be reached. */
	memset(&reply_header, 0, sizeof(reply_header));
	stream_id = (uint64_t) be64toh(get_packet_info.stream_id);

	vstream = viewer_stream_get_by_id(stream_id);
	if (!vstream) {
		DBG("Client requested packet of unknown stream id %" PRIu64,
				stream_id);
		reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
		goto send_reply_nolock;
	} else {
		packet_data_len = be32toh(get_packet_info.len);
		reply_size += packet_data_len;
	}

	reply = zmalloc(reply_size);
	if (!reply) {
		PERROR("packet reply zmalloc");
		reply_size = sizeof(reply_header);
		goto error;
	}

	pthread_mutex_lock(&vstream->stream->lock);
	lseek_ret = fs_handle_seek(vstream->stream_file.handle,
			be64toh(get_packet_info.offset), SEEK_SET);
	if (lseek_ret < 0) {
		PERROR("Failed to seek file system handle of viewer stream %" PRIu64
				" to offset %" PRIu64,
				stream_id,
				(uint64_t) be64toh(get_packet_info.offset));
		goto error;
	}
	read_len = fs_handle_read(vstream->stream_file.handle,
			reply + sizeof(reply_header), packet_data_len);
	if (read_len < packet_data_len) {
		PERROR("Failed to read from file system handle of viewer stream id %" PRIu64
				", offset: %" PRIu64,
				stream_id,
				(uint64_t) be64toh(get_packet_info.offset));
		goto error;
	}
	reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_OK);
	reply_header.len = htobe32(packet_data_len);
	goto send_reply;

error:
	/* No payload to send on error. */
	reply_size = sizeof(reply_header);
	reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);

send_reply:
	if (vstream) {
		pthread_mutex_unlock(&vstream->stream->lock);
	}
send_reply_nolock:

	health_code_update();

	if (reply) {
		memcpy(reply, &reply_header, sizeof(reply_header));
		ret = send_response(conn->sock, reply, reply_size);
	} else {
		/* No reply to send. */
		ret = send_response(conn->sock, &reply_header,
				reply_size);
	}

	health_code_update();
	if (ret < 0) {
		PERROR("sendmsg of packet data failed");
		goto end_free;
	}

	DBG("Sent %u bytes for stream %" PRIu64, reply_size, stream_id);

end_free:
	free(reply);
end:
	if (vstream) {
		viewer_stream_put(vstream);
	}
	return ret;
}
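/*
 * Note: on success the reply is a single buffer laid out as
 * [struct lttng_viewer_trace_packet header][packet_data_len bytes of trace
 * data]; on any error only the header is sent (reply_size is shrunk back to
 * sizeof(reply_header)) with a GET_PACKET_ERR status.
 */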

/*
 * Send the session's metadata
 *
 * Return 0 on success else a negative value.
 */
static int viewer_get_metadata(struct relay_connection *conn)
{
	int ret = 0;
	int fd = -1;
	ssize_t read_len;
	uint64_t len = 0;
	char *data = NULL;
	struct lttng_viewer_get_metadata request;
	struct lttng_viewer_metadata_packet reply;
	struct relay_viewer_stream *vstream = NULL;
	bool dispose_of_stream = false;

	assert(conn);

	DBG("Relay get metadata");

	health_code_update();

	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	memset(&reply, 0, sizeof(reply));

	vstream = viewer_stream_get_by_id(be64toh(request.stream_id));
	if (!vstream) {
		/*
		 * The metadata stream can be closed by a CLOSE command
		 * just before we attach. It can also be closed by
		 * per-pid tracing during tracing. Therefore, it is
		 * possible that we cannot find this viewer stream.
		 * Reply back to the client with an error if we cannot
		 * find it.
		 */
		DBG("Client requested metadata of unknown stream id %" PRIu64,
				(uint64_t) be64toh(request.stream_id));
		reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
		goto send_reply;
	}

	pthread_mutex_lock(&vstream->stream->trace->session->lock);
	pthread_mutex_lock(&vstream->stream->trace->lock);
	pthread_mutex_lock(&vstream->stream->lock);
	if (!vstream->stream->is_metadata) {
		ERR("Invalid metadata stream");
		goto error;
	}

	if (vstream->metadata_sent >= vstream->stream->metadata_received) {
		/*
		 * Clear feature resets the metadata_received to 0 until the
		 * same metadata is received again.
		 */
		reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
		/*
		 * The live viewer considers a closed 0 byte metadata stream as
		 * an error.
		 */
		dispose_of_stream = vstream->metadata_sent > 0 && vstream->stream->closed;
		goto send_reply;
	}

	if (vstream->stream->trace_chunk &&
			!lttng_trace_chunk_ids_equal(
				conn->viewer_session->current_trace_chunk,
				vstream->stream->trace_chunk)) {
		/* A rotation has occurred on the relay stream. */
		DBG("Metadata relay stream and viewer chunk ids differ");

		ret = viewer_session_set_trace_chunk_copy(
				conn->viewer_session,
				vstream->stream->trace_chunk);
		if (ret) {
			reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
			goto error;
		}
	}

	if (conn->viewer_session->current_trace_chunk &&
			!lttng_trace_chunk_ids_equal(conn->viewer_session->current_trace_chunk,
				vstream->stream_file.trace_chunk)) {
		bool acquired_reference;

		DBG("Viewer session and viewer stream chunk differ: "
				"vsession chunk %p vstream chunk %p",
				conn->viewer_session->current_trace_chunk,
				vstream->stream_file.trace_chunk);
		lttng_trace_chunk_put(vstream->stream_file.trace_chunk);
		acquired_reference = lttng_trace_chunk_get(conn->viewer_session->current_trace_chunk);
		assert(acquired_reference);
		vstream->stream_file.trace_chunk =
				conn->viewer_session->current_trace_chunk;
		viewer_stream_close_files(vstream);
	}

	len = vstream->stream->metadata_received - vstream->metadata_sent;

	if (!vstream->stream_file.trace_chunk) {
		if (vstream->stream->trace->session->connection_closed) {
			/*
			 * If the connection is closed, there is no way for the metadata stream
			 * to ever transition back to an active chunk. As such, signal to the viewer
			 * that there is no new metadata available.
			 *
			 * The stream can be disposed-of. On the next execution of this command,
			 * the relay daemon will reply with an error status since the stream can't
			 * be found.
			 */
			dispose_of_stream = true;
		}

		reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
		len = 0;
		goto send_reply;
	} else if (vstream->stream_file.trace_chunk &&
			!vstream->stream_file.handle && len > 0) {
		/*
		 * Either this is the first time the metadata file is read, or a
		 * rotation of the corresponding relay stream has occurred.
		 */
		struct fs_handle *fs_handle;
		char file_path[LTTNG_PATH_MAX];
		enum lttng_trace_chunk_status status;
		struct relay_stream *rstream = vstream->stream;

		ret = utils_stream_file_path(rstream->path_name,
				rstream->channel_name, rstream->tracefile_size,
				vstream->current_tracefile_id, NULL, file_path,
				sizeof(file_path));
		if (ret < 0) {
			goto error;
		}

		/*
		 * It is possible that the metadata file we are trying to open is
		 * missing if the stream has been closed (application exits with
		 * per-pid buffers) and a clear command has been performed.
		 */
		status = lttng_trace_chunk_open_fs_handle(
				vstream->stream_file.trace_chunk,
				file_path, O_RDONLY, 0, &fs_handle, true);
		if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			if (status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE) {
				reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
				len = 0;
				if (vstream->stream->closed) {
					viewer_stream_put(vstream);
				}
				goto send_reply;
			}
			PERROR("Failed to open metadata file for viewer stream");
			goto error;
		}
		vstream->stream_file.handle = fs_handle;

		if (vstream->metadata_sent != 0) {
			/*
			 * The client does not expect to receive any metadata
			 * it has received and metadata files in successive
			 * chunks must be a strict superset of one another.
			 *
			 * Skip the first `metadata_sent` bytes to ensure
			 * they are not sent a second time to the client.
			 *
			 * Barring a block layer error or an internal error,
			 * this seek should not fail as
			 * `vstream->stream->metadata_received` is reset when
			 * a relay stream is rotated. If this is reached, it is
			 * safe to assume that
			 * `metadata_received` > `metadata_sent`.
			 */
			const off_t seek_ret = fs_handle_seek(fs_handle,
					vstream->metadata_sent, SEEK_SET);

			if (seek_ret < 0) {
				PERROR("Failed to seek metadata viewer stream file to `sent` position: pos = %" PRId64,
						vstream->metadata_sent);
				reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
				goto error;
			}
		}
	}

	reply.len = htobe64(len);
	data = zmalloc(len);
	if (!data) {
		PERROR("viewer metadata zmalloc");
		goto error;
	}

	fd = fs_handle_get_fd(vstream->stream_file.handle);
	if (fd < 0) {
		ERR("Failed to restore viewer stream file system handle");
		goto error;
	}
	read_len = lttng_read(fd, data, len);
	fs_handle_put_fd(vstream->stream_file.handle);
	fd = -1;
	if (read_len < len) {
		if (read_len < 0) {
			PERROR("Failed to read metadata file");
			goto error;
		} else {
			/*
			 * A clear has been performed which prevents the relay
			 * from sending `len` bytes of metadata.
			 *
			 * It is important not to send any metadata if we
			 * couldn't read all the available metadata in one shot:
			 * sending partial metadata can cause the client to
			 * attempt to parse an incomplete (incoherent) metadata
			 * stream, which would result in an error.
			 */
			const off_t seek_ret = fs_handle_seek(
					vstream->stream_file.handle, -read_len,
					SEEK_CUR);

			DBG("Failed to read metadata: requested = %" PRIu64 ", got = %zd",
					len, read_len);
			read_len = 0;
			len = 0;
			if (seek_ret < 0) {
				PERROR("Failed to restore metadata file position after partial read");
				ret = -1;
				goto error;
			}
		}
	}
	vstream->metadata_sent += read_len;
	reply.status = htobe32(LTTNG_VIEWER_METADATA_OK);

	goto send_reply;

error:
	reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);

send_reply:
	health_code_update();
	if (vstream) {
		pthread_mutex_unlock(&vstream->stream->lock);
		pthread_mutex_unlock(&vstream->stream->trace->lock);
		pthread_mutex_unlock(&vstream->stream->trace->session->lock);
	}
	ret = send_response(conn->sock, &reply, sizeof(reply));
	if (ret < 0) {
		goto end_free;
	}
	health_code_update();

	if (len > 0) {
		ret = send_response(conn->sock, data, len);
		if (ret < 0) {
			goto end_free;
		}
	}

	DBG("Sent %" PRIu64 " bytes of metadata for stream %" PRIu64, len,
			(uint64_t) be64toh(request.stream_id));

	DBG("Metadata sent");

end_free:
	free(data);
end:
	if (vstream) {
		viewer_stream_put(vstream);
		if (dispose_of_stream) {
			/*
			 * Trigger the destruction of the viewer stream
			 * by releasing its global reference.
			 *
			 * The live viewers expect to receive a NO_NEW_METADATA
			 * status before a stream disappears, otherwise they abort the
			 * entire live connection when receiving an error status.
			 *
			 * On the next query for this stream, an error will be reported to the
			 * client.
			 */
			viewer_stream_put(vstream);
		}
	}
	return ret;
}

/*
 * Create a viewer session.
 *
 * Return 0 on success or else a negative value.
 */
static int viewer_create_session(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_create_session_response resp;

	DBG("Viewer create session received");

	memset(&resp, 0, sizeof(resp));
	resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_OK);
	conn->viewer_session = viewer_session_create();
	if (!conn->viewer_session) {
		ERR("Allocation viewer session");
		resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_ERR);
		goto send_reply;
	}

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &resp, sizeof(resp));
	if (ret < 0) {
		goto end;
	}
	health_code_update();
	ret = 0;

end:
	return ret;
}
/*
 * Detach a viewer session.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_detach_session(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_detach_session_response response;
	struct lttng_viewer_detach_session_request request;
	struct relay_session *session = NULL;
	uint64_t viewer_session_to_close;

	DBG("Viewer detach session received");

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto end;
	}
	viewer_session_to_close = be64toh(request.session_id);

	memset(&response, 0, sizeof(response));

	if (!conn->viewer_session) {
		DBG("Client trying to detach before creating a live viewer session");
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
		goto send_reply;
	}

	health_code_update();

	DBG("Detaching from session ID %" PRIu64, viewer_session_to_close);

	session = session_get_by_id(be64toh(request.session_id));
	if (!session) {
		DBG("Relay session %" PRIu64 " not found",
				(uint64_t) be64toh(request.session_id));
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_UNK);
		goto send_reply;
	}

	ret = viewer_session_is_attached(conn->viewer_session, session);
	if (ret != 1) {
		DBG("Not attached to this session");
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
		goto send_reply_put;
	}

	viewer_session_close_one_session(conn->viewer_session, session);
	response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_OK);
	DBG("Session %" PRIu64 " detached.", viewer_session_to_close);

send_reply_put:
	session_put(session);

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
	if (ret < 0) {
		goto end;
	}
	health_code_update();
	ret = 0;

end:
	return ret;
}
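/*
 * Reference-counting note: session_get_by_id() takes a reference on the
 * session, which the send_reply_put path releases with session_put().
 * Any new early exit added between the get and the put must jump to
 * send_reply_put (not send_reply) to avoid leaking that reference.
 */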
/*
 * live_relay_unknown_command: send -1 if received unknown command
 */
static
void live_relay_unknown_command(struct relay_connection *conn)
{
	struct lttcomm_relayd_generic_reply reply;

	memset(&reply, 0, sizeof(reply));
	reply.ret_code = htobe32(LTTNG_ERR_UNK);
	(void) send_response(conn->sock, &reply, sizeof(reply));
}
/*
 * Process the commands received on the control socket.
 */
static
int process_control(struct lttng_viewer_cmd *recv_hdr,
		struct relay_connection *conn)
{
	int ret = 0;
	uint32_t msg_value;

	msg_value = be32toh(recv_hdr->cmd);

	/*
	 * Make sure we've done the version check before any command other
	 * than a new client connection.
	 */
	if (msg_value != LTTNG_VIEWER_CONNECT && !conn->version_check_done) {
		ERR("Viewer command value %" PRIu32 " received before version check", msg_value);
		ret = -1;
		goto end;
	}

	switch (msg_value) {
	case LTTNG_VIEWER_CONNECT:
		ret = viewer_connect(conn);
		break;
	case LTTNG_VIEWER_LIST_SESSIONS:
		ret = viewer_list_sessions(conn);
		break;
	case LTTNG_VIEWER_ATTACH_SESSION:
		ret = viewer_attach_session(conn);
		break;
	case LTTNG_VIEWER_GET_NEXT_INDEX:
		ret = viewer_get_next_index(conn);
		break;
	case LTTNG_VIEWER_GET_PACKET:
		ret = viewer_get_packet(conn);
		break;
	case LTTNG_VIEWER_GET_METADATA:
		ret = viewer_get_metadata(conn);
		break;
	case LTTNG_VIEWER_GET_NEW_STREAMS:
		ret = viewer_get_new_streams(conn);
		break;
	case LTTNG_VIEWER_CREATE_SESSION:
		ret = viewer_create_session(conn);
		break;
	case LTTNG_VIEWER_DETACH_SESSION:
		ret = viewer_detach_session(conn);
		break;
	default:
		ERR("Received unknown viewer command (%u)",
				be32toh(recv_hdr->cmd));
		live_relay_unknown_command(conn);
		ret = -1;
		goto end;
	}

end:
	return ret;
}
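/*
 * The dispatcher above enforces the protocol ordering: LTTNG_VIEWER_CONNECT
 * must be the first command so that the version check is performed. The
 * sketch below illustrates the command header a client would send first;
 * it is a minimal example under that assumption, not a definitive client
 * implementation, and the field usage mirrors the be32toh()/be64toh()
 * decoding done on the relay side.
 */
#if 0
static int example_send_connect_command(int sockfd)
{
	struct lttng_viewer_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	/* All wire fields are big-endian. */
	cmd.cmd = htobe32(LTTNG_VIEWER_CONNECT);
	/* Size of the payload that follows this header. */
	cmd.data_size = htobe64(sizeof(struct lttng_viewer_connect));
	return send(sockfd, &cmd, sizeof(cmd), 0) == sizeof(cmd) ? 0 : -1;
}
#endif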
static
void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
{
	int ret;

	(void) lttng_poll_del(events, pollfd);

	ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker, &pollfd, 1,
			fd_tracker_util_close_fd, NULL);
	if (ret < 0) {
		ERR("Closing pollfd %d", pollfd);
	}
}
/*
 * This thread does the actual work.
 */
static
void *thread_worker(void *data)
{
	int ret, err = -1;
	uint32_t nb_fd;
	struct lttng_poll_event events;
	struct lttng_ht *viewer_connections_ht;
	struct lttng_ht_iter iter;
	struct lttng_viewer_cmd recv_hdr;
	struct relay_connection *destroy_conn;

	DBG("[thread] Live viewer relay worker started");

	rcu_register_thread();

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_WORKER);

	if (testpoint(relayd_thread_live_worker)) {
		goto error_testpoint;
	}

	/* Table of connections indexed on socket. */
	viewer_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!viewer_connections_ht) {
		goto viewer_connections_ht_error;
	}

	ret = create_named_thread_poll_set(&events, 2,
			"Live viewer worker thread epoll");
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, live_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

restart:
	while (1) {
		int i;

		health_code_update();

		/* Infinite blocking call, waiting for transmission. */
		DBG3("Relayd live viewer worker thread polling...");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/* Restart interrupted system call. */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/*
		 * Process control. The control connection is prioritised so we
		 * don't starve it with high throughput tracing data on the
		 * data connection.
		 */
		for (i = 0; i < nb_fd; i++) {
			/* Fetch the poll data once. */
			uint32_t revents = LTTNG_POLL_GETEV(&events, i);
			int pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the relay conn pipe for new connection. */
			if (pollfd == live_conn_pipe[0]) {
				if (revents & LPOLLIN) {
					struct relay_connection *conn;

					ret = lttng_read(live_conn_pipe[0],
							&conn, sizeof(conn));
					if (ret < 0) {
						goto error;
					}
					ret = lttng_poll_add(&events,
							conn->sock->fd,
							LPOLLIN | LPOLLRDHUP);
					if (ret) {
						ERR("Failed to add new live connection file descriptor to poll set");
						goto error;
					}
					connection_ht_add(viewer_connections_ht, conn);
					DBG("Connection socket %d added to poll", conn->sock->fd);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Relay live pipe error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/* Connection activity. */
				struct relay_connection *conn;

				conn = connection_get_by_sock(viewer_connections_ht, pollfd);
				if (!conn) {
					continue;
				}

				if (revents & LPOLLIN) {
					ret = conn->sock->ops->recvmsg(conn->sock, &recv_hdr,
							sizeof(recv_hdr), 0);
					if (ret <= 0) {
						/* Connection closed. */
						cleanup_connection_pollfd(&events, pollfd);
						/* Put "create" ownership reference. */
						connection_put(conn);
						DBG("Viewer control conn closed with %d", pollfd);
					} else {
						ret = process_control(&recv_hdr, conn);
						if (ret < 0) {
							/* Clear the session on error. */
							cleanup_connection_pollfd(&events, pollfd);
							/* Put "create" ownership reference. */
							connection_put(conn);
							DBG("Viewer connection closed with %d", pollfd);
						}
					}
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					cleanup_connection_pollfd(&events, pollfd);
					/* Put "create" ownership reference. */
					connection_put(conn);
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					connection_put(conn);
				}

				/* Put local "get_by_sock" reference. */
				connection_put(conn);
			}
		}
	}

exit:
error:
	(void) fd_tracker_util_poll_clean(the_fd_tracker, &events);

	/* Clean up remaining connection objects. */
	rcu_read_lock();
	cds_lfht_for_each_entry(viewer_connections_ht->ht, &iter.iter,
			destroy_conn, sock_n.node) {
		health_code_update();
		connection_put(destroy_conn);
	}
	rcu_read_unlock();
error_poll_create:
	lttng_ht_destroy(viewer_connections_ht);
viewer_connections_ht_error:
	/* Close relay conn pipes. */
	(void) fd_tracker_util_pipe_close(the_fd_tracker, live_conn_pipe);
	if (err) {
		DBG("Viewer worker thread exited with error");
	}
	DBG("Viewer worker thread cleanup complete");
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
	}
	rcu_unregister_thread();
	return NULL;
}
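/*
 * Connection reference lifecycle, as handled above: the dispatcher thread
 * transfers a "create" ownership reference through live_conn_pipe, and the
 * worker holds it until the connection closes or errors out. Each
 * connection_get_by_sock() lookup takes an additional short-lived reference
 * that is always released at the end of the iteration, independently of
 * whether the ownership reference was put.
 */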
/*
 * Create the relay connection pipe used to wake the live worker thread.
 * Closed in the worker thread's teardown path.
 */
static int create_conn_pipe(void)
{
	return fd_tracker_util_pipe_open_cloexec(the_fd_tracker,
			"Live connection pipe", live_conn_pipe);
}
int relayd_live_join(void)
{
	int ret, retval = 0;
	void *status;

	ret = pthread_join(live_listener_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live listener");
		retval = -1;
	}

	ret = pthread_join(live_worker_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live worker");
		retval = -1;
	}

	ret = pthread_join(live_dispatcher_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live dispatcher");
		retval = -1;
	}

	cleanup_relayd_live();

	return retval;
}
/*
 * Create and start the live viewer threads.
 */
int relayd_live_create(struct lttng_uri *uri)
{
	int ret = 0, retval = 0;
	void *status;
	int is_root;

	if (!uri) {
		retval = -1;
		goto exit_init_data;
	}
	live_uri = uri;

	/* Check if daemon is UID = 0 */
	is_root = !getuid();
	if (!is_root) {
		if (live_uri->port < 1024) {
			ERR("Need to be root to use ports < 1024");
			retval = -1;
			goto exit_init_data;
		}
	}

	/* Set up the live connection pipe. */
	if (create_conn_pipe()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Init relay command queue. */
	cds_wfcq_init(&viewer_conn_queue.head, &viewer_conn_queue.tail);

	/* Set up max poll set size. */
	if (lttng_poll_set_max_size()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Set up the dispatcher thread. */
	ret = pthread_create(&live_dispatcher_thread, default_pthread_attr(),
			thread_dispatcher, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create viewer dispatcher");
		retval = -1;
		goto exit_dispatcher_thread;
	}

	/* Set up the worker thread. */
	ret = pthread_create(&live_worker_thread, default_pthread_attr(),
			thread_worker, NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create viewer worker");
		retval = -1;
		goto exit_worker_thread;
	}

	/* Set up the listener thread. */
	ret = pthread_create(&live_listener_thread, default_pthread_attr(),
			thread_listener, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create viewer listener");
		retval = -1;
		goto exit_listener_thread;
	}

	/* All OK: all threads were started. */
	return retval;

	/*
	 * Join on the live_listener_thread should anything be added after
	 * the live_listener thread's creation.
	 */

exit_listener_thread:
	ret = pthread_join(live_worker_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live worker");
		retval = -1;
	}

exit_worker_thread:
	ret = pthread_join(live_dispatcher_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live dispatcher");
		retval = -1;
	}

exit_dispatcher_thread:
exit_init_data:
	cleanup_relayd_live();

	return retval;
}
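/*
 * Illustrative usage of the two public entry points above, assuming a
 * caller similar to the relay daemon's main() in which `uri`, `retval` and
 * an `exit` label already exist; this is a sketch of the intended
 * create/join pairing, not code from this project:
 */
#if 0
	if (relayd_live_create(uri)) {
		ERR("Failed to start the live viewer threads");
		goto exit;
	}
	/* ... run until shutdown is requested ... */
	if (relayd_live_join()) {
		/* At least one live thread failed to join. */
		retval = -1;
	}
#endif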