Fix: relayd: live: some listed sessions are not attachable
lttng-tools.git: src/bin/lttng-relayd/live.c
1 /*
2 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 * 2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20 #define _LGPL_SOURCE
21 #include <getopt.h>
22 #include <grp.h>
23 #include <limits.h>
24 #include <pthread.h>
25 #include <signal.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
33 #include <sys/stat.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <inttypes.h>
37 #include <urcu/futex.h>
38 #include <urcu/uatomic.h>
39 #include <urcu/rculist.h>
40 #include <unistd.h>
41 #include <fcntl.h>
42
43 #include <lttng/lttng.h>
44 #include <common/common.h>
45 #include <common/compat/poll.h>
46 #include <common/compat/socket.h>
47 #include <common/compat/endian.h>
48 #include <common/defaults.h>
49 #include <common/futex.h>
50 #include <common/index/index.h>
51 #include <common/sessiond-comm/sessiond-comm.h>
52 #include <common/sessiond-comm/inet.h>
53 #include <common/sessiond-comm/relayd.h>
54 #include <common/uri.h>
55 #include <common/utils.h>
56
57 #include "cmd.h"
58 #include "live.h"
59 #include "lttng-relayd.h"
60 #include "utils.h"
61 #include "health-relayd.h"
62 #include "testpoint.h"
63 #include "viewer-stream.h"
64 #include "stream.h"
65 #include "session.h"
66 #include "ctf-trace.h"
67 #include "connection.h"
68 #include "viewer-session.h"
69
70 #define SESSION_BUF_DEFAULT_COUNT 16
71
72 static struct lttng_uri *live_uri;
73
74 /*
75 * This pipe is used to inform the worker thread that a command is queued and
76 * ready to be processed.
77 */
78 static int live_conn_pipe[2] = { -1, -1 };
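/*
 * As implemented below, the dispatcher thread writes struct relay_connection
 * pointers to live_conn_pipe[1] and the worker thread reads them back from
 * live_conn_pipe[0]; the pipe therefore carries pointers, not serialized
 * commands.
 */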
79
80 /* Shared between threads */
81 static int live_dispatch_thread_exit;
82
83 static pthread_t live_listener_thread;
84 static pthread_t live_dispatcher_thread;
85 static pthread_t live_worker_thread;
86
87 /*
88 * Relay command queue.
89 *
90 * The live listener and live dispatcher threads communicate through this
91 * queue.
92 */
93 static struct relay_conn_queue viewer_conn_queue;
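/*
 * Flow of a new viewer connection: the listener thread enqueues it on this
 * queue (cds_wfcq_enqueue) and wakes the queue futex; the dispatcher thread
 * dequeues it and forwards it to the worker thread through live_conn_pipe.
 */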
94
95 static uint64_t last_relay_viewer_session_id;
96 static pthread_mutex_t last_relay_viewer_session_id_lock =
97 PTHREAD_MUTEX_INITIALIZER;
98
99 /*
100 * Cleanup the daemon
101 */
102 static
103 void cleanup_relayd_live(void)
104 {
105 DBG("Cleaning up");
106
107 free(live_uri);
108 }
109
110 /*
111 * Receive a request of `size` bytes on the given socket into the
112 * caller-allocated buffer `buf`.
113 *
114 * Return the size of the received message on success, or -1 on error, on a
115 * short read or on an orderly shutdown (errno is set by the recvmsg() syscall on error).
116 */
117 static
118 ssize_t recv_request(struct lttcomm_sock *sock, void *buf, size_t size)
119 {
120 ssize_t ret;
121
122 ret = sock->ops->recvmsg(sock, buf, size, 0);
123 if (ret < 0 || ret != size) {
124 if (ret == 0) {
125 /* Orderly shutdown. Not necessary to print an error. */
126 DBG("Socket %d did an orderly shutdown", sock->fd);
127 } else {
128 ERR("Relay failed to receive request.");
129 }
130 ret = -1;
131 }
132
133 return ret;
134 }
135
136 /*
137 * Send a response of `size` bytes from the caller-allocated buffer `buf` on
138 * the given socket.
139 *
140 * Return the size of the sent message or else a negative value on error, with
141 * errno being set by the sendmsg() syscall.
142 */
143 static
144 ssize_t send_response(struct lttcomm_sock *sock, void *buf, size_t size)
145 {
146 ssize_t ret;
147
148 ret = sock->ops->sendmsg(sock, buf, size, 0);
149 if (ret < 0) {
150 ERR("Relayd failed to send response.");
151 }
152
153 return ret;
154 }
155
156 /*
157 * Atomically check if new streams got added in one of the attached sessions
158 * and reset the flag to 0.
159 *
160 * Returns 1 if new streams got added, 0 if nothing changed, a negative value
161 * on error.
162 */
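/*
 * The check relies on uatomic_cmpxchg(&session->new_streams, 1, 0), which
 * returns the value the flag held before the exchange: a return of 1 means
 * new streams were announced and the flag has atomically been reset to 0.
 */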
163 static
164 int check_new_streams(struct relay_connection *conn)
165 {
166 struct relay_session *session;
167 unsigned long current_val;
168 int ret = 0;
169
170 if (!conn->viewer_session) {
171 goto end;
172 }
173 rcu_read_lock();
174 cds_list_for_each_entry_rcu(session,
175 &conn->viewer_session->session_list,
176 viewer_session_node) {
177 if (!session_get(session)) {
178 continue;
179 }
180 current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
181 ret = current_val;
182 session_put(session);
183 if (ret == 1) {
184 goto end;
185 }
186 }
187 end:
188 rcu_read_unlock();
189 return ret;
190 }
191
192 /*
193 * Send viewer streams to the given socket. The ignore_sent_flag indicates if
194 * this function should ignore the sent flag or not.
195 *
196 * Return 0 on success or else a negative value.
197 */
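/*
 * Each stream is sent as one struct lttng_viewer_stream carrying its handle,
 * ctf trace id and metadata flag in big endian, plus its path and channel
 * names; streams belonging to other sessions are skipped.
 */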
198 static
199 ssize_t send_viewer_streams(struct lttcomm_sock *sock,
200 uint64_t session_id, unsigned int ignore_sent_flag)
201 {
202 ssize_t ret;
203 struct lttng_viewer_stream send_stream;
204 struct lttng_ht_iter iter;
205 struct relay_viewer_stream *vstream;
206
207 rcu_read_lock();
208
209 cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, vstream,
210 stream_n.node) {
211 struct ctf_trace *ctf_trace;
212
213 health_code_update();
214
215 if (!viewer_stream_get(vstream)) {
216 continue;
217 }
218
219 pthread_mutex_lock(&vstream->stream->lock);
220 /* Ignore if not the same session. */
221 if (vstream->stream->trace->session->id != session_id ||
222 (!ignore_sent_flag && vstream->sent_flag)) {
223 pthread_mutex_unlock(&vstream->stream->lock);
224 viewer_stream_put(vstream);
225 continue;
226 }
227
228 ctf_trace = vstream->stream->trace;
229 send_stream.id = htobe64(vstream->stream->stream_handle);
230 send_stream.ctf_trace_id = htobe64(ctf_trace->id);
231 send_stream.metadata_flag = htobe32(
232 vstream->stream->is_metadata);
233 if (lttng_strncpy(send_stream.path_name, vstream->path_name,
234 sizeof(send_stream.path_name))) {
235 pthread_mutex_unlock(&vstream->stream->lock);
236 viewer_stream_put(vstream);
237 ret = -1; /* Error. */
238 goto end_unlock;
239 }
240 if (lttng_strncpy(send_stream.channel_name,
241 vstream->channel_name,
242 sizeof(send_stream.channel_name))) {
243 pthread_mutex_unlock(&vstream->stream->lock);
244 viewer_stream_put(vstream);
245 ret = -1; /* Error. */
246 goto end_unlock;
247 }
248
249 DBG("Sending stream %" PRIu64 " to viewer",
250 vstream->stream->stream_handle);
251 vstream->sent_flag = 1;
252 pthread_mutex_unlock(&vstream->stream->lock);
253
254 ret = send_response(sock, &send_stream, sizeof(send_stream));
255 viewer_stream_put(vstream);
256 if (ret < 0) {
257 goto end_unlock;
258 }
259 }
260
261 ret = 0;
262
263 end_unlock:
264 rcu_read_unlock();
265 return ret;
266 }
267
268 /*
269 * Create every viewer stream possible for the given session with the seek
270 * type. Three counters *can* be returned, which are, in order: the total number
271 * of viewer streams of the session, the number of unsent streams and the number
272 * of streams created. Those counters can be NULL and thus will be ignored.
273 *
274 * session must be locked to ensure that we see either none or all initial
275 * streams for a session, but no intermediate state.
276 *
277 * Return 0 on success or else a negative value.
278 */
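/*
 * For example, viewer_attach_session() below only cares about the total count
 * and passes NULL for the unsent/created counters, while
 * viewer_get_new_streams() requests all three so it can report only the new
 * and unsent streams.
 */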
279 static int make_viewer_streams(struct relay_session *session,
280 struct lttng_trace_chunk *viewer_trace_chunk,
281 enum lttng_viewer_seek seek_t,
282 uint32_t *nb_total,
283 uint32_t *nb_unsent,
284 uint32_t *nb_created,
285 bool *closed)
286 {
287 int ret;
288 struct lttng_ht_iter iter;
289 struct ctf_trace *ctf_trace;
290
291 assert(session);
292 ASSERT_LOCKED(session->lock);
293
294 if (session->connection_closed) {
295 *closed = true;
296 }
297
298 /*
299 * Create viewer streams for relay streams that are ready to be
300 * used for the given session id only.
301 */
302 rcu_read_lock();
303 cds_lfht_for_each_entry(session->ctf_traces_ht->ht, &iter.iter, ctf_trace,
304 node.node) {
305 struct relay_stream *stream;
306
307 health_code_update();
308
309 if (!ctf_trace_get(ctf_trace)) {
310 continue;
311 }
312
313 cds_list_for_each_entry_rcu(stream, &ctf_trace->stream_list, stream_node) {
314 struct relay_viewer_stream *vstream;
315
316 if (!stream_get(stream)) {
317 continue;
318 }
319 /*
320 * The stream 'published' state is protected by the session lock.
321 */
322 if (!stream->published) {
323 goto next;
324 }
325 vstream = viewer_stream_get_by_id(stream->stream_handle);
326 if (!vstream) {
327 vstream = viewer_stream_create(stream,
328 viewer_trace_chunk, seek_t);
329 if (!vstream) {
330 ret = -1;
331 ctf_trace_put(ctf_trace);
332 stream_put(stream);
333 goto error_unlock;
334 }
335
336 if (nb_created) {
337 /* Update number of created stream counter. */
338 (*nb_created)++;
339 }
340 /*
341 * Ensure a self-reference is preserved even
342 * after we have put our local reference.
343 */
344 if (!viewer_stream_get(vstream)) {
345 ERR("Unable to get self-reference on viewer stream, logic error.");
346 abort();
347 }
348 } else {
349 if (!vstream->sent_flag && nb_unsent) {
350 /* Update number of unsent stream counter. */
351 (*nb_unsent)++;
352 }
353 }
354 /* Update number of total stream counter. */
355 if (nb_total) {
356 if (stream->is_metadata) {
357 if (!stream->closed ||
358 stream->metadata_received > vstream->metadata_sent) {
359 (*nb_total)++;
360 }
361 } else {
362 if (!stream->closed ||
363 !(((int64_t) (stream->prev_data_seq - stream->last_net_seq_num)) >= 0)) {
364
365 (*nb_total)++;
366 }
367 }
368 }
369 /* Put local reference. */
370 viewer_stream_put(vstream);
371 next:
372 stream_put(stream);
373 }
374 ctf_trace_put(ctf_trace);
375 }
376
377 ret = 0;
378
379 error_unlock:
380 rcu_read_unlock();
381 return ret;
382 }
383
384 int relayd_live_stop(void)
385 {
386 /* Stop dispatch thread */
387 CMM_STORE_SHARED(live_dispatch_thread_exit, 1);
388 futex_nto1_wake(&viewer_conn_queue.futex);
389 return 0;
390 }
391
392 /*
393 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
394 */
395 static
396 int create_thread_poll_set(struct lttng_poll_event *events, int size)
397 {
398 int ret;
399
400 if (events == NULL || size == 0) {
401 ret = -1;
402 goto error;
403 }
404
405 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
406 if (ret < 0) {
407 goto error;
408 }
409
410 /* Add quit pipe */
411 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
412 if (ret < 0) {
413 goto error;
414 }
415
416 return 0;
417
418 error:
419 return ret;
420 }
421
422 /*
423 * Check if the thread quit pipe was triggered.
424 *
425 * Return 1 if it was triggered, else 0.
426 */
427 static
428 int check_thread_quit_pipe(int fd, uint32_t events)
429 {
430 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
431 return 1;
432 }
433
434 return 0;
435 }
436
437 /*
438 * Create and init socket from uri.
439 */
440 static
441 struct lttcomm_sock *init_socket(struct lttng_uri *uri)
442 {
443 int ret;
444 struct lttcomm_sock *sock = NULL;
445
446 sock = lttcomm_alloc_sock_from_uri(uri);
447 if (sock == NULL) {
448 ERR("Allocating socket");
449 goto error;
450 }
451
452 ret = lttcomm_create_sock(sock);
453 if (ret < 0) {
454 goto error;
455 }
456 DBG("Listening on sock %d for lttng-live", sock->fd);
457
458 ret = sock->ops->bind(sock);
459 if (ret < 0) {
460 PERROR("Failed to bind lttng-live socket");
461 goto error;
462 }
463
464 ret = sock->ops->listen(sock, -1);
465 if (ret < 0) {
466 goto error;
467
468 }
469
470 return sock;
471
472 error:
473 if (sock) {
474 lttcomm_destroy_sock(sock);
475 }
476 return NULL;
477 }
478
479 /*
480 * This thread manages the listening for new connections on the network
481 */
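/*
 * This thread only polls two file descriptors: the thread quit pipe and the
 * live control socket. Every accepted viewer socket is handed off immediately
 * and is never polled here.
 */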
482 static
483 void *thread_listener(void *data)
484 {
485 int i, ret, pollfd, err = -1;
486 uint32_t revents, nb_fd;
487 struct lttng_poll_event events;
488 struct lttcomm_sock *live_control_sock;
489
490 DBG("[thread] Relay live listener started");
491
492 health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_LISTENER);
493
494 health_code_update();
495
496 live_control_sock = init_socket(live_uri);
497 if (!live_control_sock) {
498 goto error_sock_control;
499 }
500
501 /* Pass 2 as size here for the thread quit pipe and the control socket. */
502 ret = create_thread_poll_set(&events, 2);
503 if (ret < 0) {
504 goto error_create_poll;
505 }
506
507 /* Add the control socket */
508 ret = lttng_poll_add(&events, live_control_sock->fd, LPOLLIN | LPOLLRDHUP);
509 if (ret < 0) {
510 goto error_poll_add;
511 }
512
513 lttng_relay_notify_ready();
514
515 if (testpoint(relayd_thread_live_listener)) {
516 goto error_testpoint;
517 }
518
519 while (1) {
520 health_code_update();
521
522 DBG("Listener accepting live viewers connections");
523
524 restart:
525 health_poll_entry();
526 ret = lttng_poll_wait(&events, -1);
527 health_poll_exit();
528 if (ret < 0) {
529 /*
530 * Restart interrupted system call.
531 */
532 if (errno == EINTR) {
533 goto restart;
534 }
535 goto error;
536 }
537 nb_fd = ret;
538
539 DBG("Relay new viewer connection received");
540 for (i = 0; i < nb_fd; i++) {
541 health_code_update();
542
543 /* Fetch once the poll data */
544 revents = LTTNG_POLL_GETEV(&events, i);
545 pollfd = LTTNG_POLL_GETFD(&events, i);
546
547 /* Thread quit pipe has been closed. Killing thread. */
548 ret = check_thread_quit_pipe(pollfd, revents);
549 if (ret) {
550 err = 0;
551 goto exit;
552 }
553
554 if (revents & LPOLLIN) {
555 /*
556 * A new connection is requested, therefore a
557 * viewer connection is allocated in this
558 * thread, enqueued to a global queue, dequeued by the
559 * dispatcher thread and eventually released in the worker thread.
560 */
561 int val = 1;
562 struct relay_connection *new_conn;
563 struct lttcomm_sock *newsock;
564
565 newsock = live_control_sock->ops->accept(live_control_sock);
566 if (!newsock) {
567 PERROR("accepting control sock");
568 goto error;
569 }
570 DBG("Relay viewer connection accepted socket %d", newsock->fd);
571
572 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
573 sizeof(val));
574 if (ret < 0) {
575 PERROR("setsockopt inet");
576 lttcomm_destroy_sock(newsock);
577 goto error;
578 }
579 new_conn = connection_create(newsock, RELAY_CONNECTION_UNKNOWN);
580 if (!new_conn) {
581 lttcomm_destroy_sock(newsock);
582 goto error;
583 }
584 /* Ownership assumed by the connection. */
585 newsock = NULL;
586
587 /* Enqueue request for the dispatcher thread. */
588 cds_wfcq_enqueue(&viewer_conn_queue.head, &viewer_conn_queue.tail,
589 &new_conn->qnode);
590
591 /*
592 * Wake the dispatch queue futex.
593 * Implicit memory barrier with the
594 * exchange in cds_wfcq_enqueue.
595 */
596 futex_nto1_wake(&viewer_conn_queue.futex);
597 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
598 ERR("socket poll error");
599 goto error;
600 } else {
601 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
602 goto error;
603 }
604 }
605 }
606
607 exit:
608 error:
609 error_poll_add:
610 error_testpoint:
611 lttng_poll_clean(&events);
612 error_create_poll:
613 if (live_control_sock->fd >= 0) {
614 ret = live_control_sock->ops->close(live_control_sock);
615 if (ret) {
616 PERROR("close");
617 }
618 }
619 lttcomm_destroy_sock(live_control_sock);
620 error_sock_control:
621 if (err) {
622 health_error();
623 DBG("Live viewer listener thread exited with error");
624 }
625 health_unregister(health_relayd);
626 DBG("Live viewer listener thread cleanup complete");
627 if (lttng_relay_stop_threads()) {
628 ERR("Error stopping threads");
629 }
630 return NULL;
631 }
632
633 /*
634 * This thread manages the dispatching of the requests to worker threads
635 */
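/*
 * Dequeueing uses the futex_nto1_prepare()/futex_nto1_wait() pair: the futex
 * is armed before the exit flag is checked and the queue drained, so a
 * wake-up from the listener that races with the drain is not lost.
 */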
636 static
637 void *thread_dispatcher(void *data)
638 {
639 int err = -1;
640 ssize_t ret;
641 struct cds_wfcq_node *node;
642 struct relay_connection *conn = NULL;
643
644 DBG("[thread] Live viewer relay dispatcher started");
645
646 health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_DISPATCHER);
647
648 if (testpoint(relayd_thread_live_dispatcher)) {
649 goto error_testpoint;
650 }
651
652 health_code_update();
653
654 for (;;) {
655 health_code_update();
656
657 /* Atomically prepare the queue futex */
658 futex_nto1_prepare(&viewer_conn_queue.futex);
659
660 if (CMM_LOAD_SHARED(live_dispatch_thread_exit)) {
661 break;
662 }
663
664 do {
665 health_code_update();
666
667 /* Dequeue commands */
668 node = cds_wfcq_dequeue_blocking(&viewer_conn_queue.head,
669 &viewer_conn_queue.tail);
670 if (node == NULL) {
671 DBG("Woken up but nothing in the live-viewer "
672 "relay command queue");
673 /* Continue thread execution */
674 break;
675 }
676 conn = caa_container_of(node, struct relay_connection, qnode);
677 DBG("Dispatching viewer request waiting on sock %d",
678 conn->sock->fd);
679
680 /*
681 * Inform worker thread of the new request. This
682 * call is blocking so we can be assured that
683 * the data will be read at some point in time
684 * or wait to the end of the world :)
685 */
686 ret = lttng_write(live_conn_pipe[1], &conn, sizeof(conn));
687 if (ret < 0) {
688 PERROR("write conn pipe");
689 connection_put(conn);
690 goto error;
691 }
692 } while (node != NULL);
693
694 /* Futex wait on queue. Blocking call on futex() */
695 health_poll_entry();
696 futex_nto1_wait(&viewer_conn_queue.futex);
697 health_poll_exit();
698 }
699
700 /* Normal exit, no error */
701 err = 0;
702
703 error:
704 error_testpoint:
705 if (err) {
706 health_error();
707 ERR("Health error occurred in %s", __func__);
708 }
709 health_unregister(health_relayd);
710 DBG("Live viewer dispatch thread dying");
711 if (lttng_relay_stop_threads()) {
712 ERR("Error stopping threads");
713 }
714 return NULL;
715 }
716
717 /*
718 * Establish connection with the viewer and check the versions.
719 *
720 * Return 0 on success or else negative value.
721 */
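/*
 * Handshake summary: the client sends a struct lttng_viewer_connect carrying
 * its major/minor version and connection type in big endian; the relayd
 * refuses mismatched major versions, adopts the lowest compatible minor
 * version, and replies with its own version plus, for command connections, a
 * newly allocated viewer session id.
 */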
722 static
723 int viewer_connect(struct relay_connection *conn)
724 {
725 int ret;
726 struct lttng_viewer_connect reply, msg;
727
728 conn->version_check_done = 1;
729
730 health_code_update();
731
732 DBG("Viewer is establishing a connection to the relayd.");
733
734 ret = recv_request(conn->sock, &msg, sizeof(msg));
735 if (ret < 0) {
736 goto end;
737 }
738
739 health_code_update();
740
741 memset(&reply, 0, sizeof(reply));
742 reply.major = RELAYD_VERSION_COMM_MAJOR;
743 reply.minor = RELAYD_VERSION_COMM_MINOR;
744
745 /* Major versions must be the same */
746 if (reply.major != be32toh(msg.major)) {
747 DBG("Incompatible major versions ([relayd] %u vs [client] %u)",
748 reply.major, be32toh(msg.major));
749 ret = -1;
750 goto end;
751 }
752
753 conn->major = reply.major;
754 /* We adapt to the lowest compatible version */
755 if (reply.minor <= be32toh(msg.minor)) {
756 conn->minor = reply.minor;
757 } else {
758 conn->minor = be32toh(msg.minor);
759 }
760
761 if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_COMMAND) {
762 conn->type = RELAY_VIEWER_COMMAND;
763 } else if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_NOTIFICATION) {
764 conn->type = RELAY_VIEWER_NOTIFICATION;
765 } else {
766 ERR("Unknown connection type : %u", be32toh(msg.type));
767 ret = -1;
768 goto end;
769 }
770
771 reply.major = htobe32(reply.major);
772 reply.minor = htobe32(reply.minor);
773 if (conn->type == RELAY_VIEWER_COMMAND) {
774 /*
775 * Increment outside of htobe64 macro, because the argument can
776 * be used more than once within the macro, and thus the
777 * operation may be undefined.
778 */
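/*
 * As an illustration (hypothetical macro expansion, not the actual one): with
 * something like
 *   #define htobe64(x) (((x) >> 56) | ... | ((x) << 56))
 * writing htobe64(last_relay_viewer_session_id++) would evaluate the
 * increment several times.
 */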
779 pthread_mutex_lock(&last_relay_viewer_session_id_lock);
780 last_relay_viewer_session_id++;
781 pthread_mutex_unlock(&last_relay_viewer_session_id_lock);
782 reply.viewer_session_id = htobe64(last_relay_viewer_session_id);
783 }
784
785 health_code_update();
786
787 ret = send_response(conn->sock, &reply, sizeof(reply));
788 if (ret < 0) {
789 goto end;
790 }
791
792 health_code_update();
793
794 DBG("Version check done using protocol %u.%u", conn->major, conn->minor);
795 ret = 0;
796
797 end:
798 return ret;
799 }
800
801 /*
802 * Send the viewer the list of current sessions.
803 * We need to create a copy of the hash table content because otherwise
804 * we cannot assume the number of entries stays the same between getting
805 * the number of HT elements and iterating over the HT.
806 *
807 * Return 0 on success or else a negative value.
808 */
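/*
 * The snapshot buffer starts at SESSION_BUF_DEFAULT_COUNT entries and is
 * doubled with realloc() whenever the iteration outgrows it; closed sessions
 * and sessions without a current trace chunk are skipped.
 */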
809 static
810 int viewer_list_sessions(struct relay_connection *conn)
811 {
812 int ret = 0;
813 struct lttng_viewer_list_sessions session_list;
814 struct lttng_ht_iter iter;
815 struct relay_session *session;
816 struct lttng_viewer_session *send_session_buf = NULL;
817 uint32_t buf_count = SESSION_BUF_DEFAULT_COUNT;
818 uint32_t count = 0;
819
820 DBG("List sessions received");
821
822 send_session_buf = zmalloc(SESSION_BUF_DEFAULT_COUNT * sizeof(*send_session_buf));
823 if (!send_session_buf) {
824 return -1;
825 }
826
827 rcu_read_lock();
828 cds_lfht_for_each_entry(sessions_ht->ht, &iter.iter, session,
829 session_n.node) {
830 struct lttng_viewer_session *send_session;
831
832 health_code_update();
833
834 pthread_mutex_lock(&session->lock);
835 if (session->connection_closed) {
836 /* Skip closed session */
837 goto next_session;
838 }
839 if (!session->current_trace_chunk) {
840 /*
841 * Skip un-attachable session. It is either
842 * being destroyed or has not had a trace
843 * chunk created against it yet.
844 */
845 goto next_session;
846 }
847
848 if (count >= buf_count) {
849 struct lttng_viewer_session *newbuf;
850 uint32_t new_buf_count = buf_count << 1;
851
852 newbuf = realloc(send_session_buf,
853 new_buf_count * sizeof(*send_session_buf));
854 if (!newbuf) {
855 ret = -1;
856 goto break_loop;
857 }
858 send_session_buf = newbuf;
859 buf_count = new_buf_count;
860 }
861 send_session = &send_session_buf[count];
862 if (lttng_strncpy(send_session->session_name,
863 session->session_name,
864 sizeof(send_session->session_name))) {
865 ret = -1;
866 goto break_loop;
867 }
868 if (lttng_strncpy(send_session->hostname, session->hostname,
869 sizeof(send_session->hostname))) {
870 ret = -1;
871 goto break_loop;
872 }
873 send_session->id = htobe64(session->id);
874 send_session->live_timer = htobe32(session->live_timer);
875 if (session->viewer_attached) {
876 send_session->clients = htobe32(1);
877 } else {
878 send_session->clients = htobe32(0);
879 }
880 send_session->streams = htobe32(session->stream_count);
881 count++;
882 next_session:
883 pthread_mutex_unlock(&session->lock);
884 continue;
885 break_loop:
886 pthread_mutex_unlock(&session->lock);
887 break;
888 }
889 rcu_read_unlock();
890 if (ret < 0) {
891 goto end_free;
892 }
893
894 session_list.sessions_count = htobe32(count);
895
896 health_code_update();
897
898 ret = send_response(conn->sock, &session_list, sizeof(session_list));
899 if (ret < 0) {
900 goto end_free;
901 }
902
903 health_code_update();
904
905 ret = send_response(conn->sock, send_session_buf,
906 count * sizeof(*send_session_buf));
907 if (ret < 0) {
908 goto end_free;
909 }
910 health_code_update();
911
912 ret = 0;
913 end_free:
914 free(send_session_buf);
915 return ret;
916 }
917
918 /*
919 * Send the viewer the list of current streams.
920 */
921 static
922 int viewer_get_new_streams(struct relay_connection *conn)
923 {
924 int ret, send_streams = 0;
925 uint32_t nb_created = 0, nb_unsent = 0, nb_streams = 0, nb_total = 0;
926 struct lttng_viewer_new_streams_request request;
927 struct lttng_viewer_new_streams_response response;
928 struct relay_session *session = NULL;
929 uint64_t session_id;
930 bool closed = false;
931
932 assert(conn);
933
934 DBG("Get new streams received");
935
936 health_code_update();
937
938 /* Receive the request from the connected client. */
939 ret = recv_request(conn->sock, &request, sizeof(request));
940 if (ret < 0) {
941 goto error;
942 }
943 session_id = be64toh(request.session_id);
944
945 health_code_update();
946
947 memset(&response, 0, sizeof(response));
948
949 session = session_get_by_id(session_id);
950 if (!session) {
951 DBG("Relay session %" PRIu64 " not found", session_id);
952 response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
953 goto send_reply;
954 }
955
956 if (!viewer_session_is_attached(conn->viewer_session, session)) {
957 send_streams = 0;
958 response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
959 goto send_reply;
960 }
961
962 send_streams = 1;
963 response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_OK);
964
965 pthread_mutex_lock(&session->lock);
966 if (!conn->viewer_session->current_trace_chunk &&
967 session->current_trace_chunk) {
968 ret = viewer_session_set_trace_chunk(conn->viewer_session,
969 session->current_trace_chunk);
970 if (ret) {
971 goto error_unlock_session;
972 }
973 }
974 ret = make_viewer_streams(session,
975 conn->viewer_session->current_trace_chunk,
976 LTTNG_VIEWER_SEEK_LAST, &nb_total, &nb_unsent,
977 &nb_created, &closed);
978 if (ret < 0) {
979 goto error_unlock_session;
980 }
981 pthread_mutex_unlock(&session->lock);
982
983 /* Only send back the newly created streams with the unsent ones. */
984 nb_streams = nb_created + nb_unsent;
985 response.streams_count = htobe32(nb_streams);
986
987 /*
988 * If the session is closed, HUP when there are no more streams
989 * with data.
990 */
991 if (closed && nb_total == 0) {
992 send_streams = 0;
993 response.streams_count = 0;
994 response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_HUP);
995 goto send_reply;
996 }
997
998 send_reply:
999 health_code_update();
1000 ret = send_response(conn->sock, &response, sizeof(response));
1001 if (ret < 0) {
1002 goto end_put_session;
1003 }
1004 health_code_update();
1005
1006 /*
1007 * Unknown or empty session, just return gracefully, the viewer
1008 * knows what is happening.
1009 */
1010 if (!send_streams || !nb_streams) {
1011 ret = 0;
1012 goto end_put_session;
1013 }
1014
1015 /*
1016 * Send streams and *DON'T* ignore the sent flag, so that every viewer
1017 * stream that has not been sent up to that point will be sent to
1018 * the viewer.
1019 */
1020 ret = send_viewer_streams(conn->sock, session_id, 0);
1021 if (ret < 0) {
1022 goto end_put_session;
1023 }
1024
1025 end_put_session:
1026 if (session) {
1027 session_put(session);
1028 }
1029 error:
1030 return ret;
1031 error_unlock_session:
1032 pthread_mutex_unlock(&session->lock);
1033 session_put(session);
1034 return ret;
1035 }
1036
1037 /*
1038 * Attach the viewer to a session and send it the list of the session's streams.
1039 */
1040 static
1041 int viewer_attach_session(struct relay_connection *conn)
1042 {
1043 int send_streams = 0;
1044 ssize_t ret;
1045 uint32_t nb_streams = 0;
1046 enum lttng_viewer_seek seek_type;
1047 struct lttng_viewer_attach_session_request request;
1048 struct lttng_viewer_attach_session_response response;
1049 struct relay_session *session = NULL;
1050 bool closed = false;
1051 uint64_t session_id;
1052
1053 assert(conn);
1054
1055 health_code_update();
1056
1057 /* Receive the request from the connected client. */
1058 ret = recv_request(conn->sock, &request, sizeof(request));
1059 if (ret < 0) {
1060 goto error;
1061 }
1062
1063 session_id = be64toh(request.session_id);
1064 health_code_update();
1065
1066 memset(&response, 0, sizeof(response));
1067
1068 if (!conn->viewer_session) {
1069 DBG("Client trying to attach before creating a live viewer session");
1070 response.status = htobe32(LTTNG_VIEWER_ATTACH_NO_SESSION);
1071 goto send_reply;
1072 }
1073
1074 session = session_get_by_id(session_id);
1075 if (!session) {
1076 DBG("Relay session %" PRIu64 " not found", session_id);
1077 response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
1078 goto send_reply;
1079 }
1080 DBG("Attach session ID %" PRIu64 " received", session_id);
1081
1082 pthread_mutex_lock(&session->lock);
1083 if (session->live_timer == 0) {
1084 DBG("Not live session");
1085 response.status = htobe32(LTTNG_VIEWER_ATTACH_NOT_LIVE);
1086 goto send_reply;
1087 }
1088
1089 send_streams = 1;
1090 ret = viewer_session_attach(conn->viewer_session, session);
1091 if (ret) {
1092 DBG("Already a viewer attached");
1093 response.status = htobe32(LTTNG_VIEWER_ATTACH_ALREADY);
1094 goto send_reply;
1095 }
1096
1097 switch (be32toh(request.seek)) {
1098 case LTTNG_VIEWER_SEEK_BEGINNING:
1099 case LTTNG_VIEWER_SEEK_LAST:
1100 response.status = htobe32(LTTNG_VIEWER_ATTACH_OK);
1101 seek_type = be32toh(request.seek);
1102 break;
1103 default:
1104 ERR("Wrong seek parameter");
1105 response.status = htobe32(LTTNG_VIEWER_ATTACH_SEEK_ERR);
1106 send_streams = 0;
1107 goto send_reply;
1108 }
1109
1110 if (!conn->viewer_session->current_trace_chunk &&
1111 session->current_trace_chunk) {
1112 ret = viewer_session_set_trace_chunk(conn->viewer_session,
1113 session->current_trace_chunk);
1114 if (ret) {
1115 goto end_put_session;
1116 }
1117 }
1118 ret = make_viewer_streams(session,
1119 conn->viewer_session->current_trace_chunk, seek_type,
1120 &nb_streams, NULL, NULL, &closed);
1121 if (ret < 0) {
1122 goto end_put_session;
1123 }
1124 pthread_mutex_unlock(&session->lock);
1125 session_put(session);
1126 session = NULL;
1127
1128 response.streams_count = htobe32(nb_streams);
1129 /*
1130 * If the session is closed when the viewer is attaching, it
1131 * means some of the streams may have been concurrently removed,
1132 * so we don't allow the viewer to attach, even if there are
1133 * streams available.
1134 */
1135 if (closed) {
1136 send_streams = 0;
1137 response.streams_count = 0;
1138 response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
1139 goto send_reply;
1140 }
1141
1142 send_reply:
1143 health_code_update();
1144 ret = send_response(conn->sock, &response, sizeof(response));
1145 if (ret < 0) {
1146 goto end_put_session;
1147 }
1148 health_code_update();
1149
1150 /*
1151 * Unknown or empty session, just return gracefully, the viewer
1152 * knows what is happening.
1153 */
1154 if (!send_streams || !nb_streams) {
1155 ret = 0;
1156 goto end_put_session;
1157 }
1158
1159 /* Send stream and ignore the sent flag. */
1160 ret = send_viewer_streams(conn->sock, session_id, 1);
1161 if (ret < 0) {
1162 goto end_put_session;
1163 }
1164
1165 end_put_session:
1166 if (session) {
1167 pthread_mutex_unlock(&session->lock);
1168 session_put(session);
1169 }
1170 error:
1171 return ret;
1172 }
1173
1174 /*
1175 * Open the index file if needed for the given vstream.
1176 *
1177 * If an index file is successfully opened, the vstream will set it as its
1178 * current index file.
1179 *
1180 * Return 0 on success, a negative value on error (-ENOENT if not ready yet).
1181 *
1182 * Called with rstream lock held.
1183 */
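/*
 * -ENOENT is returned while rstream->index_received_seqcount is still 0, i.e.
 * before the first index has been received; the caller translates this into
 * LTTNG_VIEWER_INDEX_RETRY so the viewer simply retries later.
 */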
1184 static int try_open_index(struct relay_viewer_stream *vstream,
1185 struct relay_stream *rstream)
1186 {
1187 int ret = 0;
1188 const uint32_t connection_major = rstream->trace->session->major;
1189 const uint32_t connection_minor = rstream->trace->session->minor;
1190
1191 if (vstream->index_file) {
1192 goto end;
1193 }
1194
1195 /*
1196 * First time: only open the index file once at least one index has been received.
1197 */
1198 if (rstream->index_received_seqcount == 0) {
1199 ret = -ENOENT;
1200 goto end;
1201 }
1202 vstream->index_file = lttng_index_file_create_from_trace_chunk_read_only(
1203 vstream->stream_file.trace_chunk, rstream->path_name,
1204 rstream->channel_name, rstream->tracefile_size,
1205 vstream->current_tracefile_id,
1206 lttng_to_index_major(connection_major, connection_minor),
1207 lttng_to_index_minor(connection_major, connection_minor));
1208 if (!vstream->index_file) {
1209 ret = -1;
1210 }
1211
1212 end:
1213 return ret;
1214 }
1215
1216 /*
1217 * Check the status of the index for the given stream. This function
1218 * updates the index structure if needed and can put (close) the vstream
1219 * in the HUP situation.
1220 *
1221 * Return 0 means that we can proceed with the index. A value of 1 means
1222 * that the index has been updated and is ready to be sent to the
1223 * client. A negative value indicates an error that can't be handled.
1224 *
1225 * Called with rstream lock held.
1226 */
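/*
 * Status summary: LTTNG_VIEWER_INDEX_HUP when the stream or session is closed
 * and every received index has been sent, LTTNG_VIEWER_INDEX_INACTIVE when a
 * synchronization beacon was received and all available indexes were already
 * sent, and LTTNG_VIEWER_INDEX_RETRY when the next index simply is not
 * available yet.
 */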
1227 static int check_index_status(struct relay_viewer_stream *vstream,
1228 struct relay_stream *rstream, struct ctf_trace *trace,
1229 struct lttng_viewer_index *index)
1230 {
1231 int ret;
1232
1233 if ((trace->session->connection_closed || rstream->closed)
1234 && rstream->index_received_seqcount
1235 == vstream->index_sent_seqcount) {
1236 /*
1237 * Last index sent and session connection or relay
1238 * stream are closed.
1239 */
1240 index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
1241 goto hup;
1242 } else if (rstream->beacon_ts_end != -1ULL &&
1243 rstream->index_received_seqcount
1244 == vstream->index_sent_seqcount) {
1245 /*
1246 * We've received a synchronization beacon and the last index
1247 * available has been sent, the index for now is inactive.
1248 *
1249 * In this case, we have received a beacon which allows us to
1250 * inform the client of a time interval during which we can
1251 * guarantee that there are no events to read (and never will
1252 * be).
1253 */
1254 index->status = htobe32(LTTNG_VIEWER_INDEX_INACTIVE);
1255 index->timestamp_end = htobe64(rstream->beacon_ts_end);
1256 index->stream_id = htobe64(rstream->ctf_stream_id);
1257 goto index_ready;
1258 } else if (rstream->index_received_seqcount
1259 == vstream->index_sent_seqcount) {
1260 /*
1261 * This checks whether received == sent seqcount. In
1262 * this case, we have not received a beacon. Therefore,
1263 * we can only ask the client to retry later.
1264 */
1265 index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
1266 goto index_ready;
1267 } else if (!tracefile_array_seq_in_file(rstream->tfa,
1268 vstream->current_tracefile_id,
1269 vstream->index_sent_seqcount)) {
1270 /*
1271 * The next index we want to send cannot be read either
1272 * because we need to perform a rotation, or due to
1273 * the producer having overwritten its trace file.
1274 */
1275 DBG("Viewer stream %" PRIu64 " rotation",
1276 vstream->stream->stream_handle);
1277 ret = viewer_stream_rotate(vstream);
1278 if (ret < 0) {
1279 goto end;
1280 } else if (ret == 1) {
1281 /* EOF across entire stream. */
1282 index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
1283 goto hup;
1284 }
1285 /*
1286 * If we have been pushed due to overwrite, it
1287 * necessarily means there is data that can be read in
1288 * the stream. If we rotated because we reached the end
1289 * of a tracefile, it means the following tracefile
1290 * needs to contain at least one index, else we would
1291 * have already returned LTTNG_VIEWER_INDEX_RETRY to the
1292 * viewer. The updated index_sent_seqcount needs to
1293 * point to a readable index entry now.
1294 *
1295 * In the case where we "rotate" on a single file, we
1296 * can end up in a case where the requested index is
1297 * still unavailable.
1298 */
1299 if (rstream->tracefile_count == 1 &&
1300 !tracefile_array_seq_in_file(
1301 rstream->tfa,
1302 vstream->current_tracefile_id,
1303 vstream->index_sent_seqcount)) {
1304 index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
1305 goto index_ready;
1306 }
1307 assert(tracefile_array_seq_in_file(rstream->tfa,
1308 vstream->current_tracefile_id,
1309 vstream->index_sent_seqcount));
1310 }
1311 /* ret == 0 means successful so we continue. */
1312 ret = 0;
1313 end:
1314 return ret;
1315
1316 hup:
1317 viewer_stream_put(vstream);
1318 index_ready:
1319 return 1;
1320 }
1321
1322 /*
1323 * Send the next index for a stream.
1324 *
1325 * Return 0 on success or else a negative value.
1326 */
1327 static
1328 int viewer_get_next_index(struct relay_connection *conn)
1329 {
1330 int ret;
1331 struct lttng_viewer_get_next_index request_index;
1332 struct lttng_viewer_index viewer_index;
1333 struct ctf_packet_index packet_index;
1334 struct relay_viewer_stream *vstream = NULL;
1335 struct relay_stream *rstream = NULL;
1336 struct ctf_trace *ctf_trace = NULL;
1337 struct relay_viewer_stream *metadata_viewer_stream = NULL;
1338
1339 assert(conn);
1340
1341 DBG("Viewer get next index");
1342
1343 memset(&viewer_index, 0, sizeof(viewer_index));
1344 health_code_update();
1345
1346 ret = recv_request(conn->sock, &request_index, sizeof(request_index));
1347 if (ret < 0) {
1348 goto end;
1349 }
1350 health_code_update();
1351
1352 vstream = viewer_stream_get_by_id(be64toh(request_index.stream_id));
1353 if (!vstream) {
1354 DBG("Client requested index of unknown stream id %" PRIu64,
1355 (uint64_t) be64toh(request_index.stream_id));
1356 viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
1357 goto send_reply;
1358 }
1359
1360 /* Use back. ref. Protected by refcounts. */
1361 rstream = vstream->stream;
1362 ctf_trace = rstream->trace;
1363
1364 /* metadata_viewer_stream may be NULL. */
1365 metadata_viewer_stream =
1366 ctf_trace_get_viewer_metadata_stream(ctf_trace);
1367
1368 pthread_mutex_lock(&rstream->lock);
1369
1370 /*
1371 * The viewer should not ask for an index on a metadata stream.
1372 */
1373 if (rstream->is_metadata) {
1374 viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
1375 goto send_reply;
1376 }
1377
1378 /* Try to open an index if one is needed for that stream. */
1379 ret = try_open_index(vstream, rstream);
1380 if (ret < 0) {
1381 if (ret == -ENOENT) {
1382 /*
1383 * The index is created only when the first data
1384 * packet arrives, it might not be ready at the
1385 * beginning of the session
1386 */
1387 viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
1388 } else {
1389 /* Unhandled error. */
1390 viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
1391 }
1392 goto send_reply;
1393 }
1394
1395 ret = check_index_status(vstream, rstream, ctf_trace, &viewer_index);
1396 if (ret < 0) {
1397 goto error_put;
1398 } else if (ret == 1) {
1399 /*
1400 * We have no index to send and check_index_status has populated
1401 * viewer_index's status.
1402 */
1403 goto send_reply;
1404 }
1405 /* At this point, ret is 0 thus we will be able to read the index. */
1406 assert(!ret);
1407
1408 /*
1409 * vstream->stream_file.fd may be NULL if it has been closed by
1410 * tracefile rotation, or if we are at the beginning of the
1411 * stream. We open the data stream file here to protect against
1412 * overwrite caused by tracefile rotation (in association with
1413 * unlink performed before overwrite).
1414 */
1415 if (!vstream->stream_file.fd) {
1416 int fd;
1417 char file_path[LTTNG_PATH_MAX];
1418 enum lttng_trace_chunk_status status;
1419
1420 ret = utils_stream_file_path(rstream->path_name,
1421 rstream->channel_name, rstream->tracefile_size,
1422 vstream->current_tracefile_id, NULL, file_path,
1423 sizeof(file_path));
1424 if (ret < 0) {
1425 goto error_put;
1426 }
1427
1428 status = lttng_trace_chunk_open_file(
1429 vstream->stream_file.trace_chunk,
1430 file_path, O_RDONLY, 0, &fd);
1431 if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
1432 PERROR("Failed to open trace file for viewer stream");
1433 goto error_put;
1434 }
1435 vstream->stream_file.fd = stream_fd_create(fd);
1436 if (!vstream->stream_file.fd) {
1437 if (close(fd)) {
1438 PERROR("Failed to close viewer stream file");
1439 }
1440 goto error_put;
1441 }
1442 }
1443
1444 ret = check_new_streams(conn);
1445 if (ret < 0) {
1446 viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
1447 goto send_reply;
1448 } else if (ret == 1) {
1449 viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
1450 }
1451
1452 ret = lttng_index_file_read(vstream->index_file, &packet_index);
1453 if (ret) {
1454 ERR("Relay error reading index file %d",
1455 vstream->index_file->fd);
1456 viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
1457 goto send_reply;
1458 } else {
1459 viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_OK);
1460 vstream->index_sent_seqcount++;
1461 }
1462
1463 /*
1464 * Indexes are stored in big endian, no need to switch before sending.
1465 */
1466 DBG("Sending viewer index for stream %" PRIu64 " offset %" PRIu64,
1467 rstream->stream_handle,
1468 (uint64_t) be64toh(packet_index.offset));
1469 viewer_index.offset = packet_index.offset;
1470 viewer_index.packet_size = packet_index.packet_size;
1471 viewer_index.content_size = packet_index.content_size;
1472 viewer_index.timestamp_begin = packet_index.timestamp_begin;
1473 viewer_index.timestamp_end = packet_index.timestamp_end;
1474 viewer_index.events_discarded = packet_index.events_discarded;
1475 viewer_index.stream_id = packet_index.stream_id;
1476
1477 send_reply:
1478 if (rstream) {
1479 pthread_mutex_unlock(&rstream->lock);
1480 }
1481
1482 if (metadata_viewer_stream) {
1483 pthread_mutex_lock(&metadata_viewer_stream->stream->lock);
1484 DBG("get next index metadata check: recv %" PRIu64
1485 " sent %" PRIu64,
1486 metadata_viewer_stream->stream->metadata_received,
1487 metadata_viewer_stream->metadata_sent);
1488 if (!metadata_viewer_stream->stream->metadata_received ||
1489 metadata_viewer_stream->stream->metadata_received >
1490 metadata_viewer_stream->metadata_sent) {
1491 viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
1492 }
1493 pthread_mutex_unlock(&metadata_viewer_stream->stream->lock);
1494 }
1495
1496 viewer_index.flags = htobe32(viewer_index.flags);
1497 health_code_update();
1498
1499 ret = send_response(conn->sock, &viewer_index, sizeof(viewer_index));
1500 if (ret < 0) {
1501 goto end;
1502 }
1503 health_code_update();
1504
1505 if (vstream) {
1506 DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
1507 vstream->index_sent_seqcount,
1508 vstream->stream->stream_handle);
1509 }
1510 end:
1511 if (metadata_viewer_stream) {
1512 viewer_stream_put(metadata_viewer_stream);
1513 }
1514 if (vstream) {
1515 viewer_stream_put(vstream);
1516 }
1517 return ret;
1518
1519 error_put:
1520 pthread_mutex_unlock(&rstream->lock);
1521 if (metadata_viewer_stream) {
1522 viewer_stream_put(metadata_viewer_stream);
1523 }
1524 viewer_stream_put(vstream);
1525 return ret;
1526 }
1527
1528 /*
1529 * Send the requested trace packet data for a stream.
1530 *
1531 * Return 0 on success or else a negative value.
1532 */
1533 static
1534 int viewer_get_packet(struct relay_connection *conn)
1535 {
1536 int ret;
1537 off_t lseek_ret;
1538 char *reply = NULL;
1539 struct lttng_viewer_get_packet get_packet_info;
1540 struct lttng_viewer_trace_packet reply_header;
1541 struct relay_viewer_stream *vstream = NULL;
1542 uint32_t reply_size = sizeof(reply_header);
1543 uint32_t packet_data_len = 0;
1544 ssize_t read_len;
1545
1546 DBG2("Relay get data packet");
1547
1548 health_code_update();
1549
1550 ret = recv_request(conn->sock, &get_packet_info,
1551 sizeof(get_packet_info));
1552 if (ret < 0) {
1553 goto end;
1554 }
1555 health_code_update();
1556
1557 /* From this point on, the error label can be reached. */
1558 memset(&reply_header, 0, sizeof(reply_header));
1559
1560 vstream = viewer_stream_get_by_id(be64toh(get_packet_info.stream_id));
1561 if (!vstream) {
1562 DBG("Client requested packet of unknown stream id %" PRIu64,
1563 (uint64_t) be64toh(get_packet_info.stream_id));
1564 reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
1565 goto send_reply_nolock;
1566 } else {
1567 packet_data_len = be32toh(get_packet_info.len);
1568 reply_size += packet_data_len;
1569 }
1570
1571 reply = zmalloc(reply_size);
1572 if (!reply) {
1573 PERROR("packet reply zmalloc");
1574 reply_size = sizeof(reply_header);
1575 goto error;
1576 }
1577
1578 pthread_mutex_lock(&vstream->stream->lock);
1579 lseek_ret = lseek(vstream->stream_file.fd->fd,
1580 be64toh(get_packet_info.offset), SEEK_SET);
1581 if (lseek_ret < 0) {
1582 PERROR("lseek fd %d to offset %" PRIu64,
1583 vstream->stream_file.fd->fd,
1584 (uint64_t) be64toh(get_packet_info.offset));
1585 goto error;
1586 }
1587 read_len = lttng_read(vstream->stream_file.fd->fd,
1588 reply + sizeof(reply_header), packet_data_len);
1589 if (read_len < packet_data_len) {
1590 PERROR("Relay reading trace file, fd: %d, offset: %" PRIu64,
1591 vstream->stream_file.fd->fd,
1592 (uint64_t) be64toh(get_packet_info.offset));
1593 goto error;
1594 }
1595 reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_OK);
1596 reply_header.len = htobe32(packet_data_len);
1597 goto send_reply;
1598
1599 error:
1600 reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
1601
1602 send_reply:
1603 if (vstream) {
1604 pthread_mutex_unlock(&vstream->stream->lock);
1605 }
1606 send_reply_nolock:
1607
1608 health_code_update();
1609
1610 if (reply) {
1611 memcpy(reply, &reply_header, sizeof(reply_header));
1612 ret = send_response(conn->sock, reply, reply_size);
1613 } else {
1614 /* No reply to send. */
1615 ret = send_response(conn->sock, &reply_header,
1616 reply_size);
1617 }
1618
1619 health_code_update();
1620 if (ret < 0) {
1621 PERROR("sendmsg of packet data failed");
1622 goto end_free;
1623 }
1624
1625 DBG("Sent %u bytes for stream %" PRIu64, reply_size,
1626 (uint64_t) be64toh(get_packet_info.stream_id));
1627
1628 end_free:
1629 free(reply);
1630 end:
1631 if (vstream) {
1632 viewer_stream_put(vstream);
1633 }
1634 return ret;
1635 }
1636
1637 /*
1638 * Send the session's metadata
1639 *
1640 * Return 0 on success else a negative value.
1641 */
1642 static
1643 int viewer_get_metadata(struct relay_connection *conn)
1644 {
1645 int ret = 0;
1646 ssize_t read_len;
1647 uint64_t len = 0;
1648 char *data = NULL;
1649 struct lttng_viewer_get_metadata request;
1650 struct lttng_viewer_metadata_packet reply;
1651 struct relay_viewer_stream *vstream = NULL;
1652
1653 assert(conn);
1654
1655 DBG("Relay get metadata");
1656
1657 health_code_update();
1658
1659 ret = recv_request(conn->sock, &request, sizeof(request));
1660 if (ret < 0) {
1661 goto end;
1662 }
1663 health_code_update();
1664
1665 memset(&reply, 0, sizeof(reply));
1666
1667 vstream = viewer_stream_get_by_id(be64toh(request.stream_id));
1668 if (!vstream) {
1669 /*
1670 * The metadata stream can be closed by a CLOSE command
1671 * just before we attach. It can also be closed by
1672 * per-pid tracing during tracing. Therefore, it is
1673 * possible that we cannot find this viewer stream.
1674 * Reply back to the client with an error if we cannot
1675 * find it.
1676 */
1677 DBG("Client requested metadata of unknown stream id %" PRIu64,
1678 (uint64_t) be64toh(request.stream_id));
1679 reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
1680 goto send_reply;
1681 }
1682 pthread_mutex_lock(&vstream->stream->lock);
1683 if (!vstream->stream->is_metadata) {
1684 ERR("Invalid metadata stream");
1685 goto error;
1686 }
1687
1688 assert(vstream->metadata_sent <= vstream->stream->metadata_received);
1689
1690 len = vstream->stream->metadata_received - vstream->metadata_sent;
1691 if (len == 0) {
1692 reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
1693 goto send_reply;
1694 }
1695
1696 /* first time, we open the metadata file */
1697 if (!vstream->stream_file.fd) {
1698 int fd;
1699 char file_path[LTTNG_PATH_MAX];
1700 enum lttng_trace_chunk_status status;
1701 struct relay_stream *rstream = vstream->stream;
1702
1703 ret = utils_stream_file_path(rstream->path_name,
1704 rstream->channel_name, rstream->tracefile_size,
1705 vstream->current_tracefile_id, NULL, file_path,
1706 sizeof(file_path));
1707 if (ret < 0) {
1708 goto error;
1709 }
1710
1711 status = lttng_trace_chunk_open_file(
1712 vstream->stream_file.trace_chunk,
1713 file_path, O_RDONLY, 0, &fd);
1714 if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
1715 PERROR("Failed to open metadata file for viewer stream");
1716 goto error;
1717 }
1718 vstream->stream_file.fd = stream_fd_create(fd);
1719 if (!vstream->stream_file.fd) {
1720 if (close(fd)) {
1721 PERROR("Failed to close viewer metadata file");
1722 }
1723 goto error;
1724 }
1725 }
1726
1727 reply.len = htobe64(len);
1728 data = zmalloc(len);
1729 if (!data) {
1730 PERROR("viewer metadata zmalloc");
1731 goto error;
1732 }
1733
1734 read_len = lttng_read(vstream->stream_file.fd->fd, data, len);
1735 if (read_len < len) {
1736 PERROR("Relay reading metadata file");
1737 goto error;
1738 }
1739 vstream->metadata_sent += read_len;
1740 if (vstream->metadata_sent == vstream->stream->metadata_received
1741 && vstream->stream->closed) {
1742 /* Release ownership for the viewer metadata stream. */
1743 viewer_stream_put(vstream);
1744 }
1745
1746 reply.status = htobe32(LTTNG_VIEWER_METADATA_OK);
1747
1748 goto send_reply;
1749
1750 error:
1751 reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
1752
1753 send_reply:
1754 health_code_update();
1755 if (vstream) {
1756 pthread_mutex_unlock(&vstream->stream->lock);
1757 }
1758 ret = send_response(conn->sock, &reply, sizeof(reply));
1759 if (ret < 0) {
1760 goto end_free;
1761 }
1762 health_code_update();
1763
1764 if (len > 0) {
1765 ret = send_response(conn->sock, data, len);
1766 if (ret < 0) {
1767 goto end_free;
1768 }
1769 }
1770
1771 DBG("Sent %" PRIu64 " bytes of metadata for stream %" PRIu64, len,
1772 (uint64_t) be64toh(request.stream_id));
1773
1774 DBG("Metadata sent");
1775
1776 end_free:
1777 free(data);
1778 end:
1779 if (vstream) {
1780 viewer_stream_put(vstream);
1781 }
1782 return ret;
1783 }
1784
1785 /*
1786 * Create a viewer session.
1787 *
1788 * Return 0 on success or else a negative value.
1789 */
1790 static
1791 int viewer_create_session(struct relay_connection *conn)
1792 {
1793 int ret;
1794 struct lttng_viewer_create_session_response resp;
1795
1796 DBG("Viewer create session received");
1797
1798 memset(&resp, 0, sizeof(resp));
1799 resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_OK);
1800 conn->viewer_session = viewer_session_create();
1801 if (!conn->viewer_session) {
1802 ERR("Allocation viewer session");
1803 resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_ERR);
1804 goto send_reply;
1805 }
1806
1807 send_reply:
1808 health_code_update();
1809 ret = send_response(conn->sock, &resp, sizeof(resp));
1810 if (ret < 0) {
1811 goto end;
1812 }
1813 health_code_update();
1814 ret = 0;
1815
1816 end:
1817 return ret;
1818 }
1819
1820 /*
1821 * Detach a viewer session.
1822 *
1823 * Return 0 on success or else a negative value.
1824 */
1825 static
1826 int viewer_detach_session(struct relay_connection *conn)
1827 {
1828 int ret;
1829 struct lttng_viewer_detach_session_response response;
1830 struct lttng_viewer_detach_session_request request;
1831 struct relay_session *session = NULL;
1832 uint64_t viewer_session_to_close;
1833
1834 DBG("Viewer detach session received");
1835
1836 assert(conn);
1837
1838 health_code_update();
1839
1840 /* Receive the request from the connected client. */
1841 ret = recv_request(conn->sock, &request, sizeof(request));
1842 if (ret < 0) {
1843 goto end;
1844 }
1845 viewer_session_to_close = be64toh(request.session_id);
1846
1847 if (!conn->viewer_session) {
1848 DBG("Client trying to detach before creating a live viewer session");
1849 response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
1850 goto send_reply;
1851 }
1852
1853 health_code_update();
1854
1855 memset(&response, 0, sizeof(response));
1856 DBG("Detaching from session ID %" PRIu64, viewer_session_to_close);
1857
1858 session = session_get_by_id(be64toh(request.session_id));
1859 if (!session) {
1860 DBG("Relay session %" PRIu64 " not found",
1861 (uint64_t) be64toh(request.session_id));
1862 response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_UNK);
1863 goto send_reply;
1864 }
1865
1866 ret = viewer_session_is_attached(conn->viewer_session, session);
1867 if (ret != 1) {
1868 DBG("Not attached to this session");
1869 response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
1870 goto send_reply_put;
1871 }
1872
1873 viewer_session_close_one_session(conn->viewer_session, session);
1874 response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_OK);
1875 DBG("Session %" PRIu64 " detached.", viewer_session_to_close);
1876
1877 send_reply_put:
1878 session_put(session);
1879
1880 send_reply:
1881 health_code_update();
1882 ret = send_response(conn->sock, &response, sizeof(response));
1883 if (ret < 0) {
1884 goto end;
1885 }
1886 health_code_update();
1887 ret = 0;
1888
1889 end:
1890 return ret;
1891 }
1892
1893 /*
1894 * live_relay_unknown_command: reply with an error code when an unknown command is received
1895 */
1896 static
1897 void live_relay_unknown_command(struct relay_connection *conn)
1898 {
1899 struct lttcomm_relayd_generic_reply reply;
1900
1901 memset(&reply, 0, sizeof(reply));
1902 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1903 (void) send_response(conn->sock, &reply, sizeof(reply));
1904 }
1905
1906 /*
1907 * Process the commands received on the control socket
1908 */
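/*
 * Every command except LTTNG_VIEWER_CONNECT requires that the version check
 * has already completed on this connection; unknown command values are
 * answered with live_relay_unknown_command() and treated as an error.
 */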
1909 static
1910 int process_control(struct lttng_viewer_cmd *recv_hdr,
1911 struct relay_connection *conn)
1912 {
1913 int ret = 0;
1914 uint32_t msg_value;
1915
1916 msg_value = be32toh(recv_hdr->cmd);
1917
1918 /*
1919 * Make sure we've done the version check before any command other than a
1920 * new client connection.
1921 */
1922 if (msg_value != LTTNG_VIEWER_CONNECT && !conn->version_check_done) {
1923 ERR("Viewer conn value %" PRIu32 " before version check", msg_value);
1924 ret = -1;
1925 goto end;
1926 }
1927
1928 switch (msg_value) {
1929 case LTTNG_VIEWER_CONNECT:
1930 ret = viewer_connect(conn);
1931 break;
1932 case LTTNG_VIEWER_LIST_SESSIONS:
1933 ret = viewer_list_sessions(conn);
1934 break;
1935 case LTTNG_VIEWER_ATTACH_SESSION:
1936 ret = viewer_attach_session(conn);
1937 break;
1938 case LTTNG_VIEWER_GET_NEXT_INDEX:
1939 ret = viewer_get_next_index(conn);
1940 break;
1941 case LTTNG_VIEWER_GET_PACKET:
1942 ret = viewer_get_packet(conn);
1943 break;
1944 case LTTNG_VIEWER_GET_METADATA:
1945 ret = viewer_get_metadata(conn);
1946 break;
1947 case LTTNG_VIEWER_GET_NEW_STREAMS:
1948 ret = viewer_get_new_streams(conn);
1949 break;
1950 case LTTNG_VIEWER_CREATE_SESSION:
1951 ret = viewer_create_session(conn);
1952 break;
1953 case LTTNG_VIEWER_DETACH_SESSION:
1954 ret = viewer_detach_session(conn);
1955 break;
1956 default:
1957 ERR("Received unknown viewer command (%u)",
1958 be32toh(recv_hdr->cmd));
1959 live_relay_unknown_command(conn);
1960 ret = -1;
1961 goto end;
1962 }
1963
1964 end:
1965 return ret;
1966 }
1967
1968 static
1969 void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
1970 {
1971 int ret;
1972
1973 (void) lttng_poll_del(events, pollfd);
1974
1975 ret = close(pollfd);
1976 if (ret < 0) {
1977 ERR("Closing pollfd %d", pollfd);
1978 }
1979 }
1980
1981 /*
1982 * This thread does the actual work
1983 */
1984 static
1985 void *thread_worker(void *data)
1986 {
1987 int ret, err = -1;
1988 uint32_t nb_fd;
1989 struct lttng_poll_event events;
1990 struct lttng_ht *viewer_connections_ht;
1991 struct lttng_ht_iter iter;
1992 struct lttng_viewer_cmd recv_hdr;
1993 struct relay_connection *destroy_conn;
1994
1995 DBG("[thread] Live viewer relay worker started");
1996
1997 rcu_register_thread();
1998
1999 health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_WORKER);
2000
2001 if (testpoint(relayd_thread_live_worker)) {
2002 goto error_testpoint;
2003 }
2004
2005 /* table of connections indexed on socket */
2006 viewer_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2007 if (!viewer_connections_ht) {
2008 goto viewer_connections_ht_error;
2009 }
2010
2011 ret = create_thread_poll_set(&events, 2);
2012 if (ret < 0) {
2013 goto error_poll_create;
2014 }
2015
2016 ret = lttng_poll_add(&events, live_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
2017 if (ret < 0) {
2018 goto error;
2019 }
2020
2021 restart:
2022 while (1) {
2023 int i;
2024
2025 health_code_update();
2026
2027 /* Infinite blocking call, waiting for transmission */
2028 DBG3("Relayd live viewer worker thread polling...");
2029 health_poll_entry();
2030 ret = lttng_poll_wait(&events, -1);
2031 health_poll_exit();
2032 if (ret < 0) {
2033 /*
2034 * Restart interrupted system call.
2035 */
2036 if (errno == EINTR) {
2037 goto restart;
2038 }
2039 goto error;
2040 }
2041
2042 nb_fd = ret;
2043
2044 /*
2045 * Process control. The control connection is prioritised so we don't
2046 * starve it with high throughput tracing data on the data
2047 * connection.
2048 */
2049 for (i = 0; i < nb_fd; i++) {
2050 /* Fetch once the poll data */
2051 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
2052 int pollfd = LTTNG_POLL_GETFD(&events, i);
2053
2054 health_code_update();
2055
2056 /* Thread quit pipe has been closed. Killing thread. */
2057 ret = check_thread_quit_pipe(pollfd, revents);
2058 if (ret) {
2059 err = 0;
2060 goto exit;
2061 }
2062
2063 /* Inspect the relay conn pipe for new connection. */
2064 if (pollfd == live_conn_pipe[0]) {
2065 if (revents & LPOLLIN) {
2066 struct relay_connection *conn;
2067
2068 ret = lttng_read(live_conn_pipe[0],
2069 &conn, sizeof(conn));
2070 if (ret < 0) {
2071 goto error;
2072 }
2073 ret = lttng_poll_add(&events,
2074 conn->sock->fd,
2075 LPOLLIN | LPOLLRDHUP);
2076 if (ret) {
2077 ERR("Failed to add new live connection file descriptor to poll set");
2078 goto error;
2079 }
2080 connection_ht_add(viewer_connections_ht, conn);
2081 DBG("Connection socket %d added to poll", conn->sock->fd);
2082 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
2083 ERR("Relay live pipe error");
2084 goto error;
2085 } else {
2086 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
2087 goto error;
2088 }
2089 } else {
2090 /* Connection activity. */
2091 struct relay_connection *conn;
2092
2093 conn = connection_get_by_sock(viewer_connections_ht, pollfd);
2094 if (!conn) {
2095 continue;
2096 }
2097
2098 if (revents & LPOLLIN) {
2099 ret = conn->sock->ops->recvmsg(conn->sock, &recv_hdr,
2100 sizeof(recv_hdr), 0);
2101 if (ret <= 0) {
2102 /* Connection closed. */
2103 cleanup_connection_pollfd(&events, pollfd);
2104 /* Put "create" ownership reference. */
2105 connection_put(conn);
2106 DBG("Viewer control conn closed with %d", pollfd);
2107 } else {
2108 ret = process_control(&recv_hdr, conn);
2109 if (ret < 0) {
2110 /* Clear the session on error. */
2111 cleanup_connection_pollfd(&events, pollfd);
2112 /* Put "create" ownership reference. */
2113 connection_put(conn);
2114 DBG("Viewer connection closed with %d", pollfd);
2115 }
2116 }
2117 } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
2118 cleanup_connection_pollfd(&events, pollfd);
2119 /* Put "create" ownership reference. */
2120 connection_put(conn);
2121 } else {
2122 ERR("Unexpected poll events %u for sock %d", revents, pollfd);
2123 connection_put(conn);
2124 goto error;
2125 }
2126 /* Put local "get_by_sock" reference. */
2127 connection_put(conn);
2128 }
2129 }
2130 }
2131
2132 exit:
2133 error:
2134 lttng_poll_clean(&events);
2135
2136 /* Cleanup remaining connection object. */
2137 rcu_read_lock();
2138 cds_lfht_for_each_entry(viewer_connections_ht->ht, &iter.iter,
2139 destroy_conn,
2140 sock_n.node) {
2141 health_code_update();
2142 connection_put(destroy_conn);
2143 }
2144 rcu_read_unlock();
2145 error_poll_create:
2146 lttng_ht_destroy(viewer_connections_ht);
2147 viewer_connections_ht_error:
2148 /* Close relay conn pipes */
2149 utils_close_pipe(live_conn_pipe);
2150 if (err) {
2151 DBG("Viewer worker thread exited with error");
2152 }
2153 DBG("Viewer worker thread cleanup complete");
2154 error_testpoint:
2155 if (err) {
2156 health_error();
2157 ERR("Health error occurred in %s", __func__);
2158 }
2159 health_unregister(health_relayd);
2160 if (lttng_relay_stop_threads()) {
2161 ERR("Error stopping threads");
2162 }
2163 rcu_unregister_thread();
2164 return NULL;
2165 }
2166
2167 /*
2168 * Create the live connection pipe used to wake the live worker thread.
2169 * Closed in cleanup().
2170 */
2171 static int create_conn_pipe(void)
2172 {
2173 return utils_create_pipe_cloexec(live_conn_pipe);
2174 }
2175
2176 int relayd_live_join(void)
2177 {
2178 int ret, retval = 0;
2179 void *status;
2180
2181 ret = pthread_join(live_listener_thread, &status);
2182 if (ret) {
2183 errno = ret;
2184 PERROR("pthread_join live listener");
2185 retval = -1;
2186 }
2187
2188 ret = pthread_join(live_worker_thread, &status);
2189 if (ret) {
2190 errno = ret;
2191 PERROR("pthread_join live worker");
2192 retval = -1;
2193 }
2194
2195 ret = pthread_join(live_dispatcher_thread, &status);
2196 if (ret) {
2197 errno = ret;
2198 PERROR("pthread_join live dispatcher");
2199 retval = -1;
2200 }
2201
2202 cleanup_relayd_live();
2203
2204 return retval;
2205 }
2206
2207 /*
2208 * Entry point: create and start the lttng-live listener, dispatcher and worker threads.
2209 */
2210 int relayd_live_create(struct lttng_uri *uri)
2211 {
2212 int ret = 0, retval = 0;
2213 void *status;
2214 int is_root;
2215
2216 if (!uri) {
2217 retval = -1;
2218 goto exit_init_data;
2219 }
2220 live_uri = uri;
2221
2222 /* Check if daemon is UID = 0 */
2223 is_root = !getuid();
2224
2225 if (!is_root) {
2226 if (live_uri->port < 1024) {
2227 ERR("Need to be root to use ports < 1024");
2228 retval = -1;
2229 goto exit_init_data;
2230 }
2231 }
2232
2233 /* Setup the thread apps communication pipe. */
2234 if (create_conn_pipe()) {
2235 retval = -1;
2236 goto exit_init_data;
2237 }
2238
2239 /* Init relay command queue. */
2240 cds_wfcq_init(&viewer_conn_queue.head, &viewer_conn_queue.tail);
2241
2242 /* Set up max poll set size */
2243 if (lttng_poll_set_max_size()) {
2244 retval = -1;
2245 goto exit_init_data;
2246 }
2247
2248 /* Setup the dispatcher thread */
2249 ret = pthread_create(&live_dispatcher_thread, default_pthread_attr(),
2250 thread_dispatcher, (void *) NULL);
2251 if (ret) {
2252 errno = ret;
2253 PERROR("pthread_create viewer dispatcher");
2254 retval = -1;
2255 goto exit_dispatcher_thread;
2256 }
2257
2258 /* Setup the worker thread */
2259 ret = pthread_create(&live_worker_thread, default_pthread_attr(),
2260 thread_worker, NULL);
2261 if (ret) {
2262 errno = ret;
2263 PERROR("pthread_create viewer worker");
2264 retval = -1;
2265 goto exit_worker_thread;
2266 }
2267
2268 /* Setup the listener thread */
2269 ret = pthread_create(&live_listener_thread, default_pthread_attr(),
2270 thread_listener, (void *) NULL);
2271 if (ret) {
2272 errno = ret;
2273 PERROR("pthread_create viewer listener");
2274 retval = -1;
2275 goto exit_listener_thread;
2276 }
2277
2278 /*
2279 * All OK, started all threads.
2280 */
2281 return retval;
2282
2283 /*
2284 * Join on the live_listener_thread should anything be added after
2285 * the live_listener thread's creation.
2286 */
2287
2288 exit_listener_thread:
2289
2290 ret = pthread_join(live_worker_thread, &status);
2291 if (ret) {
2292 errno = ret;
2293 PERROR("pthread_join live worker");
2294 retval = -1;
2295 }
2296 exit_worker_thread:
2297
2298 ret = pthread_join(live_dispatcher_thread, &status);
2299 if (ret) {
2300 errno = ret;
2301 PERROR("pthread_join live dispatcher");
2302 retval = -1;
2303 }
2304 exit_dispatcher_thread:
2305
2306 exit_init_data:
2307 cleanup_relayd_live();
2308
2309 return retval;
2310 }