9e1085f33615018237a4acd2abd434c2362c265b
[lttng-tools.git] / src / bin / lttng-relayd / live.c
/*
 * Copyright (C) 2013 - Julien Desfossez <jdesfossez@efficios.com>
 *                      David Goulet <dgoulet@efficios.com>
 *               2015 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _LGPL_SOURCE
#include <getopt.h>
#include <grp.h>
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <inttypes.h>
#include <urcu/futex.h>
#include <urcu/uatomic.h>
#include <urcu/rculist.h>
#include <unistd.h>
#include <fcntl.h>

#include <lttng/lttng.h>
#include <common/common.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/compat/endian.h>
#include <common/defaults.h>
#include <common/futex.h>
#include <common/index/index.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/sessiond-comm/inet.h>
#include <common/sessiond-comm/relayd.h>
#include <common/uri.h>
#include <common/utils.h>

#include "cmd.h"
#include "live.h"
#include "lttng-relayd.h"
#include "utils.h"
#include "health-relayd.h"
#include "testpoint.h"
#include "viewer-stream.h"
#include "stream.h"
#include "session.h"
#include "ctf-trace.h"
#include "connection.h"
#include "viewer-session.h"

#define SESSION_BUF_DEFAULT_COUNT	16

static struct lttng_uri *live_uri;

/*
 * This pipe is used to inform the worker thread that a command is queued and
 * ready to be processed.
 */
static int live_conn_pipe[2] = { -1, -1 };

/* Shared between threads */
static int live_dispatch_thread_exit;

static pthread_t live_listener_thread;
static pthread_t live_dispatcher_thread;
static pthread_t live_worker_thread;

/*
 * Relay command queue.
 *
 * The listener and dispatcher threads communicate through this queue.
 */
static struct relay_conn_queue viewer_conn_queue;

static uint64_t last_relay_viewer_session_id;
static pthread_mutex_t last_relay_viewer_session_id_lock =
		PTHREAD_MUTEX_INITIALIZER;

/*
 * Clean up the daemon.
 */
static
void cleanup_relayd_live(void)
{
	DBG("Cleaning up");

	free(live_uri);
}

/*
 * Receive a request using a given socket, into a caller-allocated buffer of
 * the given size.
 *
 * Return the size of the received message or else a negative value on error
 * with errno being set by the recvmsg() syscall.
 */
static
ssize_t recv_request(struct lttcomm_sock *sock, void *buf, size_t size)
{
	ssize_t ret;

	ret = sock->ops->recvmsg(sock, buf, size, 0);
	if (ret < 0 || ret != size) {
		if (ret == 0) {
			/* Orderly shutdown. Not necessary to print an error. */
			DBG("Socket %d did an orderly shutdown", sock->fd);
		} else {
			ERR("Relay failed to receive request.");
		}
		ret = -1;
	}

	return ret;
}

/*
 * Send a response using a given socket, from a caller-allocated source buffer
 * of the given size.
 *
 * Return the size of the sent message or else a negative value on error with
 * errno being set by the sendmsg() syscall.
 */
static
ssize_t send_response(struct lttcomm_sock *sock, void *buf, size_t size)
{
	ssize_t ret;

	ret = sock->ops->sendmsg(sock, buf, size, 0);
	if (ret < 0) {
		ERR("Relayd failed to send response.");
	}

	return ret;
}

/*
 * Atomically check if new streams got added in one of the attached sessions
 * and reset the flag to 0.
 *
 * Returns 1 if new streams got added, 0 if nothing changed, a negative value
 * on error.
 */
static
int check_new_streams(struct relay_connection *conn)
{
	struct relay_session *session;
	unsigned long current_val;
	int ret = 0;

	if (!conn->viewer_session) {
		goto end;
	}
	rcu_read_lock();
	cds_list_for_each_entry_rcu(session,
			&conn->viewer_session->session_list,
			viewer_session_node) {
		if (!session_get(session)) {
			continue;
		}
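		/*
		 * cmpxchg returns the previous value of new_streams and only
		 * clears it if it was set: a flag raised concurrently with
		 * this check is never lost, merely reported on the next call.
		 */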
		current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
		ret = current_val;
		session_put(session);
		if (ret == 1) {
			break;
		}
	}
	rcu_read_unlock();
end:
	return ret;
}

/*
 * Send viewer streams to the given socket. The ignore_sent_flag indicates if
 * this function should ignore the sent flag or not.
 *
 * Return 0 on success or else a negative value.
 */
static
ssize_t send_viewer_streams(struct lttcomm_sock *sock,
		struct relay_session *session, unsigned int ignore_sent_flag)
{
	ssize_t ret;
	struct lttng_viewer_stream send_stream;
	struct lttng_ht_iter iter;
	struct relay_viewer_stream *vstream;

	rcu_read_lock();

	cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, vstream,
			stream_n.node) {
		struct ctf_trace *ctf_trace;

		health_code_update();

		if (!viewer_stream_get(vstream)) {
			continue;
		}

		pthread_mutex_lock(&vstream->stream->lock);
		/* Ignore if not the same session. */
		if (vstream->stream->trace->session->id != session->id ||
				(!ignore_sent_flag && vstream->sent_flag)) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			continue;
		}

		ctf_trace = vstream->stream->trace;
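		/*
		 * All fields of the live protocol are exchanged in big
		 * endian, hence the htobe64()/htobe32() conversions before
		 * anything is written to the wire.
		 */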
		send_stream.id = htobe64(vstream->stream->stream_handle);
		send_stream.ctf_trace_id = htobe64(ctf_trace->id);
		send_stream.metadata_flag = htobe32(
				vstream->stream->is_metadata);
		if (lttng_strncpy(send_stream.path_name, vstream->path_name,
				sizeof(send_stream.path_name))) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			ret = -1;	/* Error. */
			goto end_unlock;
		}
		if (lttng_strncpy(send_stream.channel_name,
				vstream->channel_name,
				sizeof(send_stream.channel_name))) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			ret = -1;	/* Error. */
			goto end_unlock;
		}

		DBG("Sending stream %" PRIu64 " to viewer",
				vstream->stream->stream_handle);
		vstream->sent_flag = 1;
		pthread_mutex_unlock(&vstream->stream->lock);

		ret = send_response(sock, &send_stream, sizeof(send_stream));
		viewer_stream_put(vstream);
		if (ret < 0) {
			goto end_unlock;
		}
	}

	ret = 0;

end_unlock:
	rcu_read_unlock();
	return ret;
}

/*
 * Create every viewer stream possible for the given session with the seek
 * type. Three counters *can* be returned, which are, in order: the total
 * number of viewer streams of the session, the number of unsent streams and
 * the number of streams created. Those counters can be NULL and thus will be
 * ignored.
 *
 * Return 0 on success or else a negative value.
 */
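/*
 * For instance, attaching a viewer from the beginning of the trace boils
 * down to (sketch, mirroring the call in viewer_attach_session() below):
 *
 *	uint32_t nb_streams = 0;
 *	bool closed = false;
 *
 *	ret = make_viewer_streams(session, LTTNG_VIEWER_SEEK_BEGINNING,
 *			&nb_streams, NULL, NULL, &closed);
 */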
static
int make_viewer_streams(struct relay_session *session,
		enum lttng_viewer_seek seek_t, uint32_t *nb_total, uint32_t *nb_unsent,
		uint32_t *nb_created, bool *closed)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ctf_trace *ctf_trace;

	assert(session);

	/*
	 * Hold the session lock to ensure that we see either none or
	 * all initial streams for a session, but no intermediate state.
	 */
	pthread_mutex_lock(&session->lock);

	if (session->connection_closed) {
		*closed = true;
	}

	/*
	 * Create viewer streams for relay streams that are ready to be
	 * used for the given session id only.
	 */
	rcu_read_lock();
	cds_lfht_for_each_entry(session->ctf_traces_ht->ht, &iter.iter, ctf_trace,
			node.node) {
		struct relay_stream *stream;

		health_code_update();

		if (!ctf_trace_get(ctf_trace)) {
			continue;
		}

		cds_list_for_each_entry_rcu(stream, &ctf_trace->stream_list, stream_node) {
			struct relay_viewer_stream *vstream;

			if (!stream_get(stream)) {
				continue;
			}
			/*
			 * stream->published is protected by the session lock.
			 */
			if (!stream->published) {
				goto next;
			}
			vstream = viewer_stream_get_by_id(stream->stream_handle);
			if (!vstream) {
				vstream = viewer_stream_create(stream, seek_t);
				if (!vstream) {
					ret = -1;
					ctf_trace_put(ctf_trace);
					stream_put(stream);
					goto error_unlock;
				}

				if (nb_created) {
					/* Update number of created stream counter. */
					(*nb_created)++;
				}
				/*
				 * Ensure a self-reference is preserved even
				 * after we have put our local reference.
				 */
				if (!viewer_stream_get(vstream)) {
					ERR("Unable to get self-reference on viewer stream, logic error.");
					abort();
				}
			} else {
				if (!vstream->sent_flag && nb_unsent) {
					/* Update number of unsent stream counter. */
					(*nb_unsent)++;
				}
			}
			/* Update number of total stream counter. */
			if (nb_total) {
				if (stream->is_metadata) {
					if (!stream->closed ||
							stream->metadata_received > vstream->metadata_sent) {
						(*nb_total)++;
					}
				} else {
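					/*
					 * The signed cast below is a
					 * wraparound-safe sequence-number
					 * comparison: the stream still has
					 * unread data unless prev_seq has
					 * caught up with last_net_seq_num.
					 */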
					if (!stream->closed ||
							!(((int64_t) (stream->prev_seq - stream->last_net_seq_num)) >= 0)) {
						(*nb_total)++;
					}
				}
			}
			/* Put local reference. */
			viewer_stream_put(vstream);
		next:
			stream_put(stream);
		}
		ctf_trace_put(ctf_trace);
	}

	ret = 0;

error_unlock:
	rcu_read_unlock();
	pthread_mutex_unlock(&session->lock);
	return ret;
}

int relayd_live_stop(void)
{
	/* Stop dispatch thread */
	CMM_STORE_SHARED(live_dispatch_thread_exit, 1);
	futex_nto1_wake(&viewer_conn_queue.futex);
	return 0;
}

/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static
int create_thread_poll_set(struct lttng_poll_event *events, int size)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
static
int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}

/*
 * Create and init socket from uri.
 */
static
struct lttcomm_sock *init_socket(struct lttng_uri *uri)
{
	int ret;
	struct lttcomm_sock *sock = NULL;

	sock = lttcomm_alloc_sock_from_uri(uri);
	if (sock == NULL) {
		ERR("Allocating socket");
		goto error;
	}

	ret = lttcomm_create_sock(sock);
	if (ret < 0) {
		goto error;
	}
	DBG("Listening on sock %d for live", sock->fd);

	ret = sock->ops->bind(sock);
	if (ret < 0) {
		goto error;
	}

	ret = sock->ops->listen(sock, -1);
	if (ret < 0) {
		goto error;
	}

	return sock;

error:
	if (sock) {
		lttcomm_destroy_sock(sock);
	}
	return NULL;
}

/*
 * This thread manages listening for new connections on the network.
 */
static
void *thread_listener(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *live_control_sock;

	DBG("[thread] Relay live listener started");

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_LISTENER);

	health_code_update();

	live_control_sock = init_socket(live_uri);
	if (!live_control_sock) {
		goto error_sock_control;
	}

	/* Pass 2 as size here for the thread quit pipe and control socket. */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the control socket */
	ret = lttng_poll_add(&events, live_control_sock->fd, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	lttng_relay_notify_ready();

	if (testpoint(relayd_thread_live_listener)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		DBG("Listener accepting live viewer connections");

restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}
		nb_fd = ret;

		DBG("Relay new viewer connection received");
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			if (revents & LPOLLIN) {
				/*
				 * A new connection is requested, therefore a
				 * viewer connection is allocated in this
				 * thread, enqueued to a global queue and
				 * dequeued (and freed) in the worker thread.
				 */
				int val = 1;
				struct relay_connection *new_conn;
				struct lttcomm_sock *newsock;

				newsock = live_control_sock->ops->accept(live_control_sock);
				if (!newsock) {
					PERROR("accepting control sock");
					goto error;
				}
				DBG("Relay viewer connection accepted socket %d", newsock->fd);

				ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
						sizeof(val));
				if (ret < 0) {
					PERROR("setsockopt inet");
					lttcomm_destroy_sock(newsock);
					goto error;
				}
				new_conn = connection_create(newsock, RELAY_CONNECTION_UNKNOWN);
				if (!new_conn) {
					lttcomm_destroy_sock(newsock);
					goto error;
				}
				/* Ownership assumed by the connection. */
				newsock = NULL;

				/* Enqueue request for the dispatcher thread. */
				cds_wfcq_enqueue(&viewer_conn_queue.head, &viewer_conn_queue.tail,
						&new_conn->qnode);

				/*
				 * Wake the dispatch queue futex.
				 * Implicit memory barrier with the
				 * exchange in cds_wfcq_enqueue.
				 */
				futex_nto1_wake(&viewer_conn_queue.futex);
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("socket poll error");
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
error_poll_add:
error_testpoint:
	lttng_poll_clean(&events);
error_create_poll:
	if (live_control_sock->fd >= 0) {
		ret = live_control_sock->ops->close(live_control_sock);
		if (ret) {
			PERROR("close");
		}
	}
	lttcomm_destroy_sock(live_control_sock);
error_sock_control:
	if (err) {
		health_error();
		DBG("Live viewer listener thread exited with error");
	}
	health_unregister(health_relayd);
	DBG("Live viewer listener thread cleanup complete");
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
	}
	return NULL;
}

/*
 * This thread manages the dispatching of the requests to the worker thread.
 */
static
void *thread_dispatcher(void *data)
{
	int err = -1;
	ssize_t ret;
	struct cds_wfcq_node *node;
	struct relay_connection *conn = NULL;

	DBG("[thread] Live viewer relay dispatcher started");

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_DISPATCHER);

	if (testpoint(relayd_thread_live_dispatcher)) {
		goto error_testpoint;
	}

	health_code_update();

	while (!CMM_LOAD_SHARED(live_dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&viewer_conn_queue.futex);

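		/*
		 * Preparing the futex before draining the queue implements a
		 * "prepare then check" pattern: an enqueue racing with the
		 * drain below wakes the already-armed futex, so the later
		 * futex_nto1_wait() returns immediately instead of sleeping
		 * on a missed wakeup.
		 */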
		do {
			health_code_update();

			/* Dequeue commands */
			node = cds_wfcq_dequeue_blocking(&viewer_conn_queue.head,
					&viewer_conn_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the live-viewer "
						"relay command queue");
				/* Continue thread execution */
				break;
			}
			conn = caa_container_of(node, struct relay_connection, qnode);
			DBG("Dispatching viewer request waiting on sock %d",
					conn->sock->fd);

			/*
			 * Inform worker thread of the new request. This
			 * call is blocking so we can be assured that
			 * the data will be read at some point in time
			 * or wait to the end of the world :)
			 */
			ret = lttng_write(live_conn_pipe[1], &conn, sizeof(conn));
			if (ret < 0) {
				PERROR("write conn pipe");
				connection_put(conn);
				goto error;
			}
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		health_poll_entry();
		futex_nto1_wait(&viewer_conn_queue.futex);
		health_poll_exit();
	}

	/* Normal exit, no error */
	err = 0;

error:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	DBG("Live viewer dispatch thread dying");
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
	}
	return NULL;
}

/*
 * Establish connection with the viewer and check the versions.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_connect(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_connect reply, msg;

	conn->version_check_done = 1;

	health_code_update();

	DBG("Viewer is establishing a connection to the relayd.");

	ret = recv_request(conn->sock, &msg, sizeof(msg));
	if (ret < 0) {
		goto end;
	}

	health_code_update();

	memset(&reply, 0, sizeof(reply));
	reply.major = RELAYD_VERSION_COMM_MAJOR;
	reply.minor = RELAYD_VERSION_COMM_MINOR;

	/* Major versions must be the same */
	if (reply.major != be32toh(msg.major)) {
		DBG("Incompatible major versions ([relayd] %u vs [client] %u)",
				reply.major, be32toh(msg.major));
		ret = -1;
		goto end;
	}

	conn->major = reply.major;
	/* We adapt to the lowest compatible version */
	if (reply.minor <= be32toh(msg.minor)) {
		conn->minor = reply.minor;
	} else {
		conn->minor = be32toh(msg.minor);
	}

	if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_COMMAND) {
		conn->type = RELAY_VIEWER_COMMAND;
	} else if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_NOTIFICATION) {
		conn->type = RELAY_VIEWER_NOTIFICATION;
	} else {
		ERR("Unknown connection type: %u", be32toh(msg.type));
		ret = -1;
		goto end;
	}

	reply.major = htobe32(reply.major);
	reply.minor = htobe32(reply.minor);
	if (conn->type == RELAY_VIEWER_COMMAND) {
		/*
		 * Increment outside of htobe64 macro, because the argument can
		 * be used more than once within the macro, and thus the
		 * operation may be undefined.
		 */
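		/*
		 * For example, an htobe64() implemented as a byte-swapping
		 * macro that mentions its argument several times would turn
		 * htobe64(id++) into multiple increments.
		 */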
		pthread_mutex_lock(&last_relay_viewer_session_id_lock);
		last_relay_viewer_session_id++;
		pthread_mutex_unlock(&last_relay_viewer_session_id_lock);
		reply.viewer_session_id = htobe64(last_relay_viewer_session_id);
	}

	health_code_update();

	ret = send_response(conn->sock, &reply, sizeof(reply));
	if (ret < 0) {
		goto end;
	}

	health_code_update();

	DBG("Version check done using protocol %u.%u", conn->major, conn->minor);
	ret = 0;

end:
	return ret;
}

/*
 * Send the viewer the list of current sessions.
 * We need to create a copy of the hash table content because otherwise
 * we cannot assume the number of entries stays the same between getting
 * the number of HT elements and iteration over the HT.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_list_sessions(struct relay_connection *conn)
{
	int ret = 0;
	struct lttng_viewer_list_sessions session_list;
	struct lttng_ht_iter iter;
	struct relay_session *session;
	struct lttng_viewer_session *send_session_buf = NULL;
	uint32_t buf_count = SESSION_BUF_DEFAULT_COUNT;
	uint32_t count = 0;

	DBG("List sessions received");

	send_session_buf = zmalloc(SESSION_BUF_DEFAULT_COUNT * sizeof(*send_session_buf));
	if (!send_session_buf) {
		return -1;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(sessions_ht->ht, &iter.iter, session,
			session_n.node) {
		struct lttng_viewer_session *send_session;

		health_code_update();

		if (count >= buf_count) {
			struct lttng_viewer_session *newbuf;
			uint32_t new_buf_count = buf_count << 1;

			newbuf = realloc(send_session_buf,
					new_buf_count * sizeof(*send_session_buf));
			if (!newbuf) {
				ret = -1;
				break;
			}
			send_session_buf = newbuf;
			buf_count = new_buf_count;
		}
		send_session = &send_session_buf[count];
		if (lttng_strncpy(send_session->session_name,
				session->session_name,
				sizeof(send_session->session_name))) {
			ret = -1;
			break;
		}
		if (lttng_strncpy(send_session->hostname, session->hostname,
				sizeof(send_session->hostname))) {
			ret = -1;
			break;
		}
		send_session->id = htobe64(session->id);
		send_session->live_timer = htobe32(session->live_timer);
		if (session->viewer_attached) {
			send_session->clients = htobe32(1);
		} else {
			send_session->clients = htobe32(0);
		}
		send_session->streams = htobe32(session->stream_count);
		count++;
	}
	rcu_read_unlock();
	if (ret < 0) {
		goto end_free;
	}

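	/*
	 * The reply is sent in two parts: first a fixed-size header carrying
	 * the session count, then the array of session descriptions itself.
	 */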
	session_list.sessions_count = htobe32(count);

	health_code_update();

	ret = send_response(conn->sock, &session_list, sizeof(session_list));
	if (ret < 0) {
		goto end_free;
	}

	health_code_update();

	ret = send_response(conn->sock, send_session_buf,
			count * sizeof(*send_session_buf));
	if (ret < 0) {
		goto end_free;
	}
	health_code_update();

	ret = 0;
end_free:
	free(send_session_buf);
	return ret;
}

/*
 * Send the viewer the list of streams added since its last request.
 */
static
int viewer_get_new_streams(struct relay_connection *conn)
{
	int ret, send_streams = 0;
	uint32_t nb_created = 0, nb_unsent = 0, nb_streams = 0, nb_total = 0;
	struct lttng_viewer_new_streams_request request;
	struct lttng_viewer_new_streams_response response;
	struct relay_session *session;
	uint64_t session_id;
	bool closed = false;

	assert(conn);

	DBG("Get new streams received");

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto error;
	}
	session_id = be64toh(request.session_id);

	health_code_update();

	memset(&response, 0, sizeof(response));

	session = session_get_by_id(session_id);
	if (!session) {
		DBG("Relay session %" PRIu64 " not found", session_id);
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
		goto send_reply;
	}

	if (!viewer_session_is_attached(conn->viewer_session, session)) {
		send_streams = 0;
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);
		goto send_reply;
	}

	send_streams = 1;
	response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_OK);

	ret = make_viewer_streams(session, LTTNG_VIEWER_SEEK_LAST, &nb_total, &nb_unsent,
			&nb_created, &closed);
	if (ret < 0) {
		goto end_put_session;
	}
	/* Only send back the newly created streams with the unsent ones. */
	nb_streams = nb_created + nb_unsent;
	response.streams_count = htobe32(nb_streams);

	/*
	 * If the session is closed, HUP when there are no more streams
	 * with data.
	 */
	if (closed && nb_total == 0) {
		send_streams = 0;
		response.streams_count = 0;
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_HUP);
		goto send_reply;
	}

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
	if (ret < 0) {
		goto end_put_session;
	}
	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer
	 * knows what is happening.
	 */
	if (!send_streams || !nb_streams) {
		ret = 0;
		goto end_put_session;
	}

	/*
	 * Send the streams and *DON'T* ignore the sent flag, so that every
	 * viewer stream that was not sent up to that point will be sent to
	 * the viewer.
	 */
	ret = send_viewer_streams(conn->sock, session, 0);
	if (ret < 0) {
		goto end_put_session;
	}

end_put_session:
	if (session) {
		session_put(session);
	}
error:
	return ret;
}

/*
 * Attach the viewer to an existing session.
 */
static
int viewer_attach_session(struct relay_connection *conn)
{
	int send_streams = 0;
	ssize_t ret;
	uint32_t nb_streams = 0;
	enum lttng_viewer_seek seek_type;
	struct lttng_viewer_attach_session_request request;
	struct lttng_viewer_attach_session_response response;
	struct relay_session *session = NULL;
	bool closed = false;

	assert(conn);

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	memset(&response, 0, sizeof(response));

	if (!conn->viewer_session) {
		DBG("Client trying to attach before creating a live viewer session");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_NO_SESSION);
		goto send_reply;
	}

	session = session_get_by_id(be64toh(request.session_id));
	if (!session) {
		DBG("Relay session %" PRIu64 " not found",
				be64toh(request.session_id));
		response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);
		goto send_reply;
	}
	DBG("Attach session ID %" PRIu64 " received",
			be64toh(request.session_id));

	if (session->live_timer == 0) {
		DBG("Not a live session");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_NOT_LIVE);
		goto send_reply;
	}

	send_streams = 1;
	ret = viewer_session_attach(conn->viewer_session, session);
	if (ret) {
		DBG("Already a viewer attached");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_ALREADY);
		goto send_reply;
	}

	switch (be32toh(request.seek)) {
	case LTTNG_VIEWER_SEEK_BEGINNING:
	case LTTNG_VIEWER_SEEK_LAST:
		response.status = htobe32(LTTNG_VIEWER_ATTACH_OK);
		seek_type = be32toh(request.seek);
		break;
	default:
		ERR("Wrong seek parameter");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_SEEK_ERR);
		send_streams = 0;
		goto send_reply;
	}

	ret = make_viewer_streams(session, seek_type, &nb_streams, NULL,
			NULL, &closed);
	if (ret < 0) {
		goto end_put_session;
	}
	response.streams_count = htobe32(nb_streams);

	/*
	 * If the session is closed when the viewer is attaching, it
	 * means some of the streams may have been concurrently removed,
	 * so we don't allow the viewer to attach, even if there are
	 * streams available.
	 */
	if (closed) {
		send_streams = 0;
		response.streams_count = 0;
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_HUP);
		goto send_reply;
	}

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
	if (ret < 0) {
		goto end_put_session;
	}
	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer
	 * knows what is happening.
	 */
	if (!send_streams || !nb_streams) {
		ret = 0;
		goto end_put_session;
	}

	/* Send the streams and ignore the sent flag. */
	ret = send_viewer_streams(conn->sock, session, 1);
	if (ret < 0) {
		goto end_put_session;
	}

end_put_session:
	if (session) {
		session_put(session);
	}
error:
	return ret;
}

/*
 * Open the index file if needed for the given vstream.
 *
 * If an index file is successfully opened, the vstream will set it as its
 * current index file.
 *
 * Return 0 on success, a negative value on error (-ENOENT if not ready yet).
 *
 * Called with rstream lock held.
 */
static int try_open_index(struct relay_viewer_stream *vstream,
		struct relay_stream *rstream)
{
	int ret = 0;

	if (vstream->index_file) {
		goto end;
	}

	/*
	 * The index file is only opened the first time around, once at
	 * least one index has been received.
	 */
	if (rstream->index_received_seqcount == 0) {
		ret = -ENOENT;
		goto end;
	}
	vstream->index_file = lttng_index_file_open(vstream->path_name,
			vstream->channel_name,
			vstream->stream->tracefile_count,
			vstream->current_tracefile_id);
	if (!vstream->index_file) {
		ret = -1;
	}

end:
	return ret;
}

/*
 * Check the status of the index for the given stream. This function
 * updates the index structure if needed and can put (close) the vstream
 * in the HUP situation.
 *
 * Return 0 means that we can proceed with the index. A value of 1 means
 * that the index has been updated and is ready to be sent to the
 * client. A negative value indicates an error that can't be handled.
 *
 * Called with rstream lock held.
 */
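/*
 * In short, the decision tree below is:
 *   - connection closed and everything already sent  -> HUP
 *   - beacon received and everything already sent    -> INACTIVE
 *   - no beacon and everything already sent          -> RETRY
 *   - next index outside the current trace file      -> rotate, then
 *     HUP, RETRY or proceed depending on the outcome
 */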
static int check_index_status(struct relay_viewer_stream *vstream,
		struct relay_stream *rstream, struct ctf_trace *trace,
		struct lttng_viewer_index *index)
{
	int ret;

	if (trace->session->connection_closed
			&& rstream->index_received_seqcount
				== vstream->index_sent_seqcount) {
		/* Last index sent and session connection is closed. */
		index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
		goto hup;
	} else if (rstream->beacon_ts_end != -1ULL &&
			rstream->index_received_seqcount
				== vstream->index_sent_seqcount) {
		/*
		 * We've received a synchronization beacon and the last index
		 * available has been sent, the index for now is inactive.
		 *
		 * In this case, we have received a beacon which allows us to
		 * inform the client of a time interval during which we can
		 * guarantee that there are no events to read (and never will
		 * be).
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_INACTIVE);
		index->timestamp_end = htobe64(rstream->beacon_ts_end);
		index->stream_id = htobe64(rstream->ctf_stream_id);
		goto index_ready;
	} else if (rstream->index_received_seqcount
			== vstream->index_sent_seqcount) {
		/*
		 * This checks whether received == sent seqcount. In
		 * this case, we have not received a beacon. Therefore,
		 * we can only ask the client to retry later.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
		goto index_ready;
	} else if (!tracefile_array_seq_in_file(rstream->tfa,
			vstream->current_tracefile_id,
			vstream->index_sent_seqcount)) {
		/*
		 * The next index we want to send cannot be read either
		 * because we need to perform a rotation, or due to
		 * the producer having overwritten its trace file.
		 */
		DBG("Viewer stream %" PRIu64 " rotation",
				vstream->stream->stream_handle);
		ret = viewer_stream_rotate(vstream);
		if (ret < 0) {
			goto end;
		} else if (ret == 1) {
			/* EOF across entire stream. */
			index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
			goto hup;
		}
		/*
		 * If we have been pushed due to overwrite, it
		 * necessarily means there is data that can be read in
		 * the stream. If we rotated because we reached the end
		 * of a tracefile, it means the following tracefile
		 * needs to contain at least one index, else we would
		 * have already returned LTTNG_VIEWER_INDEX_RETRY to the
		 * viewer. The updated index_sent_seqcount needs to
		 * point to a readable index entry now.
		 *
		 * In the case where we "rotate" on a single file, we
		 * can end up in a case where the requested index is
		 * still unavailable.
		 */
		if (rstream->tracefile_count == 1 &&
				!tracefile_array_seq_in_file(
					rstream->tfa,
					vstream->current_tracefile_id,
					vstream->index_sent_seqcount)) {
			index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
			goto index_ready;
		}
		assert(tracefile_array_seq_in_file(rstream->tfa,
				vstream->current_tracefile_id,
				vstream->index_sent_seqcount));
	}
	/* ret == 0 means successful so we continue. */
	ret = 0;
end:
	return ret;

hup:
	viewer_stream_put(vstream);
index_ready:
	return 1;
}

/*
 * Send the next index for a stream.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_get_next_index(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_get_next_index request_index;
	struct lttng_viewer_index viewer_index;
	struct ctf_packet_index packet_index;
	struct relay_viewer_stream *vstream = NULL;
	struct relay_stream *rstream = NULL;
	struct ctf_trace *ctf_trace = NULL;
	struct relay_viewer_stream *metadata_viewer_stream = NULL;

	assert(conn);

	DBG("Viewer get next index");

	memset(&viewer_index, 0, sizeof(viewer_index));
	health_code_update();

	ret = recv_request(conn->sock, &request_index, sizeof(request_index));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	vstream = viewer_stream_get_by_id(be64toh(request_index.stream_id));
	if (!vstream) {
		DBG("Client requested index of unknown stream id %" PRIu64,
				be64toh(request_index.stream_id));
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	}

	/* Use the back reference; protected by refcounts. */
	rstream = vstream->stream;
	ctf_trace = rstream->trace;

	/* metadata_viewer_stream may be NULL. */
	metadata_viewer_stream =
			ctf_trace_get_viewer_metadata_stream(ctf_trace);

	pthread_mutex_lock(&rstream->lock);

	/*
	 * The viewer should not ask for index on metadata stream.
	 */
	if (rstream->is_metadata) {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);
		goto send_reply;
	}

	/* Try to open an index if one is needed for that stream. */
	ret = try_open_index(vstream, rstream);
	if (ret < 0) {
		if (ret == -ENOENT) {
			/*
			 * The index is created only when the first data
			 * packet arrives, it might not be ready at the
			 * beginning of the session
			 */
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
		} else {
			/* Unhandled error. */
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		}
		goto send_reply;
	}

	ret = check_index_status(vstream, rstream, ctf_trace, &viewer_index);
	if (ret < 0) {
		goto error_put;
	} else if (ret == 1) {
		/*
		 * We have no index to send and check_index_status has populated
		 * viewer_index's status.
		 */
		goto send_reply;
	}
	/* At this point, ret is 0 thus we will be able to read the index. */
	assert(!ret);

	/*
	 * vstream->stream_fd may be NULL if it has been closed by
	 * tracefile rotation, or if we are at the beginning of the
	 * stream. We open the data stream file here to protect against
	 * overwrite caused by tracefile rotation (in association with
	 * unlink performed before overwrite).
	 */
	if (!vstream->stream_fd) {
		char fullpath[PATH_MAX];

		if (vstream->stream->tracefile_count > 0) {
			ret = snprintf(fullpath, PATH_MAX, "%s/%s_%" PRIu64,
					vstream->path_name,
					vstream->channel_name,
					vstream->current_tracefile_id);
		} else {
			ret = snprintf(fullpath, PATH_MAX, "%s/%s",
					vstream->path_name,
					vstream->channel_name);
		}
		if (ret < 0) {
			goto error_put;
		}
		ret = open(fullpath, O_RDONLY);
		if (ret < 0) {
			PERROR("Relay opening trace file");
			goto error_put;
		}
		vstream->stream_fd = stream_fd_create(ret);
		if (!vstream->stream_fd) {
			if (close(ret)) {
				PERROR("close");
			}
			goto error_put;
		}
	}

	ret = check_new_streams(conn);
	if (ret < 0) {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	} else if (ret == 1) {
		viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;
	}

	ret = lttng_index_file_read(vstream->index_file, &packet_index);
	if (ret) {
		ERR("Relay error reading index file %d",
				vstream->index_file->fd);
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
		goto send_reply;
	} else {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_OK);
		vstream->index_sent_seqcount++;
	}

	/*
	 * Indexes are stored in big endian, no need to switch before sending.
	 */
	DBG("Sending viewer index for stream %" PRIu64 " offset %" PRIu64,
			rstream->stream_handle,
			be64toh(packet_index.offset));
	viewer_index.offset = packet_index.offset;
	viewer_index.packet_size = packet_index.packet_size;
	viewer_index.content_size = packet_index.content_size;
	viewer_index.timestamp_begin = packet_index.timestamp_begin;
	viewer_index.timestamp_end = packet_index.timestamp_end;
	viewer_index.events_discarded = packet_index.events_discarded;
	viewer_index.stream_id = packet_index.stream_id;

send_reply:
	if (rstream) {
		pthread_mutex_unlock(&rstream->lock);
	}

	if (metadata_viewer_stream) {
		pthread_mutex_lock(&metadata_viewer_stream->stream->lock);
		DBG("get next index metadata check: recv %" PRIu64
				" sent %" PRIu64,
				metadata_viewer_stream->stream->metadata_received,
				metadata_viewer_stream->metadata_sent);
		if (!metadata_viewer_stream->stream->metadata_received ||
				metadata_viewer_stream->stream->metadata_received >
					metadata_viewer_stream->metadata_sent) {
			viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;
		}
		pthread_mutex_unlock(&metadata_viewer_stream->stream->lock);
	}

	viewer_index.flags = htobe32(viewer_index.flags);
	health_code_update();

	ret = send_response(conn->sock, &viewer_index, sizeof(viewer_index));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	if (vstream) {
		DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
				vstream->index_sent_seqcount,
				vstream->stream->stream_handle);
	}
end:
	if (metadata_viewer_stream) {
		viewer_stream_put(metadata_viewer_stream);
	}
	if (vstream) {
		viewer_stream_put(vstream);
	}
	return ret;

error_put:
	pthread_mutex_unlock(&rstream->lock);
	if (metadata_viewer_stream) {
		viewer_stream_put(metadata_viewer_stream);
	}
	viewer_stream_put(vstream);
	return ret;
}

/*
 * Send the requested trace packet for a stream.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_get_packet(struct relay_connection *conn)
{
	int ret, send_data = 0;
	char *data = NULL;
	uint32_t len = 0;
	ssize_t read_len;
	struct lttng_viewer_get_packet get_packet_info;
	struct lttng_viewer_trace_packet reply;
	struct relay_viewer_stream *vstream = NULL;

	DBG2("Relay get data packet");

	health_code_update();

	ret = recv_request(conn->sock, &get_packet_info,
			sizeof(get_packet_info));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	/* From this point on, the error label can be reached. */
	memset(&reply, 0, sizeof(reply));

	vstream = viewer_stream_get_by_id(be64toh(get_packet_info.stream_id));
	if (!vstream) {
		DBG("Client requested packet of unknown stream id %" PRIu64,
				be64toh(get_packet_info.stream_id));
		reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
		goto send_reply_nolock;
	}

	pthread_mutex_lock(&vstream->stream->lock);

	len = be32toh(get_packet_info.len);
	data = zmalloc(len);
	if (!data) {
		PERROR("relay data zmalloc");
		goto error;
	}

1516
1517 ret = lseek(vstream->stream_fd->fd, be64toh(get_packet_info.offset),
1518 SEEK_SET);
1519 if (ret < 0) {
1520 PERROR("lseek fd %d to offset %" PRIu64, vstream->stream_fd->fd,
1521 be64toh(get_packet_info.offset));
1522 goto error;
1523 }
1524 read_len = lttng_read(vstream->stream_fd->fd, data, len);
1525 if (read_len < len) {
1526 PERROR("Relay reading trace file, fd: %d, offset: %" PRIu64,
1527 vstream->stream_fd->fd,
1528 be64toh(get_packet_info.offset));
1529 goto error;
1530 }
1531 reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_OK);
1532 reply.len = htobe32(len);
1533 send_data = 1;
1534 goto send_reply;
1535
1536 error:
1537 reply.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
1538
1539 send_reply:
1540 if (vstream) {
1541 pthread_mutex_unlock(&vstream->stream->lock);
1542 }
1543 send_reply_nolock:
1544 reply.flags = htobe32(reply.flags);
1545
1546 health_code_update();
1547
1548 ret = send_response(conn->sock, &reply, sizeof(reply));
1549 if (ret < 0) {
1550 goto end_free;
1551 }
1552 health_code_update();
1553
1554 if (send_data) {
1555 health_code_update();
1556 ret = send_response(conn->sock, data, len);
1557 if (ret < 0) {
1558 goto end_free;
1559 }
1560 health_code_update();
1561 }
1562
1563 DBG("Sent %u bytes for stream %" PRIu64, len,
1564 be64toh(get_packet_info.stream_id));
1565
1566 end_free:
1567 free(data);
1568 end:
1569 if (vstream) {
1570 viewer_stream_put(vstream);
1571 }
1572 return ret;
1573 }

/*
 * Send the session's metadata.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_get_metadata(struct relay_connection *conn)
{
	int ret = 0;
	ssize_t read_len;
	uint64_t len = 0;
	char *data = NULL;
	struct lttng_viewer_get_metadata request;
	struct lttng_viewer_metadata_packet reply;
	struct relay_viewer_stream *vstream = NULL;

	assert(conn);

	DBG("Relay get metadata");

	health_code_update();

	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto end;
	}
	health_code_update();

	memset(&reply, 0, sizeof(reply));

	vstream = viewer_stream_get_by_id(be64toh(request.stream_id));
	if (!vstream) {
		/*
		 * The metadata stream can be closed by a CLOSE command
		 * just before we attach. It can also be closed by
		 * per-pid tracing during tracing. Therefore, it is
		 * possible that we cannot find this viewer stream.
		 * Reply back to the client with an error if we cannot
		 * find it.
		 */
		DBG("Client requested metadata of unknown stream id %" PRIu64,
				be64toh(request.stream_id));
		reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);
		goto send_reply;
	}
	pthread_mutex_lock(&vstream->stream->lock);
	if (!vstream->stream->is_metadata) {
		ERR("Invalid metadata stream");
		goto error;
	}

	assert(vstream->metadata_sent <= vstream->stream->metadata_received);

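	/*
	 * metadata_received and metadata_sent are byte counts, so their
	 * difference is exactly the amount of metadata left to ship to this
	 * viewer.
	 */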
	len = vstream->stream->metadata_received - vstream->metadata_sent;
	if (len == 0) {
		reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
		goto send_reply;
	}

	/* First time: open the metadata file. */
	if (!vstream->stream_fd) {
		char fullpath[PATH_MAX];

		ret = snprintf(fullpath, PATH_MAX, "%s/%s", vstream->path_name,
				vstream->channel_name);
		if (ret < 0) {
			goto error;
		}
		ret = open(fullpath, O_RDONLY);
		if (ret < 0) {
			PERROR("Relay opening metadata file");
			goto error;
		}
		vstream->stream_fd = stream_fd_create(ret);
		if (!vstream->stream_fd) {
			if (close(ret)) {
				PERROR("close");
			}
			goto error;
		}
	}

	reply.len = htobe64(len);
	data = zmalloc(len);
	if (!data) {
		PERROR("viewer metadata zmalloc");
		goto error;
	}

	read_len = lttng_read(vstream->stream_fd->fd, data, len);
	if (read_len < len) {
		PERROR("Relay reading metadata file");
		goto error;
	}
	vstream->metadata_sent += read_len;
	if (vstream->metadata_sent == vstream->stream->metadata_received
			&& vstream->stream->closed) {
		/* Release ownership for the viewer metadata stream. */
		viewer_stream_put(vstream);
	}

	reply.status = htobe32(LTTNG_VIEWER_METADATA_OK);

	goto send_reply;

error:
	reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);

send_reply:
	health_code_update();
	if (vstream) {
		pthread_mutex_unlock(&vstream->stream->lock);
	}
	ret = send_response(conn->sock, &reply, sizeof(reply));
	if (ret < 0) {
		goto end_free;
	}
	health_code_update();

	if (len > 0) {
		ret = send_response(conn->sock, data, len);
		if (ret < 0) {
			goto end_free;
		}
	}

	DBG("Sent %" PRIu64 " bytes of metadata for stream %" PRIu64, len,
			be64toh(request.stream_id));

	DBG("Metadata sent");

end_free:
	free(data);
end:
	if (vstream) {
		viewer_stream_put(vstream);
	}
	return ret;
}

/*
 * Create a viewer session.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_create_session(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_create_session_response resp;

	DBG("Viewer create session received");

	memset(&resp, 0, sizeof(resp));
	resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_OK);
	conn->viewer_session = viewer_session_create();
	if (!conn->viewer_session) {
		ERR("Failed to allocate viewer session");
		resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_ERR);
		goto send_reply;
	}

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &resp, sizeof(resp));
	if (ret < 0) {
		goto end;
	}
	health_code_update();
	ret = 0;

end:
	return ret;
}

/*
 * Detach a viewer session.
 *
 * Return 0 on success or else a negative value.
 */
static
int viewer_detach_session(struct relay_connection *conn)
{
	int ret;
	struct lttng_viewer_detach_session_response response;
	struct lttng_viewer_detach_session_request request;
	struct relay_session *session = NULL;
	uint64_t viewer_session_to_close;

	DBG("Viewer detach session received");

	assert(conn);

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));
	if (ret < 0) {
		goto end;
	}
	viewer_session_to_close = be64toh(request.session_id);

	health_code_update();

	memset(&response, 0, sizeof(response));

	if (!conn->viewer_session) {
		DBG("Client trying to detach before creating a live viewer session");
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
		goto send_reply;
	}

	DBG("Detaching from session ID %" PRIu64, viewer_session_to_close);

	session = session_get_by_id(be64toh(request.session_id));
	if (!session) {
		DBG("Relay session %" PRIu64 " not found",
				be64toh(request.session_id));
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_UNK);
		goto send_reply;
	}

	ret = viewer_session_is_attached(conn->viewer_session, session);
	if (ret != 1) {
		DBG("Not attached to this session");
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
		goto send_reply_put;
	}

	viewer_session_close_one_session(conn->viewer_session, session);
	response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_OK);
	DBG("Session %" PRIu64 " detached.", viewer_session_to_close);

send_reply_put:
	session_put(session);

send_reply:
	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
	if (ret < 0) {
		goto end;
	}
	health_code_update();
	ret = 0;

end:
	return ret;
}

/*
 * live_relay_unknown_command: send an error reply when an unknown command is
 * received.
 */
static
void live_relay_unknown_command(struct relay_connection *conn)
{
	struct lttcomm_relayd_generic_reply reply;

	memset(&reply, 0, sizeof(reply));
	reply.ret_code = htobe32(LTTNG_ERR_UNK);
	(void) send_response(conn->sock, &reply, sizeof(reply));
}

/*
 * Process the commands received on the control socket.
 */
static
int process_control(struct lttng_viewer_cmd *recv_hdr,
		struct relay_connection *conn)
{
	int ret = 0;
	uint32_t msg_value;

	msg_value = be32toh(recv_hdr->cmd);

	/*
	 * Make sure we've done the version check before any command other
	 * than a new client connection.
	 */
	if (msg_value != LTTNG_VIEWER_CONNECT && !conn->version_check_done) {
		ERR("Viewer conn value %" PRIu32 " before version check", msg_value);
		ret = -1;
		goto end;
	}

	switch (msg_value) {
	case LTTNG_VIEWER_CONNECT:
		ret = viewer_connect(conn);
		break;
	case LTTNG_VIEWER_LIST_SESSIONS:
		ret = viewer_list_sessions(conn);
		break;
	case LTTNG_VIEWER_ATTACH_SESSION:
		ret = viewer_attach_session(conn);
		break;
	case LTTNG_VIEWER_GET_NEXT_INDEX:
		ret = viewer_get_next_index(conn);
		break;
	case LTTNG_VIEWER_GET_PACKET:
		ret = viewer_get_packet(conn);
		break;
	case LTTNG_VIEWER_GET_METADATA:
		ret = viewer_get_metadata(conn);
		break;
	case LTTNG_VIEWER_GET_NEW_STREAMS:
		ret = viewer_get_new_streams(conn);
		break;
	case LTTNG_VIEWER_CREATE_SESSION:
		ret = viewer_create_session(conn);
		break;
	case LTTNG_VIEWER_DETACH_SESSION:
		ret = viewer_detach_session(conn);
		break;
	default:
		ERR("Received unknown viewer command (%u)",
				be32toh(recv_hdr->cmd));
		live_relay_unknown_command(conn);
		ret = -1;
		goto end;
	}

end:
	return ret;
}

static
void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
{
	int ret;

	(void) lttng_poll_del(events, pollfd);

	ret = close(pollfd);
	if (ret < 0) {
		ERR("Closing pollfd %d", pollfd);
	}
}

/*
 * This thread does the actual work: it processes the viewer commands
 * received on established connections.
 */
static
void *thread_worker(void *data)
{
	int ret, err = -1;
	uint32_t nb_fd;
	struct lttng_poll_event events;
	struct lttng_ht *viewer_connections_ht;
	struct lttng_ht_iter iter;
	struct lttng_viewer_cmd recv_hdr;
	struct relay_connection *destroy_conn;

	DBG("[thread] Live viewer relay worker started");

	rcu_register_thread();

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_WORKER);

	if (testpoint(relayd_thread_live_worker)) {
		goto error_testpoint;
	}

	/* table of connections indexed on socket */
	viewer_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!viewer_connections_ht) {
		goto viewer_connections_ht_error;
	}

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, live_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

restart:
	while (1) {
		int i;

		health_code_update();

		/* Infinite blocking call, waiting for transmission */
		DBG3("Relayd live viewer worker thread polling...");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/*
		 * Process control. The control connection is prioritised so we don't
		 * starve it with high throughput tracing data on the data
		 * connection.
		 */
		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			uint32_t revents = LTTNG_POLL_GETEV(&events, i);
			int pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the relay conn pipe for new connection. */
			if (pollfd == live_conn_pipe[0]) {
				if (revents & LPOLLIN) {
					struct relay_connection *conn;

					ret = lttng_read(live_conn_pipe[0],
							&conn, sizeof(conn));
					if (ret < 0) {
						goto error;
					}
					lttng_poll_add(&events, conn->sock->fd,
							LPOLLIN | LPOLLRDHUP);
					connection_ht_add(viewer_connections_ht, conn);
					DBG("Connection socket %d added to poll", conn->sock->fd);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Relay live pipe error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/* Connection activity. */
				struct relay_connection *conn;

				conn = connection_get_by_sock(viewer_connections_ht, pollfd);
				if (!conn) {
					continue;
				}

				if (revents & LPOLLIN) {
					ret = conn->sock->ops->recvmsg(conn->sock, &recv_hdr,
							sizeof(recv_hdr), 0);
					if (ret <= 0) {
						/* Connection closed. */
						cleanup_connection_pollfd(&events, pollfd);
						/* Put "create" ownership reference. */
						connection_put(conn);
						DBG("Viewer control conn closed with %d", pollfd);
					} else {
						ret = process_control(&recv_hdr, conn);
						if (ret < 0) {
							/* Clear the session on error. */
							cleanup_connection_pollfd(&events, pollfd);
							/* Put "create" ownership reference. */
							connection_put(conn);
							DBG("Viewer connection closed with %d", pollfd);
						}
					}
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					cleanup_connection_pollfd(&events, pollfd);
					/* Put "create" ownership reference. */
					connection_put(conn);
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					connection_put(conn);
					goto error;
				}
				/* Put local "get_by_sock" reference. */
				connection_put(conn);
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);

	/* Cleanup remaining connection objects. */
	rcu_read_lock();
	cds_lfht_for_each_entry(viewer_connections_ht->ht, &iter.iter,
			destroy_conn,
			sock_n.node) {
		health_code_update();
		connection_put(destroy_conn);
	}
	rcu_read_unlock();
error_poll_create:
	lttng_ht_destroy(viewer_connections_ht);
viewer_connections_ht_error:
	/* Close relay conn pipes */
	utils_close_pipe(live_conn_pipe);
	if (err) {
		DBG("Viewer worker thread exited with error");
	}
	DBG("Viewer worker thread cleanup complete");
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_relayd);
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
	}
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create the live connection pipe used to wake the worker thread.
 * Closed in cleanup().
 */
static int create_conn_pipe(void)
{
	return utils_create_pipe_cloexec(live_conn_pipe);
}

int relayd_live_join(void)
{
	int ret, retval = 0;
	void *status;

	ret = pthread_join(live_listener_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live listener");
		retval = -1;
	}

	ret = pthread_join(live_worker_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live worker");
		retval = -1;
	}

	ret = pthread_join(live_dispatcher_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live dispatcher");
		retval = -1;
	}

	cleanup_relayd_live();

	return retval;
}

/*
 * Entry point: create and start the live viewer threads (dispatcher, worker
 * and listener).
 */
int relayd_live_create(struct lttng_uri *uri)
{
	int ret = 0, retval = 0;
	void *status;
	int is_root;

	if (!uri) {
		retval = -1;
		goto exit_init_data;
	}
	live_uri = uri;

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (!is_root) {
		if (live_uri->port < 1024) {
			ERR("Need to be root to use ports < 1024");
			retval = -1;
			goto exit_init_data;
		}
	}

	/* Set up the live connection pipe. */
	if (create_conn_pipe()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Init relay command queue. */
	cds_wfcq_init(&viewer_conn_queue.head, &viewer_conn_queue.tail);

	/* Set up max poll set size */
	if (lttng_poll_set_max_size()) {
		retval = -1;
		goto exit_init_data;
	}

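	/*
	 * The threads are started consumers-first: dispatcher, then worker,
	 * then the listener, so that an accepted connection always has a
	 * dispatcher and a worker ready to pick it up.
	 */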
	/* Setup the dispatcher thread */
	ret = pthread_create(&live_dispatcher_thread, default_pthread_attr(),
			thread_dispatcher, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create viewer dispatcher");
		retval = -1;
		goto exit_dispatcher_thread;
	}

	/* Setup the worker thread */
	ret = pthread_create(&live_worker_thread, default_pthread_attr(),
			thread_worker, NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create viewer worker");
		retval = -1;
		goto exit_worker_thread;
	}

	/* Setup the listener thread */
	ret = pthread_create(&live_listener_thread, default_pthread_attr(),
			thread_listener, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create viewer listener");
		retval = -1;
		goto exit_listener_thread;
	}

	/*
	 * All OK, started all threads.
	 */
	return retval;

	/*
	 * Join on the live_listener_thread should anything be added after
	 * the live_listener thread's creation.
	 */

exit_listener_thread:

	ret = pthread_join(live_worker_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live worker");
		retval = -1;
	}
exit_worker_thread:

	ret = pthread_join(live_dispatcher_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join live dispatcher");
		retval = -1;
	}
exit_dispatcher_thread:

exit_init_data:
	cleanup_relayd_live();

	return retval;
}