Fix: consumer: snapshot: assertion on subsequent snapshot
[lttng-tools.git] / src / common / kernel-consumer / kernel-consumer.cpp
1 /*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #define _LGPL_SOURCE
11 #include <poll.h>
12 #include <pthread.h>
13 #include <stdlib.h>
14 #include <string.h>
15 #include <sys/mman.h>
16 #include <sys/socket.h>
17 #include <sys/types.h>
18 #include <inttypes.h>
19 #include <unistd.h>
20 #include <sys/stat.h>
21 #include <stdint.h>
22
23 #include <bin/lttng-consumerd/health-consumerd.hpp>
24 #include <common/common.hpp>
25 #include <common/kernel-ctl/kernel-ctl.hpp>
26 #include <common/sessiond-comm/sessiond-comm.hpp>
27 #include <common/sessiond-comm/relayd.hpp>
28 #include <common/compat/fcntl.hpp>
29 #include <common/compat/endian.hpp>
30 #include <common/pipe.hpp>
31 #include <common/relayd/relayd.hpp>
32 #include <common/utils.hpp>
33 #include <common/consumer/consumer-stream.hpp>
34 #include <common/index/index.hpp>
35 #include <common/consumer/consumer-timer.hpp>
36 #include <common/optional.hpp>
37 #include <common/buffer-view.hpp>
38 #include <common/consumer/consumer.hpp>
39 #include <common/consumer/metadata-bucket.hpp>
40
41 #include "kernel-consumer.hpp"
42
43 extern struct lttng_consumer_global_data the_consumer_data;
44 extern int consumer_poll_timeout;
45
46 /*
47 * Take a snapshot for a specific fd
48 *
49 * Returns 0 on success, < 0 on error
50 */
51 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
52 {
53 int ret = 0;
54 int infd = stream->wait_fd;
55
56 ret = kernctl_snapshot(infd);
57 /*
58 * -EAGAIN is not an error, it just means that there is no data to
59 * be read.
60 */
61 if (ret != 0 && ret != -EAGAIN) {
62 PERROR("Getting sub-buffer snapshot.");
63 }
64
65 return ret;
66 }
67
68 /*
69 * Sample consumed and produced positions for a specific fd.
70 *
71 * Returns 0 on success, < 0 on error.
72 */
73 int lttng_kconsumer_sample_snapshot_positions(
74 struct lttng_consumer_stream *stream)
75 {
76 LTTNG_ASSERT(stream);
77
78 return kernctl_snapshot_sample_positions(stream->wait_fd);
79 }
80
81 /*
82 * Get the produced position
83 *
84 * Returns 0 on success, < 0 on error
85 */
86 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
87 unsigned long *pos)
88 {
89 int ret;
90 int infd = stream->wait_fd;
91
92 ret = kernctl_snapshot_get_produced(infd, pos);
93 if (ret != 0) {
94 PERROR("kernctl_snapshot_get_produced");
95 }
96
97 return ret;
98 }
99
100 /*
101 * Get the consumerd position
102 *
103 * Returns 0 on success, < 0 on error
104 */
105 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
106 unsigned long *pos)
107 {
108 int ret;
109 int infd = stream->wait_fd;
110
111 ret = kernctl_snapshot_get_consumed(infd, pos);
112 if (ret != 0) {
113 PERROR("kernctl_snapshot_get_consumed");
114 }
115
116 return ret;
117 }
118
119 static
120 int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
121 const char **addr)
122 {
123 int ret;
124 unsigned long mmap_offset;
125 const char *mmap_base = (const char *) stream->mmap_base;
126
127 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
128 if (ret < 0) {
129 PERROR("Failed to get mmap read offset");
130 goto error;
131 }
132
133 *addr = mmap_base + mmap_offset;
134 error:
135 return ret;
136 }
137
138 static void finalize_snapshot_stream(
139 struct lttng_consumer_stream *stream, uint64_t relayd_id)
140 {
141 ASSERT_LOCKED(stream->lock);
142
143 if (relayd_id == (uint64_t) -1ULL) {
144 if (stream->out_fd >= 0) {
145 const int ret = close(stream->out_fd);
146
147 if (ret < 0) {
148 PERROR("Failed to close stream snapshot output file descriptor");
149 }
150
151 stream->out_fd = -1;
152 }
153 } else {
154 close_relayd_stream(stream);
155 stream->net_seq_idx = (uint64_t) -1ULL;
156 }
157
158 lttng_trace_chunk_put(stream->trace_chunk);
159 stream->trace_chunk = NULL;
160 }
161
/*
 * Take a snapshot of all the stream of a channel
 * RCU read-side lock must be held across this function to ensure existence of
 * channel.
 *
 * The channel lock is held for the whole duration so the channel's stream
 * list and attributes cannot change while the snapshot is recorded. Each
 * stream is locked individually while its buffers are copied.
 *
 * Returns 0 on success, < 0 on error
 */
static int lttng_kconsumer_snapshot_channel(
		struct lttng_consumer_channel *channel,
		uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream)
{
	int ret;
	struct lttng_consumer_stream *stream;

	DBG("Kernel consumer snapshot channel %" PRIu64, key);

	/* Prevent channel modifications while we perform the snapshot.*/
	pthread_mutex_lock(&channel->lock);

	rcu_read_lock();

	/* Splice is not supported yet for channel snapshot. */
	if (channel->output != CONSUMER_CHANNEL_MMAP) {
		ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
				channel->name);
		ret = -1;
		goto end;
	}

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		unsigned long consumed_pos, produced_pos;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		LTTNG_ASSERT(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto end_unlock;
		}
		/*
		 * The stream must not already hold a chunk reference; the
		 * reference taken above is released by
		 * finalize_snapshot_stream() when this stream's snapshot
		 * completes or fails.
		 */
		LTTNG_ASSERT(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		/*
		 * Assign the received relayd ID so we can use it for streaming. The streams
		 * are not visible to anyone so this is OK to change it.
		 */
		stream->net_seq_idx = relayd_id;
		channel->relayd_id = relayd_id;
		if (relayd_id != (uint64_t) -1ULL) {
			/* Network output: announce the stream to the relay daemon. */
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				ERR("sending stream to relayd");
				goto error_finalize_stream;
			}
		} else {
			/* Local output: create the snapshot trace files on disk. */
			ret = consumer_stream_create_output_files(stream,
					false);
			if (ret < 0) {
				goto error_finalize_stream;
			}
			DBG("Kernel consumer snapshot stream (%" PRIu64 ")",
					stream->key);
		}

		ret = kernctl_buffer_flush_empty(stream->wait_fd);
		if (ret < 0) {
			/*
			 * Doing a buffer flush which does not take into
			 * account empty packets. This is not perfect
			 * for stream intersection, but required as a
			 * fall-back when "flush_empty" is not
			 * implemented by lttng-modules.
			 */
			ret = kernctl_buffer_flush(stream->wait_fd);
			if (ret < 0) {
				ERR("Failed to flush kernel stream");
				goto error_finalize_stream;
			}
			/*
			 * NOTE(review): this exits the per-stream loop
			 * entirely (remaining streams are not snapshotted)
			 * and skips finalize_snapshot_stream(), leaving
			 * stream->trace_chunk set — confirm this early exit
			 * is intended when "flush_empty" is unavailable.
			 */
			goto end_unlock;
		}

		ret = lttng_kconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking kernel snapshot");
			goto error_finalize_stream;
		}

		/* Sample the positions delimiting the data to copy. */
		ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced kernel snapshot position");
			goto error_finalize_stream;
		}

		ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd kernel snapshot position");
			goto error_finalize_stream;
		}

		/*
		 * Advance the start position so at most
		 * nb_packets_per_stream packets are captured.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

		/* Copy every sub-buffer between the two sampled positions. */
		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();
			DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);

			ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("kernctl_get_subbuf snapshot");
					goto error_finalize_stream;
				}
				/* Sub-buffer not available: skip it, count a lost packet. */
				DBG("Kernel consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
					stream, &subbuf_view,
					padded_len - len);
			/*
			 * We write the padded len in local tracefiles but the data len
			 * when using a relay. Display the error but continue processing
			 * to try to release the subbuffer.
			 */
			if (relayd_id != (uint64_t) -1ULL) {
				if (read_len != len) {
					ERR("Error sending to the relay (ret: %zd != len: %lu)",
							read_len, len);
				}
			} else {
				if (read_len != padded_len) {
					ERR("Error writing to tracefile (ret: %zd != len: %lu)",
							read_len, padded_len);
				}
			}

			ret = kernctl_put_subbuf(stream->wait_fd);
			if (ret < 0) {
				ERR("Snapshot kernctl_put_subbuf");
				goto error_finalize_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Close outputs and release the stream's chunk reference. */
		finalize_snapshot_stream(stream, relayd_id);
		pthread_mutex_unlock(&stream->lock);
	}

	/* All good! */
	ret = 0;
	goto end;

error_put_subbuf:
	/*
	 * NOTE(review): ret is overwritten here; if kernctl_put_subbuf()
	 * succeeds, the original error that brought us to this label is lost
	 * and the function returns 0 — verify this is intended.
	 */
	ret = kernctl_put_subbuf(stream->wait_fd);
	if (ret < 0) {
		ERR("Snapshot kernctl_put_subbuf error path");
	}
error_finalize_stream:
	finalize_snapshot_stream(stream, relayd_id);
end_unlock:
	pthread_mutex_unlock(&stream->lock);
end:
	rcu_read_unlock();
	pthread_mutex_unlock(&channel->lock);
	return ret;
}
366
/*
 * Read the whole metadata available for a snapshot.
 * RCU read-side lock must be held across this function to ensure existence of
 * metadata_channel.
 *
 * The metadata stream is consumed once per snapshot: it is destroyed on the
 * way out (success and error alike) and the channel's metadata_stream
 * pointer is reset to NULL.
 *
 * Returns 0 on success, < 0 on error
 */
static int lttng_kconsumer_snapshot_metadata(
		struct lttng_consumer_channel *metadata_channel,
		uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret, use_relayd = 0;
	ssize_t ret_read;
	struct lttng_consumer_stream *metadata_stream;

	LTTNG_ASSERT(ctx);

	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_stream = metadata_channel->metadata_stream;
	LTTNG_ASSERT(metadata_stream);

	/* Serialize with any other reader of this metadata stream. */
	metadata_stream->read_subbuffer_ops.lock(metadata_stream);
	LTTNG_ASSERT(metadata_channel->trace_chunk);
	LTTNG_ASSERT(metadata_stream->trace_chunk);

	/* Flag once that we have a valid relayd for the stream. */
	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	if (use_relayd) {
		/* Network output: announce the stream to the relay daemon. */
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_snapshot;
		}
	} else {
		/* Local output: create the snapshot metadata file on disk. */
		ret = consumer_stream_create_output_files(metadata_stream,
				false);
		if (ret < 0) {
			goto error_snapshot;
		}
	}

	/* Drain the stream until no metadata sub-buffer remains. */
	do {
		health_code_update();

		ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret_read < 0) {
			ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
					ret_read);
			ret = ret_read;
			goto error_snapshot;
		}
	} while (ret_read > 0);

	if (use_relayd) {
		close_relayd_stream(metadata_stream);
		metadata_stream->net_seq_idx = (uint64_t) -1ULL;
	} else {
		if (metadata_stream->out_fd >= 0) {
			ret = close(metadata_stream->out_fd);
			if (ret < 0) {
				PERROR("Kernel consumer snapshot metadata close out_fd");
				/*
				 * Don't go on error here since the snapshot was successful at this
				 * point but somehow the close failed.
				 */
			}
			metadata_stream->out_fd = -1;
			/* Release the stream's reference on the trace chunk. */
			lttng_trace_chunk_put(metadata_stream->trace_chunk);
			metadata_stream->trace_chunk = NULL;
		}
	}

	ret = 0;
error_snapshot:
	metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
	/* Destroy the single-use metadata stream in all cases. */
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;
	rcu_read_unlock();
	return ret;
}
454
455 /*
456 * Receive command from session daemon and process it.
457 *
458 * Return 1 on success else a negative value or 0.
459 */
460 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
461 int sock, struct pollfd *consumer_sockpoll)
462 {
463 int ret_func;
464 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
465 struct lttcomm_consumer_msg msg;
466
467 health_code_update();
468
469 {
470 ssize_t ret_recv;
471
472 ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
473 if (ret_recv != sizeof(msg)) {
474 if (ret_recv > 0) {
475 lttng_consumer_send_error(ctx,
476 LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
477 ret_recv = -1;
478 }
479 return ret_recv;
480 }
481 }
482
483 health_code_update();
484
485 /* Deprecated command */
486 LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP);
487
488 health_code_update();
489
490 /* relayd needs RCU read-side protection */
491 rcu_read_lock();
492
493 switch (msg.cmd_type) {
494 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
495 {
496 uint32_t major = msg.u.relayd_sock.major;
497 uint32_t minor = msg.u.relayd_sock.minor;
498 enum lttcomm_sock_proto protocol = (enum lttcomm_sock_proto)
499 msg.u.relayd_sock.relayd_socket_protocol;
500
501 /* Session daemon status message are handled in the following call. */
502 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
503 msg.u.relayd_sock.type, ctx, sock,
504 consumer_sockpoll, msg.u.relayd_sock.session_id,
505 msg.u.relayd_sock.relayd_session_id, major,
506 minor, protocol);
507 goto end_nosignal;
508 }
509 case LTTNG_CONSUMER_ADD_CHANNEL:
510 {
511 struct lttng_consumer_channel *new_channel;
512 int ret_send_status, ret_add_channel = 0;
513 const uint64_t chunk_id = msg.u.channel.chunk_id.value;
514
515 health_code_update();
516
517 /* First send a status message before receiving the fds. */
518 ret_send_status = consumer_send_status_msg(sock, ret_code);
519 if (ret_send_status < 0) {
520 /* Somehow, the session daemon is not responding anymore. */
521 goto error_fatal;
522 }
523
524 health_code_update();
525
526 DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
527 new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
528 msg.u.channel.session_id,
529 msg.u.channel.chunk_id.is_set ?
530 &chunk_id : NULL,
531 msg.u.channel.pathname,
532 msg.u.channel.name,
533 msg.u.channel.relayd_id, msg.u.channel.output,
534 msg.u.channel.tracefile_size,
535 msg.u.channel.tracefile_count, 0,
536 msg.u.channel.monitor,
537 msg.u.channel.live_timer_interval,
538 msg.u.channel.is_live,
539 NULL, NULL);
540 if (new_channel == NULL) {
541 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
542 goto end_nosignal;
543 }
544 new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
545 switch (msg.u.channel.output) {
546 case LTTNG_EVENT_SPLICE:
547 new_channel->output = CONSUMER_CHANNEL_SPLICE;
548 break;
549 case LTTNG_EVENT_MMAP:
550 new_channel->output = CONSUMER_CHANNEL_MMAP;
551 break;
552 default:
553 ERR("Channel output unknown %d", msg.u.channel.output);
554 goto end_nosignal;
555 }
556
557 /* Translate and save channel type. */
558 switch (msg.u.channel.type) {
559 case CONSUMER_CHANNEL_TYPE_DATA:
560 case CONSUMER_CHANNEL_TYPE_METADATA:
561 new_channel->type = (consumer_channel_type) msg.u.channel.type;
562 break;
563 default:
564 abort();
565 goto end_nosignal;
566 };
567
568 health_code_update();
569
570 if (ctx->on_recv_channel != NULL) {
571 int ret_recv_channel =
572 ctx->on_recv_channel(new_channel);
573 if (ret_recv_channel == 0) {
574 ret_add_channel = consumer_add_channel(
575 new_channel, ctx);
576 } else if (ret_recv_channel < 0) {
577 goto end_nosignal;
578 }
579 } else {
580 ret_add_channel =
581 consumer_add_channel(new_channel, ctx);
582 }
583 if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA &&
584 !ret_add_channel) {
585 int monitor_start_ret;
586
587 DBG("Consumer starting monitor timer");
588 consumer_timer_live_start(new_channel,
589 msg.u.channel.live_timer_interval);
590 monitor_start_ret = consumer_timer_monitor_start(
591 new_channel,
592 msg.u.channel.monitor_timer_interval);
593 if (monitor_start_ret < 0) {
594 ERR("Starting channel monitoring timer failed");
595 goto end_nosignal;
596 }
597 }
598
599 health_code_update();
600
601 /* If we received an error in add_channel, we need to report it. */
602 if (ret_add_channel < 0) {
603 ret_send_status = consumer_send_status_msg(
604 sock, ret_add_channel);
605 if (ret_send_status < 0) {
606 goto error_fatal;
607 }
608 goto end_nosignal;
609 }
610
611 goto end_nosignal;
612 }
613 case LTTNG_CONSUMER_ADD_STREAM:
614 {
615 int fd;
616 struct lttng_pipe *stream_pipe;
617 struct lttng_consumer_stream *new_stream;
618 struct lttng_consumer_channel *channel;
619 int alloc_ret = 0;
620 int ret_send_status, ret_poll, ret_get_max_subbuf_size;
621 ssize_t ret_pipe_write, ret_recv;
622
623 /*
624 * Get stream's channel reference. Needed when adding the stream to the
625 * global hash table.
626 */
627 channel = consumer_find_channel(msg.u.stream.channel_key);
628 if (!channel) {
629 /*
630 * We could not find the channel. Can happen if cpu hotplug
631 * happens while tearing down.
632 */
633 ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
634 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
635 }
636
637 health_code_update();
638
639 /* First send a status message before receiving the fds. */
640 ret_send_status = consumer_send_status_msg(sock, ret_code);
641 if (ret_send_status < 0) {
642 /* Somehow, the session daemon is not responding anymore. */
643 goto error_add_stream_fatal;
644 }
645
646 health_code_update();
647
648 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
649 /* Channel was not found. */
650 goto error_add_stream_nosignal;
651 }
652
653 /* Blocking call */
654 health_poll_entry();
655 ret_poll = lttng_consumer_poll_socket(consumer_sockpoll);
656 health_poll_exit();
657 if (ret_poll) {
658 goto error_add_stream_fatal;
659 }
660
661 health_code_update();
662
663 /* Get stream file descriptor from socket */
664 ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
665 if (ret_recv != sizeof(fd)) {
666 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
667 ret_func = ret_recv;
668 goto end;
669 }
670
671 health_code_update();
672
673 /*
674 * Send status code to session daemon only if the recv works. If the
675 * above recv() failed, the session daemon is notified through the
676 * error socket and the teardown is eventually done.
677 */
678 ret_send_status = consumer_send_status_msg(sock, ret_code);
679 if (ret_send_status < 0) {
680 /* Somehow, the session daemon is not responding anymore. */
681 goto error_add_stream_nosignal;
682 }
683
684 health_code_update();
685
686 pthread_mutex_lock(&channel->lock);
687 new_stream = consumer_stream_create(
688 channel,
689 channel->key,
690 fd,
691 channel->name,
692 channel->relayd_id,
693 channel->session_id,
694 channel->trace_chunk,
695 msg.u.stream.cpu,
696 &alloc_ret,
697 channel->type,
698 channel->monitor);
699 if (new_stream == NULL) {
700 switch (alloc_ret) {
701 case -ENOMEM:
702 case -EINVAL:
703 default:
704 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
705 break;
706 }
707 pthread_mutex_unlock(&channel->lock);
708 goto error_add_stream_nosignal;
709 }
710
711 new_stream->wait_fd = fd;
712 ret_get_max_subbuf_size = kernctl_get_max_subbuf_size(
713 new_stream->wait_fd, &new_stream->max_sb_size);
714 if (ret_get_max_subbuf_size < 0) {
715 pthread_mutex_unlock(&channel->lock);
716 ERR("Failed to get kernel maximal subbuffer size");
717 goto error_add_stream_nosignal;
718 }
719
720 consumer_stream_update_channel_attributes(new_stream,
721 channel);
722
723 /*
724 * We've just assigned the channel to the stream so increment the
725 * refcount right now. We don't need to increment the refcount for
726 * streams in no monitor because we handle manually the cleanup of
727 * those. It is very important to make sure there is NO prior
728 * consumer_del_stream() calls or else the refcount will be unbalanced.
729 */
730 if (channel->monitor) {
731 uatomic_inc(&new_stream->chan->refcount);
732 }
733
734 /*
735 * The buffer flush is done on the session daemon side for the kernel
736 * so no need for the stream "hangup_flush_done" variable to be
737 * tracked. This is important for a kernel stream since we don't rely
738 * on the flush state of the stream to read data. It's not the case for
739 * user space tracing.
740 */
741 new_stream->hangup_flush_done = 0;
742
743 health_code_update();
744
745 pthread_mutex_lock(&new_stream->lock);
746 if (ctx->on_recv_stream) {
747 int ret_recv_stream = ctx->on_recv_stream(new_stream);
748 if (ret_recv_stream < 0) {
749 pthread_mutex_unlock(&new_stream->lock);
750 pthread_mutex_unlock(&channel->lock);
751 consumer_stream_free(new_stream);
752 goto error_add_stream_nosignal;
753 }
754 }
755 health_code_update();
756
757 if (new_stream->metadata_flag) {
758 channel->metadata_stream = new_stream;
759 }
760
761 /* Do not monitor this stream. */
762 if (!channel->monitor) {
763 DBG("Kernel consumer add stream %s in no monitor mode with "
764 "relayd id %" PRIu64, new_stream->name,
765 new_stream->net_seq_idx);
766 cds_list_add(&new_stream->send_node, &channel->streams.head);
767 pthread_mutex_unlock(&new_stream->lock);
768 pthread_mutex_unlock(&channel->lock);
769 goto end_add_stream;
770 }
771
772 /* Send stream to relayd if the stream has an ID. */
773 if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
774 int ret_send_relayd_stream;
775
776 ret_send_relayd_stream = consumer_send_relayd_stream(
777 new_stream, new_stream->chan->pathname);
778 if (ret_send_relayd_stream < 0) {
779 pthread_mutex_unlock(&new_stream->lock);
780 pthread_mutex_unlock(&channel->lock);
781 consumer_stream_free(new_stream);
782 goto error_add_stream_nosignal;
783 }
784
785 /*
786 * If adding an extra stream to an already
787 * existing channel (e.g. cpu hotplug), we need
788 * to send the "streams_sent" command to relayd.
789 */
790 if (channel->streams_sent_to_relayd) {
791 int ret_send_relayd_streams_sent;
792
793 ret_send_relayd_streams_sent =
794 consumer_send_relayd_streams_sent(
795 new_stream->net_seq_idx);
796 if (ret_send_relayd_streams_sent < 0) {
797 pthread_mutex_unlock(&new_stream->lock);
798 pthread_mutex_unlock(&channel->lock);
799 goto error_add_stream_nosignal;
800 }
801 }
802 }
803 pthread_mutex_unlock(&new_stream->lock);
804 pthread_mutex_unlock(&channel->lock);
805
806 /* Get the right pipe where the stream will be sent. */
807 if (new_stream->metadata_flag) {
808 consumer_add_metadata_stream(new_stream);
809 stream_pipe = ctx->consumer_metadata_pipe;
810 } else {
811 consumer_add_data_stream(new_stream);
812 stream_pipe = ctx->consumer_data_pipe;
813 }
814
815 /* Visible to other threads */
816 new_stream->globally_visible = 1;
817
818 health_code_update();
819
820 ret_pipe_write = lttng_pipe_write(
821 stream_pipe, &new_stream, sizeof(new_stream));
822 if (ret_pipe_write < 0) {
823 ERR("Consumer write %s stream to pipe %d",
824 new_stream->metadata_flag ? "metadata" : "data",
825 lttng_pipe_get_writefd(stream_pipe));
826 if (new_stream->metadata_flag) {
827 consumer_del_stream_for_metadata(new_stream);
828 } else {
829 consumer_del_stream_for_data(new_stream);
830 }
831 goto error_add_stream_nosignal;
832 }
833
834 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
835 new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id);
836 end_add_stream:
837 break;
838 error_add_stream_nosignal:
839 goto end_nosignal;
840 error_add_stream_fatal:
841 goto error_fatal;
842 }
843 case LTTNG_CONSUMER_STREAMS_SENT:
844 {
845 struct lttng_consumer_channel *channel;
846 int ret_send_status;
847
848 /*
849 * Get stream's channel reference. Needed when adding the stream to the
850 * global hash table.
851 */
852 channel = consumer_find_channel(msg.u.sent_streams.channel_key);
853 if (!channel) {
854 /*
855 * We could not find the channel. Can happen if cpu hotplug
856 * happens while tearing down.
857 */
858 ERR("Unable to find channel key %" PRIu64,
859 msg.u.sent_streams.channel_key);
860 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
861 }
862
863 health_code_update();
864
865 /*
866 * Send status code to session daemon.
867 */
868 ret_send_status = consumer_send_status_msg(sock, ret_code);
869 if (ret_send_status < 0 ||
870 ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
871 /* Somehow, the session daemon is not responding anymore. */
872 goto error_streams_sent_nosignal;
873 }
874
875 health_code_update();
876
877 /*
878 * We should not send this message if we don't monitor the
879 * streams in this channel.
880 */
881 if (!channel->monitor) {
882 goto end_error_streams_sent;
883 }
884
885 health_code_update();
886 /* Send stream to relayd if the stream has an ID. */
887 if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
888 int ret_send_relay_streams;
889
890 ret_send_relay_streams = consumer_send_relayd_streams_sent(
891 msg.u.sent_streams.net_seq_idx);
892 if (ret_send_relay_streams < 0) {
893 goto error_streams_sent_nosignal;
894 }
895 channel->streams_sent_to_relayd = true;
896 }
897 end_error_streams_sent:
898 break;
899 error_streams_sent_nosignal:
900 goto end_nosignal;
901 }
902 case LTTNG_CONSUMER_UPDATE_STREAM:
903 {
904 rcu_read_unlock();
905 return -ENOSYS;
906 }
907 case LTTNG_CONSUMER_DESTROY_RELAYD:
908 {
909 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
910 struct consumer_relayd_sock_pair *relayd;
911 int ret_send_status;
912
913 DBG("Kernel consumer destroying relayd %" PRIu64, index);
914
915 /* Get relayd reference if exists. */
916 relayd = consumer_find_relayd(index);
917 if (relayd == NULL) {
918 DBG("Unable to find relayd %" PRIu64, index);
919 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
920 }
921
922 /*
923 * Each relayd socket pair has a refcount of stream attached to it
924 * which tells if the relayd is still active or not depending on the
925 * refcount value.
926 *
927 * This will set the destroy flag of the relayd object and destroy it
928 * if the refcount reaches zero when called.
929 *
930 * The destroy can happen either here or when a stream fd hangs up.
931 */
932 if (relayd) {
933 consumer_flag_relayd_for_destroy(relayd);
934 }
935
936 health_code_update();
937
938 ret_send_status = consumer_send_status_msg(sock, ret_code);
939 if (ret_send_status < 0) {
940 /* Somehow, the session daemon is not responding anymore. */
941 goto error_fatal;
942 }
943
944 goto end_nosignal;
945 }
946 case LTTNG_CONSUMER_DATA_PENDING:
947 {
948 int32_t ret_data_pending;
949 uint64_t id = msg.u.data_pending.session_id;
950 ssize_t ret_send;
951
952 DBG("Kernel consumer data pending command for id %" PRIu64, id);
953
954 ret_data_pending = consumer_data_pending(id);
955
956 health_code_update();
957
958 /* Send back returned value to session daemon */
959 ret_send = lttcomm_send_unix_sock(sock, &ret_data_pending,
960 sizeof(ret_data_pending));
961 if (ret_send < 0) {
962 PERROR("send data pending ret code");
963 goto error_fatal;
964 }
965
966 /*
967 * No need to send back a status message since the data pending
968 * returned value is the response.
969 */
970 break;
971 }
972 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
973 {
974 struct lttng_consumer_channel *channel;
975 uint64_t key = msg.u.snapshot_channel.key;
976 int ret_send_status;
977
978 channel = consumer_find_channel(key);
979 if (!channel) {
980 ERR("Channel %" PRIu64 " not found", key);
981 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
982 } else {
983 if (msg.u.snapshot_channel.metadata == 1) {
984 int ret_snapshot;
985
986 ret_snapshot = lttng_kconsumer_snapshot_metadata(
987 channel, key,
988 msg.u.snapshot_channel.pathname,
989 msg.u.snapshot_channel.relayd_id,
990 ctx);
991 if (ret_snapshot < 0) {
992 ERR("Snapshot metadata failed");
993 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
994 }
995 } else {
996 int ret_snapshot;
997
998 ret_snapshot = lttng_kconsumer_snapshot_channel(
999 channel, key,
1000 msg.u.snapshot_channel.pathname,
1001 msg.u.snapshot_channel.relayd_id,
1002 msg.u.snapshot_channel
1003 .nb_packets_per_stream);
1004 if (ret_snapshot < 0) {
1005 ERR("Snapshot channel failed");
1006 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
1007 }
1008 }
1009 }
1010 health_code_update();
1011
1012 ret_send_status = consumer_send_status_msg(sock, ret_code);
1013 if (ret_send_status < 0) {
1014 /* Somehow, the session daemon is not responding anymore. */
1015 goto end_nosignal;
1016 }
1017 break;
1018 }
1019 case LTTNG_CONSUMER_DESTROY_CHANNEL:
1020 {
1021 uint64_t key = msg.u.destroy_channel.key;
1022 struct lttng_consumer_channel *channel;
1023 int ret_send_status;
1024
1025 channel = consumer_find_channel(key);
1026 if (!channel) {
1027 ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
1028 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1029 }
1030
1031 health_code_update();
1032
1033 ret_send_status = consumer_send_status_msg(sock, ret_code);
1034 if (ret_send_status < 0) {
1035 /* Somehow, the session daemon is not responding anymore. */
1036 goto end_destroy_channel;
1037 }
1038
1039 health_code_update();
1040
1041 /* Stop right now if no channel was found. */
1042 if (!channel) {
1043 goto end_destroy_channel;
1044 }
1045
1046 /*
1047 * This command should ONLY be issued for channel with streams set in
1048 * no monitor mode.
1049 */
1050 LTTNG_ASSERT(!channel->monitor);
1051
1052 /*
1053 * The refcount should ALWAYS be 0 in the case of a channel in no
1054 * monitor mode.
1055 */
1056 LTTNG_ASSERT(!uatomic_sub_return(&channel->refcount, 1));
1057
1058 consumer_del_channel(channel);
1059 end_destroy_channel:
1060 goto end_nosignal;
1061 }
1062 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1063 {
1064 ssize_t ret;
1065 uint64_t count;
1066 struct lttng_consumer_channel *channel;
1067 uint64_t id = msg.u.discarded_events.session_id;
1068 uint64_t key = msg.u.discarded_events.channel_key;
1069
1070 DBG("Kernel consumer discarded events command for session id %"
1071 PRIu64 ", channel key %" PRIu64, id, key);
1072
1073 channel = consumer_find_channel(key);
1074 if (!channel) {
1075 ERR("Kernel consumer discarded events channel %"
1076 PRIu64 " not found", key);
1077 count = 0;
1078 } else {
1079 count = channel->discarded_events;
1080 }
1081
1082 health_code_update();
1083
1084 /* Send back returned value to session daemon */
1085 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1086 if (ret < 0) {
1087 PERROR("send discarded events");
1088 goto error_fatal;
1089 }
1090
1091 break;
1092 }
1093 case LTTNG_CONSUMER_LOST_PACKETS:
1094 {
1095 ssize_t ret;
1096 uint64_t count;
1097 struct lttng_consumer_channel *channel;
1098 uint64_t id = msg.u.lost_packets.session_id;
1099 uint64_t key = msg.u.lost_packets.channel_key;
1100
1101 DBG("Kernel consumer lost packets command for session id %"
1102 PRIu64 ", channel key %" PRIu64, id, key);
1103
1104 channel = consumer_find_channel(key);
1105 if (!channel) {
1106 ERR("Kernel consumer lost packets channel %"
1107 PRIu64 " not found", key);
1108 count = 0;
1109 } else {
1110 count = channel->lost_packets;
1111 }
1112
1113 health_code_update();
1114
1115 /* Send back returned value to session daemon */
1116 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1117 if (ret < 0) {
1118 PERROR("send lost packets");
1119 goto error_fatal;
1120 }
1121
1122 break;
1123 }
1124 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
1125 {
1126 int channel_monitor_pipe;
1127 int ret_send_status, ret_set_channel_monitor_pipe;
1128 ssize_t ret_recv;
1129
1130 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1131 /* Successfully received the command's type. */
1132 ret_send_status = consumer_send_status_msg(sock, ret_code);
1133 if (ret_send_status < 0) {
1134 goto error_fatal;
1135 }
1136
1137 ret_recv = lttcomm_recv_fds_unix_sock(
1138 sock, &channel_monitor_pipe, 1);
1139 if (ret_recv != sizeof(channel_monitor_pipe)) {
1140 ERR("Failed to receive channel monitor pipe");
1141 goto error_fatal;
1142 }
1143
1144 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
1145 ret_set_channel_monitor_pipe =
1146 consumer_timer_thread_set_channel_monitor_pipe(
1147 channel_monitor_pipe);
1148 if (!ret_set_channel_monitor_pipe) {
1149 int flags;
1150 int ret_fcntl;
1151
1152 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1153 /* Set the pipe as non-blocking. */
1154 ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
1155 if (ret_fcntl == -1) {
1156 PERROR("fcntl get flags of the channel monitoring pipe");
1157 goto error_fatal;
1158 }
1159 flags = ret_fcntl;
1160
1161 ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL,
1162 flags | O_NONBLOCK);
1163 if (ret_fcntl == -1) {
1164 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1165 goto error_fatal;
1166 }
1167 DBG("Channel monitor pipe set as non-blocking");
1168 } else {
1169 ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
1170 }
1171 ret_send_status = consumer_send_status_msg(sock, ret_code);
1172 if (ret_send_status < 0) {
1173 goto error_fatal;
1174 }
1175 break;
1176 }
1177 case LTTNG_CONSUMER_ROTATE_CHANNEL:
1178 {
1179 struct lttng_consumer_channel *channel;
1180 uint64_t key = msg.u.rotate_channel.key;
1181 int ret_send_status;
1182
1183 DBG("Consumer rotate channel %" PRIu64, key);
1184
1185 channel = consumer_find_channel(key);
1186 if (!channel) {
1187 ERR("Channel %" PRIu64 " not found", key);
1188 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1189 } else {
1190 /*
1191 * Sample the rotate position of all the streams in this channel.
1192 */
1193 int ret_rotate_channel;
1194
1195 ret_rotate_channel = lttng_consumer_rotate_channel(
1196 channel, key,
1197 msg.u.rotate_channel.relayd_id);
1198 if (ret_rotate_channel < 0) {
1199 ERR("Rotate channel failed");
1200 ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
1201 }
1202
1203 health_code_update();
1204 }
1205
1206 ret_send_status = consumer_send_status_msg(sock, ret_code);
1207 if (ret_send_status < 0) {
1208 /* Somehow, the session daemon is not responding anymore. */
1209 goto error_rotate_channel;
1210 }
1211 if (channel) {
1212 /* Rotate the streams that are ready right now. */
1213 int ret_rotate;
1214
1215 ret_rotate = lttng_consumer_rotate_ready_streams(
1216 channel, key);
1217 if (ret_rotate < 0) {
1218 ERR("Rotate ready streams failed");
1219 }
1220 }
1221 break;
1222 error_rotate_channel:
1223 goto end_nosignal;
1224 }
1225 case LTTNG_CONSUMER_CLEAR_CHANNEL:
1226 {
1227 struct lttng_consumer_channel *channel;
1228 uint64_t key = msg.u.clear_channel.key;
1229 int ret_send_status;
1230
1231 channel = consumer_find_channel(key);
1232 if (!channel) {
1233 DBG("Channel %" PRIu64 " not found", key);
1234 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1235 } else {
1236 int ret_clear_channel;
1237
1238 ret_clear_channel =
1239 lttng_consumer_clear_channel(channel);
1240 if (ret_clear_channel) {
1241 ERR("Clear channel failed");
1242 ret_code = (lttcomm_return_code) ret_clear_channel;
1243 }
1244
1245 health_code_update();
1246 }
1247
1248 ret_send_status = consumer_send_status_msg(sock, ret_code);
1249 if (ret_send_status < 0) {
1250 /* Somehow, the session daemon is not responding anymore. */
1251 goto end_nosignal;
1252 }
1253
1254 break;
1255 }
1256 case LTTNG_CONSUMER_INIT:
1257 {
1258 int ret_send_status;
1259 lttng_uuid sessiond_uuid;
1260
1261 std::copy(std::begin(msg.u.init.sessiond_uuid), std::end(msg.u.init.sessiond_uuid),
1262 sessiond_uuid.begin());
1263
1264 ret_code = lttng_consumer_init_command(ctx,
1265 sessiond_uuid);
1266 health_code_update();
1267 ret_send_status = consumer_send_status_msg(sock, ret_code);
1268 if (ret_send_status < 0) {
1269 /* Somehow, the session daemon is not responding anymore. */
1270 goto end_nosignal;
1271 }
1272 break;
1273 }
1274 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
1275 {
1276 const struct lttng_credentials credentials = {
1277 .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid),
1278 .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid),
1279 };
1280 const bool is_local_trace =
1281 !msg.u.create_trace_chunk.relayd_id.is_set;
1282 const uint64_t relayd_id =
1283 msg.u.create_trace_chunk.relayd_id.value;
1284 const char *chunk_override_name =
1285 *msg.u.create_trace_chunk.override_name ?
1286 msg.u.create_trace_chunk.override_name :
1287 NULL;
1288 struct lttng_directory_handle *chunk_directory_handle = NULL;
1289
1290 /*
1291 * The session daemon will only provide a chunk directory file
1292 * descriptor for local traces.
1293 */
1294 if (is_local_trace) {
1295 int chunk_dirfd;
1296 int ret_send_status;
1297 ssize_t ret_recv;
1298
1299 /* Acnowledge the reception of the command. */
1300 ret_send_status = consumer_send_status_msg(
1301 sock, LTTCOMM_CONSUMERD_SUCCESS);
1302 if (ret_send_status < 0) {
1303 /* Somehow, the session daemon is not responding anymore. */
1304 goto end_nosignal;
1305 }
1306
1307 ret_recv = lttcomm_recv_fds_unix_sock(
1308 sock, &chunk_dirfd, 1);
1309 if (ret_recv != sizeof(chunk_dirfd)) {
1310 ERR("Failed to receive trace chunk directory file descriptor");
1311 goto error_fatal;
1312 }
1313
1314 DBG("Received trace chunk directory fd (%d)",
1315 chunk_dirfd);
1316 chunk_directory_handle = lttng_directory_handle_create_from_dirfd(
1317 chunk_dirfd);
1318 if (!chunk_directory_handle) {
1319 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1320 if (close(chunk_dirfd)) {
1321 PERROR("Failed to close chunk directory file descriptor");
1322 }
1323 goto error_fatal;
1324 }
1325 }
1326
1327 ret_code = lttng_consumer_create_trace_chunk(
1328 !is_local_trace ? &relayd_id : NULL,
1329 msg.u.create_trace_chunk.session_id,
1330 msg.u.create_trace_chunk.chunk_id,
1331 (time_t) msg.u.create_trace_chunk
1332 .creation_timestamp,
1333 chunk_override_name,
1334 msg.u.create_trace_chunk.credentials.is_set ?
1335 &credentials :
1336 NULL,
1337 chunk_directory_handle);
1338 lttng_directory_handle_put(chunk_directory_handle);
1339 goto end_msg_sessiond;
1340 }
1341 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
1342 {
1343 enum lttng_trace_chunk_command_type close_command =
1344 (lttng_trace_chunk_command_type) msg.u.close_trace_chunk.close_command.value;
1345 const uint64_t relayd_id =
1346 msg.u.close_trace_chunk.relayd_id.value;
1347 struct lttcomm_consumer_close_trace_chunk_reply reply;
1348 char path[LTTNG_PATH_MAX];
1349 ssize_t ret_send;
1350
1351 ret_code = lttng_consumer_close_trace_chunk(
1352 msg.u.close_trace_chunk.relayd_id.is_set ?
1353 &relayd_id :
1354 NULL,
1355 msg.u.close_trace_chunk.session_id,
1356 msg.u.close_trace_chunk.chunk_id,
1357 (time_t) msg.u.close_trace_chunk.close_timestamp,
1358 msg.u.close_trace_chunk.close_command.is_set ?
1359 &close_command :
1360 NULL, path);
1361 reply.ret_code = ret_code;
1362 reply.path_length = strlen(path) + 1;
1363 ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
1364 if (ret_send != sizeof(reply)) {
1365 goto error_fatal;
1366 }
1367 ret_send = lttcomm_send_unix_sock(
1368 sock, path, reply.path_length);
1369 if (ret_send != reply.path_length) {
1370 goto error_fatal;
1371 }
1372 goto end_nosignal;
1373 }
1374 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
1375 {
1376 const uint64_t relayd_id =
1377 msg.u.trace_chunk_exists.relayd_id.value;
1378
1379 ret_code = lttng_consumer_trace_chunk_exists(
1380 msg.u.trace_chunk_exists.relayd_id.is_set ?
1381 &relayd_id : NULL,
1382 msg.u.trace_chunk_exists.session_id,
1383 msg.u.trace_chunk_exists.chunk_id);
1384 goto end_msg_sessiond;
1385 }
1386 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
1387 {
1388 const uint64_t key = msg.u.open_channel_packets.key;
1389 struct lttng_consumer_channel *channel =
1390 consumer_find_channel(key);
1391
1392 if (channel) {
1393 pthread_mutex_lock(&channel->lock);
1394 ret_code = lttng_consumer_open_channel_packets(channel);
1395 pthread_mutex_unlock(&channel->lock);
1396 } else {
1397 WARN("Channel %" PRIu64 " not found", key);
1398 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1399 }
1400
1401 health_code_update();
1402 goto end_msg_sessiond;
1403 }
1404 default:
1405 goto end_nosignal;
1406 }
1407
1408 end_nosignal:
1409 /*
1410 * Return 1 to indicate success since the 0 value can be a socket
1411 * shutdown during the recv() or send() call.
1412 */
1413 ret_func = 1;
1414 goto end;
1415 error_fatal:
1416 /* This will issue a consumer stop. */
1417 ret_func = -1;
1418 goto end;
1419 end_msg_sessiond:
1420 /*
1421 * The returned value here is not useful since either way we'll return 1 to
1422 * the caller because the session daemon socket management is done
1423 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1424 */
1425 {
1426 int ret_send_status;
1427
1428 ret_send_status = consumer_send_status_msg(sock, ret_code);
1429 if (ret_send_status < 0) {
1430 goto error_fatal;
1431 }
1432 }
1433
1434 ret_func = 1;
1435
1436 end:
1437 health_code_update();
1438 rcu_read_unlock();
1439 return ret_func;
1440 }
1441
1442 /*
1443 * Sync metadata meaning request them to the session daemon and snapshot to the
1444 * metadata thread can consumer them.
1445 *
1446 * Metadata stream lock MUST be acquired.
1447 */
enum sync_metadata_status lttng_kconsumer_sync_metadata(
		struct lttng_consumer_stream *metadata)
{
	int ret;
	enum sync_metadata_status status;

	LTTNG_ASSERT(metadata);

	/* Flush any partially-filled metadata packet to the ring buffer. */
	ret = kernctl_buffer_flush(metadata->wait_fd);
	if (ret < 0) {
		ERR("Failed to flush kernel stream");
		status = SYNC_METADATA_STATUS_ERROR;
		goto end;
	}

	/*
	 * Snapshot the positions so the metadata thread can consume what was
	 * just flushed.
	 *
	 * NOTE(review): this inspects errno while lttng_kconsumer_take_snapshot
	 * compares the return value against -EAGAIN — confirm which convention
	 * kernctl_snapshot actually follows; one of the two checks is likely
	 * stale.
	 */
	ret = kernctl_snapshot(metadata->wait_fd);
	if (ret < 0) {
		if (errno == EAGAIN) {
			/* No new metadata, exit. */
			DBG("Sync metadata, no new kernel metadata");
			status = SYNC_METADATA_STATUS_NO_DATA;
		} else {
			ERR("Sync metadata, taking kernel snapshot failed.");
			status = SYNC_METADATA_STATUS_ERROR;
		}
	} else {
		status = SYNC_METADATA_STATUS_NEW_DATA;
	}

end:
	return status;
}
1480
1481 static
1482 int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
1483 struct stream_subbuffer *subbuf)
1484 {
1485 int ret;
1486
1487 ret = kernctl_get_subbuf_size(
1488 stream->wait_fd, &subbuf->info.data.subbuf_size);
1489 if (ret) {
1490 goto end;
1491 }
1492
1493 ret = kernctl_get_padded_subbuf_size(
1494 stream->wait_fd, &subbuf->info.data.padded_subbuf_size);
1495 if (ret) {
1496 goto end;
1497 }
1498
1499 end:
1500 return ret;
1501 }
1502
1503 static
1504 int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
1505 struct stream_subbuffer *subbuf)
1506 {
1507 int ret;
1508
1509 ret = extract_common_subbuffer_info(stream, subbuf);
1510 if (ret) {
1511 goto end;
1512 }
1513
1514 ret = kernctl_get_metadata_version(
1515 stream->wait_fd, &subbuf->info.metadata.version);
1516 if (ret) {
1517 goto end;
1518 }
1519
1520 end:
1521 return ret;
1522 }
1523
/*
 * Fill in a data sub-buffer's description by querying the kernel tracer:
 * sizes, packet/content lengths, begin/end timestamps, discarded event
 * count, and — when the tracer supports them — the sequence number and
 * stream instance id.
 *
 * Returns 0 on success, a negative kernctl error code otherwise.
 */
static
int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = kernctl_get_packet_size(
			stream->wait_fd, &subbuf->info.data.packet_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = kernctl_get_content_size(
			stream->wait_fd, &subbuf->info.data.content_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = kernctl_get_timestamp_begin(
			stream->wait_fd, &subbuf->info.data.timestamp_begin);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = kernctl_get_timestamp_end(
			stream->wait_fd, &subbuf->info.data.timestamp_end);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = kernctl_get_events_discarded(
			stream->wait_fd, &subbuf->info.data.events_discarded);
	if (ret) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	ret = kernctl_get_sequence_number(stream->wait_fd,
			&subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
		/* -ENOTTY: leave the optional unset and keep going. */
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = kernctl_get_stream_id(
			stream->wait_fd, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	ret = kernctl_get_instance_id(stream->wait_fd,
			&subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
		/* -ENOTTY: leave the optional unset; ret is returned as-is. */
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}
end:
	return ret;
}
1603
1604 static
1605 enum get_next_subbuffer_status get_subbuffer_common(
1606 struct lttng_consumer_stream *stream,
1607 struct stream_subbuffer *subbuffer)
1608 {
1609 int ret;
1610 enum get_next_subbuffer_status status;
1611
1612 ret = kernctl_get_next_subbuf(stream->wait_fd);
1613 switch (ret) {
1614 case 0:
1615 status = GET_NEXT_SUBBUFFER_STATUS_OK;
1616 break;
1617 case -ENODATA:
1618 case -EAGAIN:
1619 /*
1620 * The caller only expects -ENODATA when there is no data to
1621 * read, but the kernel tracer returns -EAGAIN when there is
1622 * currently no data for a non-finalized stream, and -ENODATA
1623 * when there is no data for a finalized stream. Those can be
1624 * combined into a -ENODATA return value.
1625 */
1626 status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
1627 goto end;
1628 default:
1629 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1630 goto end;
1631 }
1632
1633 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
1634 stream, subbuffer);
1635 if (ret) {
1636 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1637 }
1638 end:
1639 return status;
1640 }
1641
1642 static
1643 enum get_next_subbuffer_status get_next_subbuffer_splice(
1644 struct lttng_consumer_stream *stream,
1645 struct stream_subbuffer *subbuffer)
1646 {
1647 const enum get_next_subbuffer_status status =
1648 get_subbuffer_common(stream, subbuffer);
1649
1650 if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
1651 goto end;
1652 }
1653
1654 subbuffer->buffer.fd = stream->wait_fd;
1655 end:
1656 return status;
1657 }
1658
1659 static
1660 enum get_next_subbuffer_status get_next_subbuffer_mmap(
1661 struct lttng_consumer_stream *stream,
1662 struct stream_subbuffer *subbuffer)
1663 {
1664 int ret;
1665 enum get_next_subbuffer_status status;
1666 const char *addr;
1667
1668 status = get_subbuffer_common(stream, subbuffer);
1669 if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
1670 goto end;
1671 }
1672
1673 ret = get_current_subbuf_addr(stream, &addr);
1674 if (ret) {
1675 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1676 goto end;
1677 }
1678
1679 subbuffer->buffer.buffer = lttng_buffer_view_init(
1680 addr, 0, subbuffer->info.data.padded_subbuf_size);
1681 end:
1682 return status;
1683 }
1684
/*
 * get_next_subbuffer implementation for live metadata streams on tracers
 * that support the "metadata check" operation: in addition to reserving the
 * sub-buffer, the tracer reports whether the metadata is coherent
 * (parseable) at this point, which is recorded in the sub-buffer info.
 */
static
enum get_next_subbuffer_status get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	const char *addr;
	bool coherent;
	enum get_next_subbuffer_status status;

	ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd,
			&coherent);
	if (ret) {
		goto end;
	}

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
			stream, subbuffer);
	if (ret) {
		goto end;
	}

	LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		goto end;
	}

	subbuffer->buffer.buffer = lttng_buffer_view_init(
			addr, 0, subbuffer->info.data.padded_subbuf_size);
	DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
			subbuffer->info.metadata.padded_subbuf_size,
			coherent ? "true" : "false");
end:
	/* Map the raw kernctl return code to a status for the caller. */
	switch (ret) {
	case 0:
		status = GET_NEXT_SUBBUFFER_STATUS_OK;
		break;
	case -ENODATA:
	case -EAGAIN:
		/*
		 * The caller only expects -ENODATA when there is no data to
		 * read, but the kernel tracer returns -EAGAIN when there is
		 * currently no data for a non-finalized stream, and -ENODATA
		 * when there is no data for a finalized stream. Those can be
		 * combined into a -ENODATA return value.
		 */
		status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
		break;
	default:
		status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
		break;
	}

	return status;
}
1747
1748 static
1749 int put_next_subbuffer(struct lttng_consumer_stream *stream,
1750 struct stream_subbuffer *subbuffer __attribute__((unused)))
1751 {
1752 const int ret = kernctl_put_next_subbuf(stream->wait_fd);
1753
1754 if (ret) {
1755 if (ret == -EFAULT) {
1756 PERROR("Error in unreserving sub buffer");
1757 } else if (ret == -EIO) {
1758 /* Should never happen with newer LTTng versions */
1759 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1760 }
1761 }
1762
1763 return ret;
1764 }
1765
1766 static
1767 bool is_get_next_check_metadata_available(int tracer_fd)
1768 {
1769 const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL);
1770 const bool available = ret != -ENOTTY;
1771
1772 if (ret == 0) {
1773 /* get succeeded, make sure to put the subbuffer. */
1774 kernctl_put_subbuf(tracer_fd);
1775 }
1776
1777 return available;
1778 }
1779
1780 static
1781 int signal_metadata(struct lttng_consumer_stream *stream,
1782 struct lttng_consumer_local_data *ctx __attribute__((unused)))
1783 {
1784 ASSERT_LOCKED(stream->metadata_rdv_lock);
1785 return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
1786 }
1787
1788 static
1789 int lttng_kconsumer_set_stream_ops(
1790 struct lttng_consumer_stream *stream)
1791 {
1792 int ret = 0;
1793
1794 if (stream->metadata_flag && stream->chan->is_live) {
1795 DBG("Attempting to enable metadata bucketization for live consumers");
1796 if (is_get_next_check_metadata_available(stream->wait_fd)) {
1797 DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
1798 stream->read_subbuffer_ops.get_next_subbuffer =
1799 get_next_subbuffer_metadata_check;
1800 ret = consumer_stream_enable_metadata_bucketization(
1801 stream);
1802 if (ret) {
1803 goto end;
1804 }
1805 } else {
1806 /*
1807 * The kernel tracer version is too old to indicate
1808 * when the metadata stream has reached a "coherent"
1809 * (parseable) point.
1810 *
1811 * This means that a live viewer may see an incoherent
1812 * sequence of metadata and fail to parse it.
1813 */
1814 WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
1815 metadata_bucket_destroy(stream->metadata_bucket);
1816 stream->metadata_bucket = NULL;
1817 }
1818
1819 stream->read_subbuffer_ops.on_sleep = signal_metadata;
1820 }
1821
1822 if (!stream->read_subbuffer_ops.get_next_subbuffer) {
1823 if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
1824 stream->read_subbuffer_ops.get_next_subbuffer =
1825 get_next_subbuffer_mmap;
1826 } else {
1827 stream->read_subbuffer_ops.get_next_subbuffer =
1828 get_next_subbuffer_splice;
1829 }
1830 }
1831
1832 if (stream->metadata_flag) {
1833 stream->read_subbuffer_ops.extract_subbuffer_info =
1834 extract_metadata_subbuffer_info;
1835 } else {
1836 stream->read_subbuffer_ops.extract_subbuffer_info =
1837 extract_data_subbuffer_info;
1838 if (stream->chan->is_live) {
1839 stream->read_subbuffer_ops.send_live_beacon =
1840 consumer_flush_kernel_index;
1841 }
1842 }
1843
1844 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
1845 end:
1846 return ret;
1847 }
1848
/*
 * Prepare a newly-received kernel stream: create its output files when
 * tracing locally in monitor mode, map the ring buffer for mmap output and
 * install the read_subbuffer_ops callbacks.
 *
 * Returns 0 on success (the caller then manages the FD), a negative value
 * on error.
 */
int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	LTTNG_ASSERT(stream);

	/*
	 * Don't create anything if this is set for streaming or if there is
	 * no current trace chunk on the parent channel.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
			stream->chan->trace_chunk) {
		ret = consumer_stream_create_output_files(stream, true);
		if (ret) {
			goto error;
		}
	}

	if (stream->output == LTTNG_EVENT_MMAP) {
		/* get the len of the mmap region */
		unsigned long mmap_len;

		ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
		if (ret != 0) {
			PERROR("kernctl_get_mmap_len");
			goto error_close_fd;
		}
		stream->mmap_len = (size_t) mmap_len;

		stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ,
				MAP_PRIVATE, stream->wait_fd, 0);
		if (stream->mmap_base == MAP_FAILED) {
			PERROR("Error mmaping");
			ret = -1;
			goto error_close_fd;
		}
	}

	ret = lttng_kconsumer_set_stream_ops(stream);
	if (ret) {
		/*
		 * NOTE(review): on this path a successfully mmap()ed
		 * stream->mmap_base is not unmapped here — presumably the
		 * stream teardown performed by the caller releases it;
		 * confirm, otherwise the mapping leaks.
		 */
		goto error_close_fd;
	}

	/* we return 0 to let the library handle the FD internally */
	return 0;

error_close_fd:
	if (stream->out_fd >= 0) {
		int err;

		err = close(stream->out_fd);
		LTTNG_ASSERT(!err);
		/* Mark as closed so later teardown does not double-close. */
		stream->out_fd = -1;
	}
error:
	return ret;
}
1906
1907 /*
1908 * Check if data is still being extracted from the buffers for a specific
1909 * stream. Consumer data lock MUST be acquired before calling this function
1910 * and the stream lock.
1911 *
1912 * Return 1 if the traced data are still getting read else 0 meaning that the
1913 * data is available for trace viewer reading.
1914 */
1915 int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
1916 {
1917 int ret;
1918
1919 LTTNG_ASSERT(stream);
1920
1921 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1922 ret = 0;
1923 goto end;
1924 }
1925
1926 ret = kernctl_get_next_subbuf(stream->wait_fd);
1927 if (ret == 0) {
1928 /* There is still data so let's put back this subbuffer. */
1929 ret = kernctl_put_subbuf(stream->wait_fd);
1930 LTTNG_ASSERT(ret == 0);
1931 ret = 1; /* Data is pending */
1932 goto end;
1933 }
1934
1935 /* Data is NOT pending and ready to be read. */
1936 ret = 0;
1937
1938 end:
1939 return ret;
1940 }
This page took 0.071989 seconds and 4 git commands to generate.