Tests: Fix: Use '.logfile' instead of '.log' for test app output
[lttng-tools.git] / src / common / kernel-consumer / kernel-consumer.cpp
1 /*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #define _LGPL_SOURCE
11 #include "kernel-consumer.hpp"
12
13 #include <common/buffer-view.hpp>
14 #include <common/common.hpp>
15 #include <common/compat/endian.hpp>
16 #include <common/consumer/consumer-stream.hpp>
17 #include <common/consumer/consumer-timer.hpp>
18 #include <common/consumer/consumer.hpp>
19 #include <common/consumer/metadata-bucket.hpp>
20 #include <common/index/index.hpp>
21 #include <common/kernel-ctl/kernel-ctl.hpp>
22 #include <common/optional.hpp>
23 #include <common/pipe.hpp>
24 #include <common/pthread-lock.hpp>
25 #include <common/relayd/relayd.hpp>
26 #include <common/scope-exit.hpp>
27 #include <common/sessiond-comm/relayd.hpp>
28 #include <common/sessiond-comm/sessiond-comm.hpp>
29 #include <common/urcu.hpp>
30 #include <common/utils.hpp>
31
32 #include <bin/lttng-consumerd/health-consumerd.hpp>
33 #include <fcntl.h>
34 #include <inttypes.h>
35 #include <poll.h>
36 #include <pthread.h>
37 #include <stdint.h>
38 #include <stdlib.h>
39 #include <string.h>
40 #include <sys/mman.h>
41 #include <sys/socket.h>
42 #include <sys/stat.h>
43 #include <sys/types.h>
44 #include <unistd.h>
45
46 extern struct lttng_consumer_global_data the_consumer_data;
47 extern int consumer_poll_timeout;
48
49 /*
50 * Take a snapshot for a specific fd
51 *
52 * Returns 0 on success, < 0 on error
53 */
54 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
55 {
56 int ret = 0;
57 const int infd = stream->wait_fd;
58
59 ret = kernctl_snapshot(infd);
60 /*
61 * -EAGAIN is not an error, it just means that there is no data to
62 * be read.
63 */
64 if (ret != 0 && ret != -EAGAIN) {
65 PERROR("Getting sub-buffer snapshot.");
66 }
67
68 return ret;
69 }
70
71 /*
72 * Sample consumed and produced positions for a specific fd.
73 *
74 * Returns 0 on success, < 0 on error.
75 */
76 int lttng_kconsumer_sample_snapshot_positions(struct lttng_consumer_stream *stream)
77 {
78 LTTNG_ASSERT(stream);
79
80 return kernctl_snapshot_sample_positions(stream->wait_fd);
81 }
82
83 /*
84 * Get the produced position
85 *
86 * Returns 0 on success, < 0 on error
87 */
88 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream, unsigned long *pos)
89 {
90 int ret;
91 const int infd = stream->wait_fd;
92
93 ret = kernctl_snapshot_get_produced(infd, pos);
94 if (ret != 0) {
95 PERROR("kernctl_snapshot_get_produced");
96 }
97
98 return ret;
99 }
100
101 /*
102 * Get the consumerd position
103 *
104 * Returns 0 on success, < 0 on error
105 */
106 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream, unsigned long *pos)
107 {
108 int ret;
109 const int infd = stream->wait_fd;
110
111 ret = kernctl_snapshot_get_consumed(infd, pos);
112 if (ret != 0) {
113 PERROR("kernctl_snapshot_get_consumed");
114 }
115
116 return ret;
117 }
118
119 static int get_current_subbuf_addr(struct lttng_consumer_stream *stream, const char **addr)
120 {
121 int ret;
122 unsigned long mmap_offset;
123 const char *mmap_base = (const char *) stream->mmap_base;
124
125 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
126 if (ret < 0) {
127 PERROR("Failed to get mmap read offset");
128 goto error;
129 }
130
131 *addr = mmap_base + mmap_offset;
132 error:
133 return ret;
134 }
135
/*
 * Take a snapshot of all the streams of a channel.
 *
 * RCU read-side lock must be held across this function to ensure existence of
 * channel.
 *
 * @param channel               Channel to snapshot; its lock is taken for the
 *                              whole duration of the snapshot.
 * @param key                   Channel key, used for logging only.
 * @param path                  Destination path sent to the relay daemon when
 *                              streaming (unused for local output).
 * @param relayd_id             Relay daemon id, or (uint64_t) -1ULL for local
 *                              output files.
 * @param nb_packets_per_stream Cap on the number of packets captured per
 *                              stream (see consumer_get_consume_start_pos()).
 *
 * Returns 0 on success, < 0 on error
 */
static int lttng_kconsumer_snapshot_channel(struct lttng_consumer_channel *channel,
					    uint64_t key,
					    char *path,
					    uint64_t relayd_id,
					    uint64_t nb_packets_per_stream)
{
	int ret;

	DBG("Kernel consumer snapshot channel %" PRIu64, key);

	/* Prevent channel modifications while we perform the snapshot.*/
	const lttng::pthread::lock_guard channe_lock(channel->lock);

	const lttng::urcu::read_lock_guard read_lock;

	/* Splice is not supported yet for channel snapshot. */
	if (channel->output != CONSUMER_CHANNEL_MMAP) {
		ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
		    channel->name);
		ret = -1;
		goto end;
	}

	/* Walk every stream published on this channel's send list. */
	for (auto stream : lttng::urcu::list_iteration_adapter<lttng_consumer_stream,
							       &lttng_consumer_stream::send_node>(
		     channel->streams.head)) {
		unsigned long consumed_pos, produced_pos;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		const lttng::pthread::lock_guard stream_lock(stream->lock);

		LTTNG_ASSERT(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto end;
		}
		LTTNG_ASSERT(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		/*
		 * Assign the received relayd ID so we can use it for streaming. The streams
		 * are not visible to anyone so this is OK to change it.
		 */
		stream->net_seq_idx = relayd_id;
		channel->relayd_id = relayd_id;

		/* Close stream output when were are done. */
		const auto close_stream_output = lttng::make_scope_exit(
			[stream]() noexcept { consumer_stream_close_output(stream); });

		/* -1ULL relayd id means the output is a local file. */
		if (relayd_id != (uint64_t) -1ULL) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				ERR("sending stream to relayd");
				goto end;
			}
		} else {
			ret = consumer_stream_create_output_files(stream, false);
			if (ret < 0) {
				goto end;
			}
			DBG("Kernel consumer snapshot stream (%" PRIu64 ")", stream->key);
		}

		ret = kernctl_buffer_flush_empty(stream->wait_fd);
		if (ret < 0) {
			/*
			 * Doing a buffer flush which does not take into
			 * account empty packets. This is not perfect
			 * for stream intersection, but required as a
			 * fall-back when "flush_empty" is not
			 * implemented by lttng-modules.
			 */
			ret = kernctl_buffer_flush(stream->wait_fd);
			if (ret < 0) {
				ERR("Failed to flush kernel stream");
				goto end;
			}
			/*
			 * NOTE(review): a *successful* fall-back flush also
			 * jumps to 'end' (ret == 0), which skips the data
			 * capture below and any remaining streams — confirm
			 * this early-out is intentional.
			 */
			goto end;
		}

		ret = lttng_kconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking kernel snapshot");
			goto end;
		}

		ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced kernel snapshot position");
			goto end;
		}

		ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd kernel snapshot position");
			goto end;
		}

		/*
		 * Move the start position forward so that at most
		 * nb_packets_per_stream packets are captured.
		 */
		consumed_pos = consumer_get_consume_start_pos(
			consumed_pos, produced_pos, nb_packets_per_stream, stream->max_sb_size);

		/*
		 * Signed difference so the comparison stays correct if the
		 * free-running position counters wrap around.
		 */
		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();
			DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);

			ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("kernctl_get_subbuf snapshot");
					goto end;
				}
				/*
				 * -EAGAIN: sub-buffer not available; account
				 * it as lost and move on to the next one.
				 */
				DBG("Kernel consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			/* Put the subbuffer once we are done. */
			const auto put_subbuf = lttng::make_scope_exit([stream]() noexcept {
				const auto put_ret = kernctl_put_subbuf(stream->wait_fd);
				if (put_ret < 0) {
					ERR("Snapshot kernctl_put_subbuf");
				}
			});

			ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_subbuf_size");
				goto end;
			}

			ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_padded_subbuf_size");
				goto end;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto end;
			}

			subbuf_view = lttng_buffer_view_init(subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
				stream, &subbuf_view, padded_len - len);
			/*
			 * We write the padded len in local tracefiles but the data len
			 * when using a relay. Display the error but continue processing
			 * to try to release the subbuffer.
			 */
			if (relayd_id != (uint64_t) -1ULL) {
				if (read_len != len) {
					ERR("Error sending to the relay (ret: %zd != len: %lu)",
					    read_len,
					    len);
				}
			} else {
				if (read_len != padded_len) {
					ERR("Error writing to tracefile (ret: %zd != len: %lu)",
					    read_len,
					    padded_len);
				}
			}

			consumed_pos += stream->max_sb_size;
		}
	}

	/* All good! */
	ret = 0;
	goto end;

end:
	return ret;
}
333
/*
 * Read the whole metadata available for a snapshot.
 *
 * RCU read-side lock must be held across this function to ensure existence of
 * metadata_channel.
 *
 * @param metadata_channel Metadata channel; its metadata_stream must be set
 *                         and is destroyed (and the field cleared) before
 *                         returning, on both success and error paths.
 * @param key              Channel key, used for logging only.
 * @param path             Destination path sent to the relay daemon when
 *                         streaming (unused for local output).
 * @param relayd_id        Relay daemon id, or (uint64_t) -1ULL for local
 *                         output files.
 * @param ctx              Consumer local data context (must be non-NULL).
 *
 * Returns 0 on success, < 0 on error
 */
static int lttng_kconsumer_snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
					     uint64_t key,
					     char *path,
					     uint64_t relayd_id,
					     struct lttng_consumer_local_data *ctx)
{
	int ret, use_relayd = 0;
	ssize_t ret_read;
	struct lttng_consumer_stream *metadata_stream;

	LTTNG_ASSERT(ctx);

	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s", key, path);

	const lttng::urcu::read_lock_guard read_lock;

	metadata_stream = metadata_channel->metadata_stream;
	LTTNG_ASSERT(metadata_stream);

	/* Held until error_snapshot; serializes sub-buffer reads on the stream. */
	metadata_stream->read_subbuffer_ops.lock(metadata_stream);
	LTTNG_ASSERT(metadata_channel->trace_chunk);
	LTTNG_ASSERT(metadata_stream->trace_chunk);

	/* Flag once that we have a valid relayd for the stream. */
	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	/* Set up the output: relay daemon socket or local files. */
	if (use_relayd) {
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_snapshot;
		}
	} else {
		ret = consumer_stream_create_output_files(metadata_stream, false);
		if (ret < 0) {
			goto error_snapshot;
		}
	}

	/* Drain metadata sub-buffers until no more data is returned. */
	do {
		health_code_update();

		ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret_read < 0) {
			ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)", ret_read);
			/* NOTE(review): narrowing ssize_t -> int; error codes fit. */
			ret = ret_read;
			goto error_snapshot;
		}
	} while (ret_read > 0);

	if (use_relayd) {
		close_relayd_stream(metadata_stream);
		/* Detach the stream from the relay daemon. */
		metadata_stream->net_seq_idx = (uint64_t) -1ULL;
	} else {
		if (metadata_stream->out_fd >= 0) {
			ret = close(metadata_stream->out_fd);
			if (ret < 0) {
				PERROR("Kernel consumer snapshot metadata close out_fd");
				/*
				 * Don't go on error here since the snapshot was successful at this
				 * point but somehow the close failed.
				 */
			}
			metadata_stream->out_fd = -1;
			lttng_trace_chunk_put(metadata_stream->trace_chunk);
			metadata_stream->trace_chunk = nullptr;
		}
	}

	ret = 0;
error_snapshot:
	/* Common teardown: unlock, then destroy the snapshot metadata stream. */
	metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
	consumer_stream_destroy(metadata_stream, nullptr);
	metadata_channel->metadata_stream = nullptr;
	return ret;
}
418
419 /*
420 * Receive command from session daemon and process it.
421 *
422 * Return 1 on success else a negative value or 0.
423 */
424 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
425 int sock,
426 struct pollfd *consumer_sockpoll)
427 {
428 int ret_func;
429 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
430 struct lttcomm_consumer_msg msg;
431
432 health_code_update();
433
434 {
435 ssize_t ret_recv;
436
437 ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
438 if (ret_recv != sizeof(msg)) {
439 if (ret_recv > 0) {
440 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
441 ret_recv = -1;
442 }
443 return ret_recv;
444 }
445 }
446
447 health_code_update();
448
449 /* Deprecated command */
450 LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP);
451
452 health_code_update();
453
454 /* relayd needs RCU read-side protection */
455 const lttng::urcu::read_lock_guard read_lock;
456
457 switch (msg.cmd_type) {
458 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
459 {
460 const uint32_t major = msg.u.relayd_sock.major;
461 const uint32_t minor = msg.u.relayd_sock.minor;
462 const lttcomm_sock_proto protocol =
463 (enum lttcomm_sock_proto) msg.u.relayd_sock.relayd_socket_protocol;
464
465 /* Session daemon status message are handled in the following call. */
466 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
467 msg.u.relayd_sock.type,
468 ctx,
469 sock,
470 consumer_sockpoll,
471 msg.u.relayd_sock.session_id,
472 msg.u.relayd_sock.relayd_session_id,
473 major,
474 minor,
475 protocol);
476 goto end_nosignal;
477 }
478 case LTTNG_CONSUMER_ADD_CHANNEL:
479 {
480 struct lttng_consumer_channel *new_channel;
481 int ret_send_status, ret_add_channel = 0;
482 const uint64_t chunk_id = msg.u.channel.chunk_id.value;
483
484 health_code_update();
485
486 /* First send a status message before receiving the fds. */
487 ret_send_status = consumer_send_status_msg(sock, ret_code);
488 if (ret_send_status < 0) {
489 /* Somehow, the session daemon is not responding anymore. */
490 goto error_fatal;
491 }
492
493 health_code_update();
494
495 DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
496 new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
497 msg.u.channel.session_id,
498 msg.u.channel.chunk_id.is_set ? &chunk_id :
499 nullptr,
500 msg.u.channel.pathname,
501 msg.u.channel.name,
502 msg.u.channel.relayd_id,
503 msg.u.channel.output,
504 msg.u.channel.tracefile_size,
505 msg.u.channel.tracefile_count,
506 0,
507 msg.u.channel.monitor,
508 msg.u.channel.live_timer_interval,
509 msg.u.channel.is_live,
510 nullptr,
511 nullptr);
512 if (new_channel == nullptr) {
513 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
514 goto end_nosignal;
515 }
516 new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
517 switch (msg.u.channel.output) {
518 case LTTNG_EVENT_SPLICE:
519 new_channel->output = CONSUMER_CHANNEL_SPLICE;
520 break;
521 case LTTNG_EVENT_MMAP:
522 new_channel->output = CONSUMER_CHANNEL_MMAP;
523 break;
524 default:
525 ERR("Channel output unknown %d", msg.u.channel.output);
526 goto end_nosignal;
527 }
528
529 /* Translate and save channel type. */
530 switch (msg.u.channel.type) {
531 case CONSUMER_CHANNEL_TYPE_DATA:
532 case CONSUMER_CHANNEL_TYPE_METADATA:
533 new_channel->type = (consumer_channel_type) msg.u.channel.type;
534 break;
535 default:
536 abort();
537 goto end_nosignal;
538 };
539
540 health_code_update();
541
542 if (ctx->on_recv_channel != nullptr) {
543 const int ret_recv_channel = ctx->on_recv_channel(new_channel);
544 if (ret_recv_channel == 0) {
545 ret_add_channel = consumer_add_channel(new_channel, ctx);
546 } else if (ret_recv_channel < 0) {
547 goto end_nosignal;
548 }
549 } else {
550 ret_add_channel = consumer_add_channel(new_channel, ctx);
551 }
552 if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && !ret_add_channel) {
553 int monitor_start_ret;
554
555 DBG("Consumer starting monitor timer");
556 consumer_timer_live_start(new_channel, msg.u.channel.live_timer_interval);
557 monitor_start_ret = consumer_timer_monitor_start(
558 new_channel, msg.u.channel.monitor_timer_interval);
559 if (monitor_start_ret < 0) {
560 ERR("Starting channel monitoring timer failed");
561 goto end_nosignal;
562 }
563 }
564
565 health_code_update();
566
567 /* If we received an error in add_channel, we need to report it. */
568 if (ret_add_channel < 0) {
569 ret_send_status = consumer_send_status_msg(sock, ret_add_channel);
570 if (ret_send_status < 0) {
571 goto error_fatal;
572 }
573 goto end_nosignal;
574 }
575
576 goto end_nosignal;
577 }
578 case LTTNG_CONSUMER_ADD_STREAM:
579 {
580 int fd;
581 struct lttng_pipe *stream_pipe;
582 struct lttng_consumer_stream *new_stream;
583 struct lttng_consumer_channel *channel;
584 int alloc_ret = 0;
585 int ret_send_status, ret_poll, ret_get_max_subbuf_size;
586 ssize_t ret_pipe_write, ret_recv;
587
588 /*
589 * Get stream's channel reference. Needed when adding the stream to the
590 * global hash table.
591 */
592 channel = consumer_find_channel(msg.u.stream.channel_key);
593 if (!channel) {
594 /*
595 * We could not find the channel. Can happen if cpu hotplug
596 * happens while tearing down.
597 */
598 ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
599 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
600 }
601
602 health_code_update();
603
604 /* First send a status message before receiving the fds. */
605 ret_send_status = consumer_send_status_msg(sock, ret_code);
606 if (ret_send_status < 0) {
607 /* Somehow, the session daemon is not responding anymore. */
608 goto error_add_stream_fatal;
609 }
610
611 health_code_update();
612
613 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
614 /* Channel was not found. */
615 goto error_add_stream_nosignal;
616 }
617
618 /* Blocking call */
619 health_poll_entry();
620 ret_poll = lttng_consumer_poll_socket(consumer_sockpoll);
621 health_poll_exit();
622 if (ret_poll) {
623 goto error_add_stream_fatal;
624 }
625
626 health_code_update();
627
628 /* Get stream file descriptor from socket */
629 ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
630 if (ret_recv != sizeof(fd)) {
631 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
632 ret_func = ret_recv;
633 goto end;
634 }
635
636 health_code_update();
637
638 /*
639 * Send status code to session daemon only if the recv works. If the
640 * above recv() failed, the session daemon is notified through the
641 * error socket and the teardown is eventually done.
642 */
643 ret_send_status = consumer_send_status_msg(sock, ret_code);
644 if (ret_send_status < 0) {
645 /* Somehow, the session daemon is not responding anymore. */
646 goto error_add_stream_nosignal;
647 }
648
649 health_code_update();
650
651 pthread_mutex_lock(&channel->lock);
652 new_stream = consumer_stream_create(channel,
653 channel->key,
654 fd,
655 channel->name,
656 channel->relayd_id,
657 channel->session_id,
658 channel->trace_chunk,
659 msg.u.stream.cpu,
660 &alloc_ret,
661 channel->type,
662 channel->monitor);
663 if (new_stream == nullptr) {
664 switch (alloc_ret) {
665 case -ENOMEM:
666 case -EINVAL:
667 default:
668 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
669 break;
670 }
671 pthread_mutex_unlock(&channel->lock);
672 goto error_add_stream_nosignal;
673 }
674
675 new_stream->wait_fd = fd;
676 ret_get_max_subbuf_size =
677 kernctl_get_max_subbuf_size(new_stream->wait_fd, &new_stream->max_sb_size);
678 if (ret_get_max_subbuf_size < 0) {
679 pthread_mutex_unlock(&channel->lock);
680 ERR("Failed to get kernel maximal subbuffer size");
681 goto error_add_stream_nosignal;
682 }
683
684 consumer_stream_update_channel_attributes(new_stream, channel);
685
686 /*
687 * We've just assigned the channel to the stream so increment the
688 * refcount right now. We don't need to increment the refcount for
689 * streams in no monitor because we handle manually the cleanup of
690 * those. It is very important to make sure there is NO prior
691 * consumer_del_stream() calls or else the refcount will be unbalanced.
692 */
693 if (channel->monitor) {
694 uatomic_inc(&new_stream->chan->refcount);
695 }
696
697 /*
698 * The buffer flush is done on the session daemon side for the kernel
699 * so no need for the stream "hangup_flush_done" variable to be
700 * tracked. This is important for a kernel stream since we don't rely
701 * on the flush state of the stream to read data. It's not the case for
702 * user space tracing.
703 */
704 new_stream->hangup_flush_done = 0;
705
706 health_code_update();
707
708 pthread_mutex_lock(&new_stream->lock);
709 if (ctx->on_recv_stream) {
710 const int ret_recv_stream = ctx->on_recv_stream(new_stream);
711 if (ret_recv_stream < 0) {
712 pthread_mutex_unlock(&new_stream->lock);
713 pthread_mutex_unlock(&channel->lock);
714 consumer_stream_free(new_stream);
715 goto error_add_stream_nosignal;
716 }
717 }
718 health_code_update();
719
720 if (new_stream->metadata_flag) {
721 channel->metadata_stream = new_stream;
722 }
723
724 /* Do not monitor this stream. */
725 if (!channel->monitor) {
726 DBG("Kernel consumer add stream %s in no monitor mode with "
727 "relayd id %" PRIu64,
728 new_stream->name,
729 new_stream->net_seq_idx);
730 cds_list_add(&new_stream->send_node, &channel->streams.head);
731 pthread_mutex_unlock(&new_stream->lock);
732 pthread_mutex_unlock(&channel->lock);
733 goto end_add_stream;
734 }
735
736 /* Send stream to relayd if the stream has an ID. */
737 if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
738 int ret_send_relayd_stream;
739
740 ret_send_relayd_stream =
741 consumer_send_relayd_stream(new_stream, new_stream->chan->pathname);
742 if (ret_send_relayd_stream < 0) {
743 pthread_mutex_unlock(&new_stream->lock);
744 pthread_mutex_unlock(&channel->lock);
745 consumer_stream_free(new_stream);
746 goto error_add_stream_nosignal;
747 }
748
749 /*
750 * If adding an extra stream to an already
751 * existing channel (e.g. cpu hotplug), we need
752 * to send the "streams_sent" command to relayd.
753 */
754 if (channel->streams_sent_to_relayd) {
755 int ret_send_relayd_streams_sent;
756
757 ret_send_relayd_streams_sent =
758 consumer_send_relayd_streams_sent(new_stream->net_seq_idx);
759 if (ret_send_relayd_streams_sent < 0) {
760 pthread_mutex_unlock(&new_stream->lock);
761 pthread_mutex_unlock(&channel->lock);
762 goto error_add_stream_nosignal;
763 }
764 }
765 }
766 pthread_mutex_unlock(&new_stream->lock);
767 pthread_mutex_unlock(&channel->lock);
768
769 /* Get the right pipe where the stream will be sent. */
770 if (new_stream->metadata_flag) {
771 consumer_add_metadata_stream(new_stream);
772 stream_pipe = ctx->consumer_metadata_pipe;
773 } else {
774 consumer_add_data_stream(new_stream);
775 stream_pipe = ctx->consumer_data_pipe;
776 }
777
778 /* Visible to other threads */
779 new_stream->globally_visible = 1;
780
781 health_code_update();
782
783 ret_pipe_write =
784 lttng_pipe_write(stream_pipe, &new_stream, sizeof(new_stream)); /* NOLINT
785 sizeof
786 used on a
787 pointer.
788 */
789 if (ret_pipe_write < 0) {
790 ERR("Consumer write %s stream to pipe %d",
791 new_stream->metadata_flag ? "metadata" : "data",
792 lttng_pipe_get_writefd(stream_pipe));
793 if (new_stream->metadata_flag) {
794 consumer_del_stream_for_metadata(new_stream);
795 } else {
796 consumer_del_stream_for_data(new_stream);
797 }
798 goto error_add_stream_nosignal;
799 }
800
801 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
802 new_stream->name,
803 fd,
804 new_stream->chan->pathname,
805 new_stream->relayd_stream_id);
806 end_add_stream:
807 break;
808 error_add_stream_nosignal:
809 goto end_nosignal;
810 error_add_stream_fatal:
811 goto error_fatal;
812 }
813 case LTTNG_CONSUMER_STREAMS_SENT:
814 {
815 struct lttng_consumer_channel *channel;
816 int ret_send_status;
817
818 /*
819 * Get stream's channel reference. Needed when adding the stream to the
820 * global hash table.
821 */
822 channel = consumer_find_channel(msg.u.sent_streams.channel_key);
823 if (!channel) {
824 /*
825 * We could not find the channel. Can happen if cpu hotplug
826 * happens while tearing down.
827 */
828 ERR("Unable to find channel key %" PRIu64, msg.u.sent_streams.channel_key);
829 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
830 }
831
832 health_code_update();
833
834 /*
835 * Send status code to session daemon.
836 */
837 ret_send_status = consumer_send_status_msg(sock, ret_code);
838 if (ret_send_status < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
839 /* Somehow, the session daemon is not responding anymore. */
840 goto error_streams_sent_nosignal;
841 }
842
843 health_code_update();
844
845 /*
846 * We should not send this message if we don't monitor the
847 * streams in this channel.
848 */
849 if (!channel->monitor) {
850 goto end_error_streams_sent;
851 }
852
853 health_code_update();
854 /* Send stream to relayd if the stream has an ID. */
855 if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
856 int ret_send_relay_streams;
857
858 ret_send_relay_streams =
859 consumer_send_relayd_streams_sent(msg.u.sent_streams.net_seq_idx);
860 if (ret_send_relay_streams < 0) {
861 goto error_streams_sent_nosignal;
862 }
863 channel->streams_sent_to_relayd = true;
864 }
865 end_error_streams_sent:
866 break;
867 error_streams_sent_nosignal:
868 goto end_nosignal;
869 }
870 case LTTNG_CONSUMER_UPDATE_STREAM:
871 {
872 return -ENOSYS;
873 }
874 case LTTNG_CONSUMER_DESTROY_RELAYD:
875 {
876 const uint64_t index = msg.u.destroy_relayd.net_seq_idx;
877 struct consumer_relayd_sock_pair *relayd;
878 int ret_send_status;
879
880 DBG("Kernel consumer destroying relayd %" PRIu64, index);
881
882 /* Get relayd reference if exists. */
883 relayd = consumer_find_relayd(index);
884 if (relayd == nullptr) {
885 DBG("Unable to find relayd %" PRIu64, index);
886 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
887 }
888
889 /*
890 * Each relayd socket pair has a refcount of stream attached to it
891 * which tells if the relayd is still active or not depending on the
892 * refcount value.
893 *
894 * This will set the destroy flag of the relayd object and destroy it
895 * if the refcount reaches zero when called.
896 *
897 * The destroy can happen either here or when a stream fd hangs up.
898 */
899 if (relayd) {
900 consumer_flag_relayd_for_destroy(relayd);
901 }
902
903 health_code_update();
904
905 ret_send_status = consumer_send_status_msg(sock, ret_code);
906 if (ret_send_status < 0) {
907 /* Somehow, the session daemon is not responding anymore. */
908 goto error_fatal;
909 }
910
911 goto end_nosignal;
912 }
913 case LTTNG_CONSUMER_DATA_PENDING:
914 {
915 int32_t ret_data_pending;
916 const uint64_t id = msg.u.data_pending.session_id;
917 ssize_t ret_send;
918
919 DBG("Kernel consumer data pending command for id %" PRIu64, id);
920
921 ret_data_pending = consumer_data_pending(id);
922
923 health_code_update();
924
925 /* Send back returned value to session daemon */
926 ret_send =
927 lttcomm_send_unix_sock(sock, &ret_data_pending, sizeof(ret_data_pending));
928 if (ret_send < 0) {
929 PERROR("send data pending ret code");
930 goto error_fatal;
931 }
932
933 /*
934 * No need to send back a status message since the data pending
935 * returned value is the response.
936 */
937 break;
938 }
939 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
940 {
941 struct lttng_consumer_channel *channel;
942 const uint64_t key = msg.u.snapshot_channel.key;
943 int ret_send_status;
944
945 channel = consumer_find_channel(key);
946 if (!channel) {
947 ERR("Channel %" PRIu64 " not found", key);
948 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
949 } else {
950 if (msg.u.snapshot_channel.metadata == 1) {
951 int ret_snapshot;
952
953 ret_snapshot = lttng_kconsumer_snapshot_metadata(
954 channel,
955 key,
956 msg.u.snapshot_channel.pathname,
957 msg.u.snapshot_channel.relayd_id,
958 ctx);
959 if (ret_snapshot < 0) {
960 ERR("Snapshot metadata failed");
961 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
962 }
963 } else {
964 int ret_snapshot;
965
966 ret_snapshot = lttng_kconsumer_snapshot_channel(
967 channel,
968 key,
969 msg.u.snapshot_channel.pathname,
970 msg.u.snapshot_channel.relayd_id,
971 msg.u.snapshot_channel.nb_packets_per_stream);
972 if (ret_snapshot < 0) {
973 ERR("Snapshot channel failed");
974 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
975 }
976 }
977 }
978 health_code_update();
979
980 ret_send_status = consumer_send_status_msg(sock, ret_code);
981 if (ret_send_status < 0) {
982 /* Somehow, the session daemon is not responding anymore. */
983 goto end_nosignal;
984 }
985 break;
986 }
987 case LTTNG_CONSUMER_DESTROY_CHANNEL:
988 {
989 const uint64_t key = msg.u.destroy_channel.key;
990 struct lttng_consumer_channel *channel;
991 int ret_send_status;
992
993 channel = consumer_find_channel(key);
994 if (!channel) {
995 ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
996 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
997 }
998
999 health_code_update();
1000
1001 ret_send_status = consumer_send_status_msg(sock, ret_code);
1002 if (ret_send_status < 0) {
1003 /* Somehow, the session daemon is not responding anymore. */
1004 goto end_destroy_channel;
1005 }
1006
1007 health_code_update();
1008
1009 /* Stop right now if no channel was found. */
1010 if (!channel) {
1011 goto end_destroy_channel;
1012 }
1013
1014 /*
1015 * This command should ONLY be issued for channel with streams set in
1016 * no monitor mode.
1017 */
1018 LTTNG_ASSERT(!channel->monitor);
1019
1020 /*
1021 * The refcount should ALWAYS be 0 in the case of a channel in no
1022 * monitor mode.
1023 */
1024 LTTNG_ASSERT(!uatomic_sub_return(&channel->refcount, 1));
1025
1026 consumer_del_channel(channel);
1027 end_destroy_channel:
1028 goto end_nosignal;
1029 }
1030 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1031 {
1032 ssize_t ret;
1033 uint64_t count;
1034 struct lttng_consumer_channel *channel;
1035 const uint64_t id = msg.u.discarded_events.session_id;
1036 const uint64_t key = msg.u.discarded_events.channel_key;
1037
1038 DBG("Kernel consumer discarded events command for session id %" PRIu64
1039 ", channel key %" PRIu64,
1040 id,
1041 key);
1042
1043 channel = consumer_find_channel(key);
1044 if (!channel) {
1045 ERR("Kernel consumer discarded events channel %" PRIu64 " not found", key);
1046 count = 0;
1047 } else {
1048 count = channel->discarded_events;
1049 }
1050
1051 health_code_update();
1052
1053 /* Send back returned value to session daemon */
1054 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1055 if (ret < 0) {
1056 PERROR("send discarded events");
1057 goto error_fatal;
1058 }
1059
1060 break;
1061 }
1062 case LTTNG_CONSUMER_LOST_PACKETS:
1063 {
1064 ssize_t ret;
1065 uint64_t count;
1066 struct lttng_consumer_channel *channel;
1067 const uint64_t id = msg.u.lost_packets.session_id;
1068 const uint64_t key = msg.u.lost_packets.channel_key;
1069
1070 DBG("Kernel consumer lost packets command for session id %" PRIu64
1071 ", channel key %" PRIu64,
1072 id,
1073 key);
1074
1075 channel = consumer_find_channel(key);
1076 if (!channel) {
1077 ERR("Kernel consumer lost packets channel %" PRIu64 " not found", key);
1078 count = 0;
1079 } else {
1080 count = channel->lost_packets;
1081 }
1082
1083 health_code_update();
1084
1085 /* Send back returned value to session daemon */
1086 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1087 if (ret < 0) {
1088 PERROR("send lost packets");
1089 goto error_fatal;
1090 }
1091
1092 break;
1093 }
1094 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
1095 {
1096 int channel_monitor_pipe;
1097 int ret_send_status, ret_set_channel_monitor_pipe;
1098 ssize_t ret_recv;
1099
1100 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1101 /* Successfully received the command's type. */
1102 ret_send_status = consumer_send_status_msg(sock, ret_code);
1103 if (ret_send_status < 0) {
1104 goto error_fatal;
1105 }
1106
1107 ret_recv = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe, 1);
1108 if (ret_recv != sizeof(channel_monitor_pipe)) {
1109 ERR("Failed to receive channel monitor pipe");
1110 goto error_fatal;
1111 }
1112
1113 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
1114 ret_set_channel_monitor_pipe =
1115 consumer_timer_thread_set_channel_monitor_pipe(channel_monitor_pipe);
1116 if (!ret_set_channel_monitor_pipe) {
1117 int flags;
1118 int ret_fcntl;
1119
1120 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1121 /* Set the pipe as non-blocking. */
1122 ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
1123 if (ret_fcntl == -1) {
1124 PERROR("fcntl get flags of the channel monitoring pipe");
1125 goto error_fatal;
1126 }
1127 flags = ret_fcntl;
1128
1129 ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL, flags | O_NONBLOCK);
1130 if (ret_fcntl == -1) {
1131 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1132 goto error_fatal;
1133 }
1134 DBG("Channel monitor pipe set as non-blocking");
1135 } else {
1136 ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
1137 }
1138 ret_send_status = consumer_send_status_msg(sock, ret_code);
1139 if (ret_send_status < 0) {
1140 goto error_fatal;
1141 }
1142 break;
1143 }
1144 case LTTNG_CONSUMER_ROTATE_CHANNEL:
1145 {
1146 struct lttng_consumer_channel *channel;
1147 const uint64_t key = msg.u.rotate_channel.key;
1148 int ret_send_status;
1149
1150 DBG("Consumer rotate channel %" PRIu64, key);
1151
1152 channel = consumer_find_channel(key);
1153 if (!channel) {
1154 ERR("Channel %" PRIu64 " not found", key);
1155 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1156 } else {
1157 /*
1158 * Sample the rotate position of all the streams in this channel.
1159 */
1160 int ret_rotate_channel;
1161
1162 ret_rotate_channel = lttng_consumer_rotate_channel(
1163 channel, key, msg.u.rotate_channel.relayd_id);
1164 if (ret_rotate_channel < 0) {
1165 ERR("Rotate channel failed");
1166 ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
1167 }
1168
1169 health_code_update();
1170 }
1171
1172 ret_send_status = consumer_send_status_msg(sock, ret_code);
1173 if (ret_send_status < 0) {
1174 /* Somehow, the session daemon is not responding anymore. */
1175 goto error_rotate_channel;
1176 }
1177 if (channel) {
1178 /* Rotate the streams that are ready right now. */
1179 int ret_rotate;
1180
1181 ret_rotate = lttng_consumer_rotate_ready_streams(channel, key);
1182 if (ret_rotate < 0) {
1183 ERR("Rotate ready streams failed");
1184 }
1185 }
1186 break;
1187 error_rotate_channel:
1188 goto end_nosignal;
1189 }
1190 case LTTNG_CONSUMER_CLEAR_CHANNEL:
1191 {
1192 struct lttng_consumer_channel *channel;
1193 const uint64_t key = msg.u.clear_channel.key;
1194 int ret_send_status;
1195
1196 channel = consumer_find_channel(key);
1197 if (!channel) {
1198 DBG("Channel %" PRIu64 " not found", key);
1199 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1200 } else {
1201 int ret_clear_channel;
1202
1203 ret_clear_channel = lttng_consumer_clear_channel(channel);
1204 if (ret_clear_channel) {
1205 ERR("Clear channel failed");
1206 ret_code = (lttcomm_return_code) ret_clear_channel;
1207 }
1208
1209 health_code_update();
1210 }
1211
1212 ret_send_status = consumer_send_status_msg(sock, ret_code);
1213 if (ret_send_status < 0) {
1214 /* Somehow, the session daemon is not responding anymore. */
1215 goto end_nosignal;
1216 }
1217
1218 break;
1219 }
1220 case LTTNG_CONSUMER_INIT:
1221 {
1222 int ret_send_status;
1223 lttng_uuid sessiond_uuid;
1224
1225 std::copy(std::begin(msg.u.init.sessiond_uuid),
1226 std::end(msg.u.init.sessiond_uuid),
1227 sessiond_uuid.begin());
1228
1229 ret_code = lttng_consumer_init_command(ctx, sessiond_uuid);
1230 health_code_update();
1231 ret_send_status = consumer_send_status_msg(sock, ret_code);
1232 if (ret_send_status < 0) {
1233 /* Somehow, the session daemon is not responding anymore. */
1234 goto end_nosignal;
1235 }
1236 break;
1237 }
1238 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
1239 {
1240 const struct lttng_credentials credentials = {
1241 .uid = LTTNG_OPTIONAL_INIT_VALUE(
1242 msg.u.create_trace_chunk.credentials.value.uid),
1243 .gid = LTTNG_OPTIONAL_INIT_VALUE(
1244 msg.u.create_trace_chunk.credentials.value.gid),
1245 };
1246 const bool is_local_trace = !msg.u.create_trace_chunk.relayd_id.is_set;
1247 const uint64_t relayd_id = msg.u.create_trace_chunk.relayd_id.value;
1248 const char *chunk_override_name = *msg.u.create_trace_chunk.override_name ?
1249 msg.u.create_trace_chunk.override_name :
1250 nullptr;
1251 struct lttng_directory_handle *chunk_directory_handle = nullptr;
1252
1253 /*
1254 * The session daemon will only provide a chunk directory file
1255 * descriptor for local traces.
1256 */
1257 if (is_local_trace) {
1258 int chunk_dirfd;
1259 int ret_send_status;
1260 ssize_t ret_recv;
1261
1262 /* Acnowledge the reception of the command. */
1263 ret_send_status = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
1264 if (ret_send_status < 0) {
1265 /* Somehow, the session daemon is not responding anymore. */
1266 goto end_nosignal;
1267 }
1268
1269 ret_recv = lttcomm_recv_fds_unix_sock(sock, &chunk_dirfd, 1);
1270 if (ret_recv != sizeof(chunk_dirfd)) {
1271 ERR("Failed to receive trace chunk directory file descriptor");
1272 goto error_fatal;
1273 }
1274
1275 DBG("Received trace chunk directory fd (%d)", chunk_dirfd);
1276 chunk_directory_handle =
1277 lttng_directory_handle_create_from_dirfd(chunk_dirfd);
1278 if (!chunk_directory_handle) {
1279 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1280 if (close(chunk_dirfd)) {
1281 PERROR("Failed to close chunk directory file descriptor");
1282 }
1283 goto error_fatal;
1284 }
1285 }
1286
1287 ret_code = lttng_consumer_create_trace_chunk(
1288 !is_local_trace ? &relayd_id : nullptr,
1289 msg.u.create_trace_chunk.session_id,
1290 msg.u.create_trace_chunk.chunk_id,
1291 (time_t) msg.u.create_trace_chunk.creation_timestamp,
1292 chunk_override_name,
1293 msg.u.create_trace_chunk.credentials.is_set ? &credentials : nullptr,
1294 chunk_directory_handle);
1295 lttng_directory_handle_put(chunk_directory_handle);
1296 goto end_msg_sessiond;
1297 }
1298 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
1299 {
1300 enum lttng_trace_chunk_command_type close_command =
1301 (lttng_trace_chunk_command_type) msg.u.close_trace_chunk.close_command.value;
1302 const uint64_t relayd_id = msg.u.close_trace_chunk.relayd_id.value;
1303 struct lttcomm_consumer_close_trace_chunk_reply reply;
1304 char path[LTTNG_PATH_MAX];
1305 ssize_t ret_send;
1306
1307 ret_code = lttng_consumer_close_trace_chunk(
1308 msg.u.close_trace_chunk.relayd_id.is_set ? &relayd_id : nullptr,
1309 msg.u.close_trace_chunk.session_id,
1310 msg.u.close_trace_chunk.chunk_id,
1311 (time_t) msg.u.close_trace_chunk.close_timestamp,
1312 msg.u.close_trace_chunk.close_command.is_set ? &close_command : nullptr,
1313 path);
1314 reply.ret_code = ret_code;
1315 reply.path_length = strlen(path) + 1;
1316 ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
1317 if (ret_send != sizeof(reply)) {
1318 goto error_fatal;
1319 }
1320 ret_send = lttcomm_send_unix_sock(sock, path, reply.path_length);
1321 if (ret_send != reply.path_length) {
1322 goto error_fatal;
1323 }
1324 goto end_nosignal;
1325 }
1326 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
1327 {
1328 const uint64_t relayd_id = msg.u.trace_chunk_exists.relayd_id.value;
1329
1330 ret_code = lttng_consumer_trace_chunk_exists(
1331 msg.u.trace_chunk_exists.relayd_id.is_set ? &relayd_id : nullptr,
1332 msg.u.trace_chunk_exists.session_id,
1333 msg.u.trace_chunk_exists.chunk_id);
1334 goto end_msg_sessiond;
1335 }
1336 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
1337 {
1338 const uint64_t key = msg.u.open_channel_packets.key;
1339 struct lttng_consumer_channel *channel = consumer_find_channel(key);
1340
1341 if (channel) {
1342 pthread_mutex_lock(&channel->lock);
1343 ret_code = lttng_consumer_open_channel_packets(channel);
1344 pthread_mutex_unlock(&channel->lock);
1345 } else {
1346 WARN("Channel %" PRIu64 " not found", key);
1347 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1348 }
1349
1350 health_code_update();
1351 goto end_msg_sessiond;
1352 }
1353 default:
1354 goto end_nosignal;
1355 }
1356
1357 end_nosignal:
1358 /*
1359 * Return 1 to indicate success since the 0 value can be a socket
1360 * shutdown during the recv() or send() call.
1361 */
1362 ret_func = 1;
1363 goto end;
1364 error_fatal:
1365 /* This will issue a consumer stop. */
1366 ret_func = -1;
1367 goto end;
1368 end_msg_sessiond:
1369 /*
1370 * The returned value here is not useful since either way we'll return 1 to
1371 * the caller because the session daemon socket management is done
1372 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1373 */
1374 {
1375 int ret_send_status;
1376
1377 ret_send_status = consumer_send_status_msg(sock, ret_code);
1378 if (ret_send_status < 0) {
1379 goto error_fatal;
1380 }
1381 }
1382
1383 ret_func = 1;
1384
1385 end:
1386 health_code_update();
1387 return ret_func;
1388 }
1389
1390 /*
1391 * Sync metadata meaning request them to the session daemon and snapshot to the
1392 * metadata thread can consumer them.
1393 *
1394 * Metadata stream lock MUST be acquired.
1395 */
1396 enum sync_metadata_status lttng_kconsumer_sync_metadata(struct lttng_consumer_stream *metadata)
1397 {
1398 int ret;
1399 enum sync_metadata_status status;
1400
1401 LTTNG_ASSERT(metadata);
1402
1403 ret = kernctl_buffer_flush(metadata->wait_fd);
1404 if (ret < 0) {
1405 ERR("Failed to flush kernel stream");
1406 status = SYNC_METADATA_STATUS_ERROR;
1407 goto end;
1408 }
1409
1410 ret = kernctl_snapshot(metadata->wait_fd);
1411 if (ret < 0) {
1412 if (errno == EAGAIN) {
1413 /* No new metadata, exit. */
1414 DBG("Sync metadata, no new kernel metadata");
1415 status = SYNC_METADATA_STATUS_NO_DATA;
1416 } else {
1417 ERR("Sync metadata, taking kernel snapshot failed.");
1418 status = SYNC_METADATA_STATUS_ERROR;
1419 }
1420 } else {
1421 status = SYNC_METADATA_STATUS_NEW_DATA;
1422 }
1423
1424 end:
1425 return status;
1426 }
1427
1428 static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
1429 struct stream_subbuffer *subbuf)
1430 {
1431 int ret;
1432
1433 ret = kernctl_get_subbuf_size(stream->wait_fd, &subbuf->info.data.subbuf_size);
1434 if (ret) {
1435 goto end;
1436 }
1437
1438 ret = kernctl_get_padded_subbuf_size(stream->wait_fd,
1439 &subbuf->info.data.padded_subbuf_size);
1440 if (ret) {
1441 goto end;
1442 }
1443
1444 end:
1445 return ret;
1446 }
1447
1448 static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
1449 struct stream_subbuffer *subbuf)
1450 {
1451 int ret;
1452
1453 ret = extract_common_subbuffer_info(stream, subbuf);
1454 if (ret) {
1455 goto end;
1456 }
1457
1458 ret = kernctl_get_metadata_version(stream->wait_fd, &subbuf->info.metadata.version);
1459 if (ret) {
1460 goto end;
1461 }
1462
1463 end:
1464 return ret;
1465 }
1466
1467 static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
1468 struct stream_subbuffer *subbuf)
1469 {
1470 int ret;
1471
1472 ret = extract_common_subbuffer_info(stream, subbuf);
1473 if (ret) {
1474 goto end;
1475 }
1476
1477 ret = kernctl_get_packet_size(stream->wait_fd, &subbuf->info.data.packet_size);
1478 if (ret < 0) {
1479 PERROR("Failed to get sub-buffer packet size");
1480 goto end;
1481 }
1482
1483 ret = kernctl_get_content_size(stream->wait_fd, &subbuf->info.data.content_size);
1484 if (ret < 0) {
1485 PERROR("Failed to get sub-buffer content size");
1486 goto end;
1487 }
1488
1489 ret = kernctl_get_timestamp_begin(stream->wait_fd, &subbuf->info.data.timestamp_begin);
1490 if (ret < 0) {
1491 PERROR("Failed to get sub-buffer begin timestamp");
1492 goto end;
1493 }
1494
1495 ret = kernctl_get_timestamp_end(stream->wait_fd, &subbuf->info.data.timestamp_end);
1496 if (ret < 0) {
1497 PERROR("Failed to get sub-buffer end timestamp");
1498 goto end;
1499 }
1500
1501 ret = kernctl_get_events_discarded(stream->wait_fd, &subbuf->info.data.events_discarded);
1502 if (ret) {
1503 PERROR("Failed to get sub-buffer events discarded count");
1504 goto end;
1505 }
1506
1507 ret = kernctl_get_sequence_number(stream->wait_fd,
1508 &subbuf->info.data.sequence_number.value);
1509 if (ret) {
1510 /* May not be supported by older LTTng-modules. */
1511 if (ret != -ENOTTY) {
1512 PERROR("Failed to get sub-buffer sequence number");
1513 goto end;
1514 }
1515 } else {
1516 subbuf->info.data.sequence_number.is_set = true;
1517 }
1518
1519 ret = kernctl_get_stream_id(stream->wait_fd, &subbuf->info.data.stream_id);
1520 if (ret < 0) {
1521 PERROR("Failed to get stream id");
1522 goto end;
1523 }
1524
1525 ret = kernctl_get_instance_id(stream->wait_fd, &subbuf->info.data.stream_instance_id.value);
1526 if (ret) {
1527 /* May not be supported by older LTTng-modules. */
1528 if (ret != -ENOTTY) {
1529 PERROR("Failed to get stream instance id");
1530 goto end;
1531 }
1532 } else {
1533 subbuf->info.data.stream_instance_id.is_set = true;
1534 }
1535 end:
1536 return ret;
1537 }
1538
1539 static enum get_next_subbuffer_status get_subbuffer_common(struct lttng_consumer_stream *stream,
1540 struct stream_subbuffer *subbuffer)
1541 {
1542 int ret;
1543 enum get_next_subbuffer_status status;
1544
1545 ret = kernctl_get_next_subbuf(stream->wait_fd);
1546 switch (ret) {
1547 case 0:
1548 status = GET_NEXT_SUBBUFFER_STATUS_OK;
1549 break;
1550 case -ENODATA:
1551 case -EAGAIN:
1552 /*
1553 * The caller only expects -ENODATA when there is no data to
1554 * read, but the kernel tracer returns -EAGAIN when there is
1555 * currently no data for a non-finalized stream, and -ENODATA
1556 * when there is no data for a finalized stream. Those can be
1557 * combined into a -ENODATA return value.
1558 */
1559 status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
1560 goto end;
1561 default:
1562 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1563 goto end;
1564 }
1565
1566 ret = stream->read_subbuffer_ops.extract_subbuffer_info(stream, subbuffer);
1567 if (ret) {
1568 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1569 }
1570 end:
1571 return status;
1572 }
1573
1574 static enum get_next_subbuffer_status
1575 get_next_subbuffer_splice(struct lttng_consumer_stream *stream, struct stream_subbuffer *subbuffer)
1576 {
1577 const enum get_next_subbuffer_status status = get_subbuffer_common(stream, subbuffer);
1578
1579 if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
1580 goto end;
1581 }
1582
1583 subbuffer->buffer.fd = stream->wait_fd;
1584 end:
1585 return status;
1586 }
1587
1588 static enum get_next_subbuffer_status get_next_subbuffer_mmap(struct lttng_consumer_stream *stream,
1589 struct stream_subbuffer *subbuffer)
1590 {
1591 int ret;
1592 enum get_next_subbuffer_status status;
1593 const char *addr;
1594
1595 status = get_subbuffer_common(stream, subbuffer);
1596 if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
1597 goto end;
1598 }
1599
1600 ret = get_current_subbuf_addr(stream, &addr);
1601 if (ret) {
1602 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1603 goto end;
1604 }
1605
1606 subbuffer->buffer.buffer =
1607 lttng_buffer_view_init(addr, 0, subbuffer->info.data.padded_subbuf_size);
1608 end:
1609 return status;
1610 }
1611
1612 static enum get_next_subbuffer_status
1613 get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream,
1614 struct stream_subbuffer *subbuffer)
1615 {
1616 int ret;
1617 const char *addr;
1618 bool coherent;
1619 enum get_next_subbuffer_status status;
1620
1621 ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd, &coherent);
1622 if (ret) {
1623 goto end;
1624 }
1625
1626 ret = stream->read_subbuffer_ops.extract_subbuffer_info(stream, subbuffer);
1627 if (ret) {
1628 goto end;
1629 }
1630
1631 LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
1632
1633 ret = get_current_subbuf_addr(stream, &addr);
1634 if (ret) {
1635 goto end;
1636 }
1637
1638 subbuffer->buffer.buffer =
1639 lttng_buffer_view_init(addr, 0, subbuffer->info.data.padded_subbuf_size);
1640 DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
1641 subbuffer->info.metadata.padded_subbuf_size,
1642 coherent ? "true" : "false");
1643 end:
1644 /*
1645 * The caller only expects -ENODATA when there is no data to read, but
1646 * the kernel tracer returns -EAGAIN when there is currently no data
1647 * for a non-finalized stream, and -ENODATA when there is no data for a
1648 * finalized stream. Those can be combined into a -ENODATA return value.
1649 */
1650 switch (ret) {
1651 case 0:
1652 status = GET_NEXT_SUBBUFFER_STATUS_OK;
1653 break;
1654 case -ENODATA:
1655 case -EAGAIN:
1656 /*
1657 * The caller only expects -ENODATA when there is no data to
1658 * read, but the kernel tracer returns -EAGAIN when there is
1659 * currently no data for a non-finalized stream, and -ENODATA
1660 * when there is no data for a finalized stream. Those can be
1661 * combined into a -ENODATA return value.
1662 */
1663 status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
1664 break;
1665 default:
1666 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1667 break;
1668 }
1669
1670 return status;
1671 }
1672
1673 static int put_next_subbuffer(struct lttng_consumer_stream *stream,
1674 struct stream_subbuffer *subbuffer __attribute__((unused)))
1675 {
1676 const int ret = kernctl_put_next_subbuf(stream->wait_fd);
1677
1678 if (ret) {
1679 if (ret == -EFAULT) {
1680 PERROR("Error in unreserving sub buffer");
1681 } else if (ret == -EIO) {
1682 /* Should never happen with newer LTTng versions */
1683 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1684 }
1685 }
1686
1687 return ret;
1688 }
1689
1690 static bool is_get_next_check_metadata_available(int tracer_fd)
1691 {
1692 const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, nullptr);
1693 const bool available = ret != -ENOTTY;
1694
1695 if (ret == 0) {
1696 /* get succeeded, make sure to put the subbuffer. */
1697 kernctl_put_subbuf(tracer_fd);
1698 }
1699
1700 return available;
1701 }
1702
1703 static int signal_metadata(struct lttng_consumer_stream *stream,
1704 struct lttng_consumer_local_data *ctx __attribute__((unused)))
1705 {
1706 ASSERT_LOCKED(stream->metadata_rdv_lock);
1707 return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
1708 }
1709
1710 static int lttng_kconsumer_set_stream_ops(struct lttng_consumer_stream *stream)
1711 {
1712 int ret = 0;
1713
1714 if (stream->metadata_flag && stream->chan->is_live) {
1715 DBG("Attempting to enable metadata bucketization for live consumers");
1716 if (is_get_next_check_metadata_available(stream->wait_fd)) {
1717 DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
1718 stream->read_subbuffer_ops.get_next_subbuffer =
1719 get_next_subbuffer_metadata_check;
1720 ret = consumer_stream_enable_metadata_bucketization(stream);
1721 if (ret) {
1722 goto end;
1723 }
1724 } else {
1725 /*
1726 * The kernel tracer version is too old to indicate
1727 * when the metadata stream has reached a "coherent"
1728 * (parseable) point.
1729 *
1730 * This means that a live viewer may see an incoherent
1731 * sequence of metadata and fail to parse it.
1732 */
1733 WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
1734 metadata_bucket_destroy(stream->metadata_bucket);
1735 stream->metadata_bucket = nullptr;
1736 }
1737
1738 stream->read_subbuffer_ops.on_sleep = signal_metadata;
1739 }
1740
1741 if (!stream->read_subbuffer_ops.get_next_subbuffer) {
1742 if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
1743 stream->read_subbuffer_ops.get_next_subbuffer = get_next_subbuffer_mmap;
1744 } else {
1745 stream->read_subbuffer_ops.get_next_subbuffer = get_next_subbuffer_splice;
1746 }
1747 }
1748
1749 if (stream->metadata_flag) {
1750 stream->read_subbuffer_ops.extract_subbuffer_info = extract_metadata_subbuffer_info;
1751 } else {
1752 stream->read_subbuffer_ops.extract_subbuffer_info = extract_data_subbuffer_info;
1753 if (stream->chan->is_live) {
1754 stream->read_subbuffer_ops.send_live_beacon = consumer_flush_kernel_index;
1755 }
1756 }
1757
1758 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
1759 end:
1760 return ret;
1761 }
1762
1763 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
1764 {
1765 int ret;
1766
1767 LTTNG_ASSERT(stream);
1768
1769 /*
1770 * Don't create anything if this is set for streaming or if there is
1771 * no current trace chunk on the parent channel.
1772 */
1773 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
1774 stream->chan->trace_chunk) {
1775 ret = consumer_stream_create_output_files(stream, true);
1776 if (ret) {
1777 goto error;
1778 }
1779 }
1780
1781 if (stream->output == LTTNG_EVENT_MMAP) {
1782 /* get the len of the mmap region */
1783 unsigned long mmap_len;
1784
1785 ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
1786 if (ret != 0) {
1787 PERROR("kernctl_get_mmap_len");
1788 goto error_close_fd;
1789 }
1790 stream->mmap_len = (size_t) mmap_len;
1791
1792 stream->mmap_base =
1793 mmap(nullptr, stream->mmap_len, PROT_READ, MAP_PRIVATE, stream->wait_fd, 0);
1794 if (stream->mmap_base == MAP_FAILED) {
1795 PERROR("Error mmaping");
1796 ret = -1;
1797 goto error_close_fd;
1798 }
1799 }
1800
1801 ret = lttng_kconsumer_set_stream_ops(stream);
1802 if (ret) {
1803 goto error_close_fd;
1804 }
1805
1806 /* we return 0 to let the library handle the FD internally */
1807 return 0;
1808
1809 error_close_fd:
1810 if (stream->out_fd >= 0) {
1811 int err;
1812
1813 err = close(stream->out_fd);
1814 LTTNG_ASSERT(!err);
1815 stream->out_fd = -1;
1816 }
1817 error:
1818 return ret;
1819 }
1820
1821 /*
1822 * Check if data is still being extracted from the buffers for a specific
1823 * stream. Consumer data lock MUST be acquired before calling this function
1824 * and the stream lock.
1825 *
1826 * Return 1 if the traced data are still getting read else 0 meaning that the
1827 * data is available for trace viewer reading.
1828 */
1829 int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
1830 {
1831 int ret;
1832
1833 LTTNG_ASSERT(stream);
1834
1835 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1836 ret = 0;
1837 goto end;
1838 }
1839
1840 ret = kernctl_get_next_subbuf(stream->wait_fd);
1841 if (ret == 0) {
1842 /* There is still data so let's put back this subbuffer. */
1843 ret = kernctl_put_subbuf(stream->wait_fd);
1844 LTTNG_ASSERT(ret == 0);
1845 ret = 1; /* Data is pending */
1846 goto end;
1847 }
1848
1849 /* Data is NOT pending and ready to be read. */
1850 ret = 0;
1851
1852 end:
1853 return ret;
1854 }
This page took 0.065384 seconds and 5 git commands to generate.