consumerd: refactor: split read_subbuf into sub-operations
[lttng-tools.git] / src / common / kernel-consumer / kernel-consumer.c
1 /*
2 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #define _LGPL_SOURCE
11 #include <assert.h>
12 #include <poll.h>
13 #include <pthread.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <sys/mman.h>
17 #include <sys/socket.h>
18 #include <sys/types.h>
19 #include <inttypes.h>
20 #include <unistd.h>
21 #include <sys/stat.h>
22
23 #include <bin/lttng-consumerd/health-consumerd.h>
24 #include <common/common.h>
25 #include <common/kernel-ctl/kernel-ctl.h>
26 #include <common/sessiond-comm/sessiond-comm.h>
27 #include <common/sessiond-comm/relayd.h>
28 #include <common/compat/fcntl.h>
29 #include <common/compat/endian.h>
30 #include <common/pipe.h>
31 #include <common/relayd/relayd.h>
32 #include <common/utils.h>
33 #include <common/consumer/consumer-stream.h>
34 #include <common/index/index.h>
35 #include <common/consumer/consumer-timer.h>
36 #include <common/optional.h>
37 #include <common/buffer-view.h>
38 #include <common/consumer/consumer.h>
39 #include <stdint.h>
40
41 #include "kernel-consumer.h"
42
43 extern struct lttng_consumer_global_data consumer_data;
44 extern int consumer_poll_timeout;
45
46 /*
47 * Take a snapshot for a specific fd
48 *
49 * Returns 0 on success, < 0 on error
50 */
51 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
52 {
53 int ret = 0;
54 int infd = stream->wait_fd;
55
56 ret = kernctl_snapshot(infd);
57 /*
58 * -EAGAIN is not an error, it just means that there is no data to
59 * be read.
60 */
61 if (ret != 0 && ret != -EAGAIN) {
62 PERROR("Getting sub-buffer snapshot.");
63 }
64
65 return ret;
66 }
67
68 /*
69 * Sample consumed and produced positions for a specific fd.
70 *
71 * Returns 0 on success, < 0 on error.
72 */
73 int lttng_kconsumer_sample_snapshot_positions(
74 struct lttng_consumer_stream *stream)
75 {
76 assert(stream);
77
78 return kernctl_snapshot_sample_positions(stream->wait_fd);
79 }
80
81 /*
82 * Get the produced position
83 *
84 * Returns 0 on success, < 0 on error
85 */
86 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
87 unsigned long *pos)
88 {
89 int ret;
90 int infd = stream->wait_fd;
91
92 ret = kernctl_snapshot_get_produced(infd, pos);
93 if (ret != 0) {
94 PERROR("kernctl_snapshot_get_produced");
95 }
96
97 return ret;
98 }
99
100 /*
101 * Get the consumerd position
102 *
103 * Returns 0 on success, < 0 on error
104 */
105 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
106 unsigned long *pos)
107 {
108 int ret;
109 int infd = stream->wait_fd;
110
111 ret = kernctl_snapshot_get_consumed(infd, pos);
112 if (ret != 0) {
113 PERROR("kernctl_snapshot_get_consumed");
114 }
115
116 return ret;
117 }
118
119 static
120 int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
121 const char **addr)
122 {
123 int ret;
124 unsigned long mmap_offset;
125 const char *mmap_base = stream->mmap_base;
126
127 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
128 if (ret < 0) {
129 PERROR("Failed to get mmap read offset");
130 goto error;
131 }
132
133 *addr = mmap_base + mmap_offset;
134 error:
135 return ret;
136 }
137
138 /*
139  * Take a snapshot of all the stream of a channel
140  * RCU read-side lock must be held across this function to ensure existence of
141  * channel. The channel lock must be held by the caller.
142  *
 * key is only used for logging. path is the snapshot destination: a relayd
 * path when relayd_id is set, a local directory otherwise.
 * nb_packets_per_stream bounds how many packets are captured per stream
 * (presumably 0 means "no limit" -- confirm in consumer_get_consume_start_pos()).
 *
143  * Returns 0 on success, < 0 on error
144  */
145 static int lttng_kconsumer_snapshot_channel(
146 		struct lttng_consumer_channel *channel,
147 		uint64_t key, char *path, uint64_t relayd_id,
148 		uint64_t nb_packets_per_stream,
149 		struct lttng_consumer_local_data *ctx)
150 {
151 	int ret;
152 	struct lttng_consumer_stream *stream;
153 
154 	DBG("Kernel consumer snapshot channel %" PRIu64, key);
155 
156 	rcu_read_lock();
157 
158 	/* Splice is not supported yet for channel snapshot. */
159 	if (channel->output != CONSUMER_CHANNEL_MMAP) {
160 		ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
161 				channel->name);
162 		ret = -1;
163 		goto end;
164 	}
165 
	/* Snapshot each of the channel's streams in turn. */
166 	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
167 		unsigned long consumed_pos, produced_pos;
168 
169 		health_code_update();
170 
171 		/*
172 		 * Lock stream because we are about to change its state.
173 		 */
174 		pthread_mutex_lock(&stream->lock);
175 
176 		assert(channel->trace_chunk);
177 		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
178 			/*
179 			 * Can't happen barring an internal error as the channel
180 			 * holds a reference to the trace chunk.
181 			 */
182 			ERR("Failed to acquire reference to channel's trace chunk");
183 			ret = -1;
184 			goto end_unlock;
185 		}
186 		assert(!stream->trace_chunk);
		/* Reference acquired above; released at the end of this iteration. */
187 		stream->trace_chunk = channel->trace_chunk;
188 
189 		/*
190 		 * Assign the received relayd ID so we can use it for streaming. The streams
191 		 * are not visible to anyone so this is OK to change it.
192 		 */
193 		stream->net_seq_idx = relayd_id;
194 		channel->relayd_id = relayd_id;
		/* -1ULL means "no relayd": write to local output files instead. */
195 		if (relayd_id != (uint64_t) -1ULL) {
196 			ret = consumer_send_relayd_stream(stream, path);
197 			if (ret < 0) {
198 				ERR("sending stream to relayd");
199 				goto end_unlock;
200 			}
201 		} else {
202 			ret = consumer_stream_create_output_files(stream,
203 					false);
204 			if (ret < 0) {
205 				goto end_unlock;
206 			}
207 			DBG("Kernel consumer snapshot stream (%" PRIu64 ")",
208 					stream->key);
209 		}
210 
211 		ret = kernctl_buffer_flush_empty(stream->wait_fd);
212 		if (ret < 0) {
213 			/*
214 			 * Doing a buffer flush which does not take into
215 			 * account empty packets. This is not perfect
216 			 * for stream intersection, but required as a
217 			 * fall-back when "flush_empty" is not
218 			 * implemented by lttng-modules.
219 			 */
220 			ret = kernctl_buffer_flush(stream->wait_fd);
221 			if (ret < 0) {
222 				ERR("Failed to flush kernel stream");
223 				goto end_unlock;
224 			}
			/*
			 * NOTE(review): the fall-back flush succeeded, yet the
			 * code still jumps to end_unlock (with ret == 0),
			 * skipping the remaining streams -- confirm this
			 * early-exit is intentional.
			 */
225 			goto end_unlock;
226 		}
227 
		/* Sample the current consumed/produced positions. */
228 		ret = lttng_kconsumer_take_snapshot(stream);
229 		if (ret < 0) {
230 			ERR("Taking kernel snapshot");
231 			goto end_unlock;
232 		}
233 
234 		ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
235 		if (ret < 0) {
236 			ERR("Produced kernel snapshot position");
237 			goto end_unlock;
238 		}
239 
240 		ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
241 		if (ret < 0) {
242 			ERR("Consumerd kernel snapshot position");
243 			goto end_unlock;
244 		}
245 
		/*
		 * Advance the start position so only the requested number of
		 * packets (at most) is captured for this stream.
		 */
246 		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
247 				produced_pos, nb_packets_per_stream,
248 				stream->max_sb_size);
249 
		/*
		 * Drain every sub-buffer between the consumed and produced
		 * positions, writing each one to the snapshot output. The
		 * subtraction is cast to signed so the comparison tolerates
		 * position counter wrap-around.
		 */
250 		while ((long) (consumed_pos - produced_pos) < 0) {
251 			ssize_t read_len;
252 			unsigned long len, padded_len;
253 			const char *subbuf_addr;
254 			struct lttng_buffer_view subbuf_view;
255 
256 			health_code_update();
257 			DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);
258 
259 			ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
260 			if (ret < 0) {
261 				if (ret != -EAGAIN) {
262 					PERROR("kernctl_get_subbuf snapshot");
263 					goto end_unlock;
264 				}
				/* Sub-buffer unavailable: count it lost and move on. */
265 				DBG("Kernel consumer get subbuf failed. Skipping it.");
266 				consumed_pos += stream->max_sb_size;
267 				stream->chan->lost_packets++;
268 				continue;
269 			}
270 
			/* From here on, the sub-buffer must be released on error. */
271 			ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
272 			if (ret < 0) {
273 				ERR("Snapshot kernctl_get_subbuf_size");
274 				goto error_put_subbuf;
275 			}
276 
277 			ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
278 			if (ret < 0) {
279 				ERR("Snapshot kernctl_get_padded_subbuf_size");
280 				goto error_put_subbuf;
281 			}
282 
283 			ret = get_current_subbuf_addr(stream, &subbuf_addr);
284 			if (ret) {
285 				goto error_put_subbuf;
286 			}
287 
288 			subbuf_view = lttng_buffer_view_init(
289 					subbuf_addr, 0, padded_len);
290 			read_len = lttng_consumer_on_read_subbuffer_mmap(ctx,
291 					stream, &subbuf_view,
292 					padded_len - len);
293 			/*
294 			 * We write the padded len in local tracefiles but the data len
295 			 * when using a relay. Display the error but continue processing
296 			 * to try to release the subbuffer.
297 			 */
298 			if (relayd_id != (uint64_t) -1ULL) {
299 				if (read_len != len) {
300 					ERR("Error sending to the relay (ret: %zd != len: %lu)",
301 							read_len, len);
302 				}
303 			} else {
304 				if (read_len != padded_len) {
305 					ERR("Error writing to tracefile (ret: %zd != len: %lu)",
306 							read_len, padded_len);
307 				}
308 			}
309 
310 			ret = kernctl_put_subbuf(stream->wait_fd);
311 			if (ret < 0) {
312 				ERR("Snapshot kernctl_put_subbuf");
313 				goto end_unlock;
314 			}
315 			consumed_pos += stream->max_sb_size;
316 		}
317 
		/* Snapshot of this stream is complete; release its output. */
318 		if (relayd_id == (uint64_t) -1ULL) {
319 			if (stream->out_fd >= 0) {
320 				ret = close(stream->out_fd);
321 				if (ret < 0) {
322 					PERROR("Kernel consumer snapshot close out_fd");
323 					goto end_unlock;
324 				}
325 				stream->out_fd = -1;
326 			}
327 		} else {
328 			close_relayd_stream(stream);
329 			stream->net_seq_idx = (uint64_t) -1ULL;
330 		}
		/* Drop the trace chunk reference taken at the top of the loop. */
331 		lttng_trace_chunk_put(stream->trace_chunk);
332 		stream->trace_chunk = NULL;
333 		pthread_mutex_unlock(&stream->lock);
334 	}
335 
336 	/* All good! */
337 	ret = 0;
338 	goto end;
339 
340 error_put_subbuf:
	/* Best-effort release of the sub-buffer acquired above. */
341 	ret = kernctl_put_subbuf(stream->wait_fd);
342 	if (ret < 0) {
343 		ERR("Snapshot kernctl_put_subbuf error path");
344 	}
345 end_unlock:
346 	pthread_mutex_unlock(&stream->lock);
347 end:
348 	rcu_read_unlock();
349 	return ret;
350 }
351
352 /*
353  * Read the whole metadata available for a snapshot.
354  * RCU read-side lock must be held across this function to ensure existence of
355  * metadata_channel. The channel lock must be held by the caller.
356  *
 * key is only used for logging. path is the snapshot destination (relayd
 * path when relayd_id != -1ULL, local directory otherwise). The channel's
 * metadata stream is destroyed on exit, success or failure.
 *
357  * Returns 0 on success, < 0 on error
358  */
359 static int lttng_kconsumer_snapshot_metadata(
360 		struct lttng_consumer_channel *metadata_channel,
361 		uint64_t key, char *path, uint64_t relayd_id,
362 		struct lttng_consumer_local_data *ctx)
363 {
364 	int ret, use_relayd = 0;
365 	ssize_t ret_read;
366 	struct lttng_consumer_stream *metadata_stream;
367 
368 	assert(ctx);
369 
370 	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s",
371 			key, path);
372 
373 	rcu_read_lock();
374 
375 	metadata_stream = metadata_channel->metadata_stream;
376 	assert(metadata_stream);
377 
378 	pthread_mutex_lock(&metadata_stream->lock);
379 	assert(metadata_channel->trace_chunk);
380 	assert(metadata_stream->trace_chunk);
381 
382 	/* Flag once that we have a valid relayd for the stream. */
383 	if (relayd_id != (uint64_t) -1ULL) {
384 		use_relayd = 1;
385 	}
386 
	/* Set up the output: relayd stream or local tracefile. */
387 	if (use_relayd) {
388 		ret = consumer_send_relayd_stream(metadata_stream, path);
389 		if (ret < 0) {
390 			goto error_snapshot;
391 		}
392 	} else {
393 		ret = consumer_stream_create_output_files(metadata_stream,
394 				false);
395 		if (ret < 0) {
396 			goto error_snapshot;
397 		}
398 	}
399 
	/*
	 * Drain all available metadata; -EAGAIN means nothing is left to
	 * read and makes the loop condition false.
	 */
400 	do {
401 		health_code_update();
402 
403 		ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
404 		if (ret_read < 0) {
405 			if (ret_read != -EAGAIN) {
406 				ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
407 						ret_read);
408 				ret = ret_read;
409 				goto error_snapshot;
410 			}
411 			/* ret_read is negative at this point so we will exit the loop. */
412 			continue;
413 		}
414 	} while (ret_read >= 0);
415 
416 	if (use_relayd) {
417 		close_relayd_stream(metadata_stream);
418 		metadata_stream->net_seq_idx = (uint64_t) -1ULL;
419 	} else {
420 		if (metadata_stream->out_fd >= 0) {
421 			ret = close(metadata_stream->out_fd);
422 			if (ret < 0) {
423 				PERROR("Kernel consumer snapshot metadata close out_fd");
424 				/*
425 				 * Don't go on error here since the snapshot was successful at this
426 				 * point but somehow the close failed.
427 				 */
428 			}
429 			metadata_stream->out_fd = -1;
430 			lttng_trace_chunk_put(metadata_stream->trace_chunk);
431 			metadata_stream->trace_chunk = NULL;
432 		}
433 	}
434 
435 	ret = 0;
436 error_snapshot:
	/*
	 * Success or failure, the metadata stream only lives for the
	 * duration of the snapshot: unlink and destroy it here.
	 */
437 	pthread_mutex_unlock(&metadata_stream->lock);
438 	cds_list_del(&metadata_stream->send_node);
439 	consumer_stream_destroy(metadata_stream, NULL);
440 	metadata_channel->metadata_stream = NULL;
441 	rcu_read_unlock();
442 	return ret;
443 }
444
445 /*
446 * Receive command from session daemon and process it.
447 *
448 * Return 1 on success else a negative value or 0.
449 */
450 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
451 int sock, struct pollfd *consumer_sockpoll)
452 {
453 ssize_t ret;
454 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
455 struct lttcomm_consumer_msg msg;
456
457 health_code_update();
458
459 ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
460 if (ret != sizeof(msg)) {
461 if (ret > 0) {
462 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
463 ret = -1;
464 }
465 return ret;
466 }
467
468 health_code_update();
469
470 /* Deprecated command */
471 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
472
473 health_code_update();
474
475 /* relayd needs RCU read-side protection */
476 rcu_read_lock();
477
478 switch (msg.cmd_type) {
479 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
480 {
481 /* Session daemon status message are handled in the following call. */
482 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
483 msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
484 &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
485 msg.u.relayd_sock.relayd_session_id);
486 goto end_nosignal;
487 }
488 case LTTNG_CONSUMER_ADD_CHANNEL:
489 {
490 struct lttng_consumer_channel *new_channel;
491 int ret_recv;
492 const uint64_t chunk_id = msg.u.channel.chunk_id.value;
493
494 health_code_update();
495
496 /* First send a status message before receiving the fds. */
497 ret = consumer_send_status_msg(sock, ret_code);
498 if (ret < 0) {
499 /* Somehow, the session daemon is not responding anymore. */
500 goto error_fatal;
501 }
502
503 health_code_update();
504
505 DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
506 new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
507 msg.u.channel.session_id,
508 msg.u.channel.chunk_id.is_set ?
509 &chunk_id : NULL,
510 msg.u.channel.pathname,
511 msg.u.channel.name,
512 msg.u.channel.relayd_id, msg.u.channel.output,
513 msg.u.channel.tracefile_size,
514 msg.u.channel.tracefile_count, 0,
515 msg.u.channel.monitor,
516 msg.u.channel.live_timer_interval,
517 msg.u.channel.is_live,
518 NULL, NULL);
519 if (new_channel == NULL) {
520 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
521 goto end_nosignal;
522 }
523 new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
524 switch (msg.u.channel.output) {
525 case LTTNG_EVENT_SPLICE:
526 new_channel->output = CONSUMER_CHANNEL_SPLICE;
527 break;
528 case LTTNG_EVENT_MMAP:
529 new_channel->output = CONSUMER_CHANNEL_MMAP;
530 break;
531 default:
532 ERR("Channel output unknown %d", msg.u.channel.output);
533 goto end_nosignal;
534 }
535
536 /* Translate and save channel type. */
537 switch (msg.u.channel.type) {
538 case CONSUMER_CHANNEL_TYPE_DATA:
539 case CONSUMER_CHANNEL_TYPE_METADATA:
540 new_channel->type = msg.u.channel.type;
541 break;
542 default:
543 assert(0);
544 goto end_nosignal;
545 };
546
547 health_code_update();
548
549 if (ctx->on_recv_channel != NULL) {
550 ret_recv = ctx->on_recv_channel(new_channel);
551 if (ret_recv == 0) {
552 ret = consumer_add_channel(new_channel, ctx);
553 } else if (ret_recv < 0) {
554 goto end_nosignal;
555 }
556 } else {
557 ret = consumer_add_channel(new_channel, ctx);
558 }
559 if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && !ret) {
560 int monitor_start_ret;
561
562 DBG("Consumer starting monitor timer");
563 consumer_timer_live_start(new_channel,
564 msg.u.channel.live_timer_interval);
565 monitor_start_ret = consumer_timer_monitor_start(
566 new_channel,
567 msg.u.channel.monitor_timer_interval);
568 if (monitor_start_ret < 0) {
569 ERR("Starting channel monitoring timer failed");
570 goto end_nosignal;
571 }
572
573 }
574
575 health_code_update();
576
577 /* If we received an error in add_channel, we need to report it. */
578 if (ret < 0) {
579 ret = consumer_send_status_msg(sock, ret);
580 if (ret < 0) {
581 goto error_fatal;
582 }
583 goto end_nosignal;
584 }
585
586 goto end_nosignal;
587 }
588 case LTTNG_CONSUMER_ADD_STREAM:
589 {
590 int fd;
591 struct lttng_pipe *stream_pipe;
592 struct lttng_consumer_stream *new_stream;
593 struct lttng_consumer_channel *channel;
594 int alloc_ret = 0;
595
596 /*
597 * Get stream's channel reference. Needed when adding the stream to the
598 * global hash table.
599 */
600 channel = consumer_find_channel(msg.u.stream.channel_key);
601 if (!channel) {
602 /*
603 * We could not find the channel. Can happen if cpu hotplug
604 * happens while tearing down.
605 */
606 ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
607 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
608 }
609
610 health_code_update();
611
612 /* First send a status message before receiving the fds. */
613 ret = consumer_send_status_msg(sock, ret_code);
614 if (ret < 0) {
615 /* Somehow, the session daemon is not responding anymore. */
616 goto error_add_stream_fatal;
617 }
618
619 health_code_update();
620
621 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
622 /* Channel was not found. */
623 goto error_add_stream_nosignal;
624 }
625
626 /* Blocking call */
627 health_poll_entry();
628 ret = lttng_consumer_poll_socket(consumer_sockpoll);
629 health_poll_exit();
630 if (ret) {
631 goto error_add_stream_fatal;
632 }
633
634 health_code_update();
635
636 /* Get stream file descriptor from socket */
637 ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
638 if (ret != sizeof(fd)) {
639 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
640 goto end;
641 }
642
643 health_code_update();
644
645 /*
646 * Send status code to session daemon only if the recv works. If the
647 * above recv() failed, the session daemon is notified through the
648 * error socket and the teardown is eventually done.
649 */
650 ret = consumer_send_status_msg(sock, ret_code);
651 if (ret < 0) {
652 /* Somehow, the session daemon is not responding anymore. */
653 goto error_add_stream_nosignal;
654 }
655
656 health_code_update();
657
658 pthread_mutex_lock(&channel->lock);
659 new_stream = consumer_stream_create(
660 channel,
661 channel->key,
662 fd,
663 channel->name,
664 channel->relayd_id,
665 channel->session_id,
666 channel->trace_chunk,
667 msg.u.stream.cpu,
668 &alloc_ret,
669 channel->type,
670 channel->monitor);
671 if (new_stream == NULL) {
672 switch (alloc_ret) {
673 case -ENOMEM:
674 case -EINVAL:
675 default:
676 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
677 break;
678 }
679 pthread_mutex_unlock(&channel->lock);
680 goto error_add_stream_nosignal;
681 }
682
683 new_stream->wait_fd = fd;
684 ret = kernctl_get_max_subbuf_size(new_stream->wait_fd,
685 &new_stream->max_sb_size);
686 if (ret < 0) {
687 pthread_mutex_unlock(&channel->lock);
688 ERR("Failed to get kernel maximal subbuffer size");
689 goto error_add_stream_nosignal;
690 }
691
692 consumer_stream_update_channel_attributes(new_stream,
693 channel);
694
695 /*
696 * We've just assigned the channel to the stream so increment the
697 * refcount right now. We don't need to increment the refcount for
698 * streams in no monitor because we handle manually the cleanup of
699 * those. It is very important to make sure there is NO prior
700 * consumer_del_stream() calls or else the refcount will be unbalanced.
701 */
702 if (channel->monitor) {
703 uatomic_inc(&new_stream->chan->refcount);
704 }
705
706 /*
707 * The buffer flush is done on the session daemon side for the kernel
708 * so no need for the stream "hangup_flush_done" variable to be
709 * tracked. This is important for a kernel stream since we don't rely
710 * on the flush state of the stream to read data. It's not the case for
711 * user space tracing.
712 */
713 new_stream->hangup_flush_done = 0;
714
715 health_code_update();
716
717 pthread_mutex_lock(&new_stream->lock);
718 if (ctx->on_recv_stream) {
719 ret = ctx->on_recv_stream(new_stream);
720 if (ret < 0) {
721 pthread_mutex_unlock(&new_stream->lock);
722 pthread_mutex_unlock(&channel->lock);
723 consumer_stream_free(new_stream);
724 goto error_add_stream_nosignal;
725 }
726 }
727 health_code_update();
728
729 if (new_stream->metadata_flag) {
730 channel->metadata_stream = new_stream;
731 }
732
733 /* Do not monitor this stream. */
734 if (!channel->monitor) {
735 DBG("Kernel consumer add stream %s in no monitor mode with "
736 "relayd id %" PRIu64, new_stream->name,
737 new_stream->net_seq_idx);
738 cds_list_add(&new_stream->send_node, &channel->streams.head);
739 pthread_mutex_unlock(&new_stream->lock);
740 pthread_mutex_unlock(&channel->lock);
741 goto end_add_stream;
742 }
743
744 /* Send stream to relayd if the stream has an ID. */
745 if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
746 ret = consumer_send_relayd_stream(new_stream,
747 new_stream->chan->pathname);
748 if (ret < 0) {
749 pthread_mutex_unlock(&new_stream->lock);
750 pthread_mutex_unlock(&channel->lock);
751 consumer_stream_free(new_stream);
752 goto error_add_stream_nosignal;
753 }
754
755 /*
756 * If adding an extra stream to an already
757 * existing channel (e.g. cpu hotplug), we need
758 * to send the "streams_sent" command to relayd.
759 */
760 if (channel->streams_sent_to_relayd) {
761 ret = consumer_send_relayd_streams_sent(
762 new_stream->net_seq_idx);
763 if (ret < 0) {
764 pthread_mutex_unlock(&new_stream->lock);
765 pthread_mutex_unlock(&channel->lock);
766 goto error_add_stream_nosignal;
767 }
768 }
769 }
770 pthread_mutex_unlock(&new_stream->lock);
771 pthread_mutex_unlock(&channel->lock);
772
773 /* Get the right pipe where the stream will be sent. */
774 if (new_stream->metadata_flag) {
775 consumer_add_metadata_stream(new_stream);
776 stream_pipe = ctx->consumer_metadata_pipe;
777 } else {
778 consumer_add_data_stream(new_stream);
779 stream_pipe = ctx->consumer_data_pipe;
780 }
781
782 /* Visible to other threads */
783 new_stream->globally_visible = 1;
784
785 health_code_update();
786
787 ret = lttng_pipe_write(stream_pipe, &new_stream, sizeof(new_stream));
788 if (ret < 0) {
789 ERR("Consumer write %s stream to pipe %d",
790 new_stream->metadata_flag ? "metadata" : "data",
791 lttng_pipe_get_writefd(stream_pipe));
792 if (new_stream->metadata_flag) {
793 consumer_del_stream_for_metadata(new_stream);
794 } else {
795 consumer_del_stream_for_data(new_stream);
796 }
797 goto error_add_stream_nosignal;
798 }
799
800 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
801 new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id);
802 end_add_stream:
803 break;
804 error_add_stream_nosignal:
805 goto end_nosignal;
806 error_add_stream_fatal:
807 goto error_fatal;
808 }
809 case LTTNG_CONSUMER_STREAMS_SENT:
810 {
811 struct lttng_consumer_channel *channel;
812
813 /*
814 * Get stream's channel reference. Needed when adding the stream to the
815 * global hash table.
816 */
817 channel = consumer_find_channel(msg.u.sent_streams.channel_key);
818 if (!channel) {
819 /*
820 * We could not find the channel. Can happen if cpu hotplug
821 * happens while tearing down.
822 */
823 ERR("Unable to find channel key %" PRIu64,
824 msg.u.sent_streams.channel_key);
825 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
826 }
827
828 health_code_update();
829
830 /*
831 * Send status code to session daemon.
832 */
833 ret = consumer_send_status_msg(sock, ret_code);
834 if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
835 /* Somehow, the session daemon is not responding anymore. */
836 goto error_streams_sent_nosignal;
837 }
838
839 health_code_update();
840
841 /*
842 * We should not send this message if we don't monitor the
843 * streams in this channel.
844 */
845 if (!channel->monitor) {
846 goto end_error_streams_sent;
847 }
848
849 health_code_update();
850 /* Send stream to relayd if the stream has an ID. */
851 if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
852 ret = consumer_send_relayd_streams_sent(
853 msg.u.sent_streams.net_seq_idx);
854 if (ret < 0) {
855 goto error_streams_sent_nosignal;
856 }
857 channel->streams_sent_to_relayd = true;
858 }
859 end_error_streams_sent:
860 break;
861 error_streams_sent_nosignal:
862 goto end_nosignal;
863 }
864 case LTTNG_CONSUMER_UPDATE_STREAM:
865 {
866 rcu_read_unlock();
867 return -ENOSYS;
868 }
869 case LTTNG_CONSUMER_DESTROY_RELAYD:
870 {
871 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
872 struct consumer_relayd_sock_pair *relayd;
873
874 DBG("Kernel consumer destroying relayd %" PRIu64, index);
875
876 /* Get relayd reference if exists. */
877 relayd = consumer_find_relayd(index);
878 if (relayd == NULL) {
879 DBG("Unable to find relayd %" PRIu64, index);
880 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
881 }
882
883 /*
884 * Each relayd socket pair has a refcount of stream attached to it
885 * which tells if the relayd is still active or not depending on the
886 * refcount value.
887 *
888 * This will set the destroy flag of the relayd object and destroy it
889 * if the refcount reaches zero when called.
890 *
891 * The destroy can happen either here or when a stream fd hangs up.
892 */
893 if (relayd) {
894 consumer_flag_relayd_for_destroy(relayd);
895 }
896
897 health_code_update();
898
899 ret = consumer_send_status_msg(sock, ret_code);
900 if (ret < 0) {
901 /* Somehow, the session daemon is not responding anymore. */
902 goto error_fatal;
903 }
904
905 goto end_nosignal;
906 }
907 case LTTNG_CONSUMER_DATA_PENDING:
908 {
909 int32_t ret;
910 uint64_t id = msg.u.data_pending.session_id;
911
912 DBG("Kernel consumer data pending command for id %" PRIu64, id);
913
914 ret = consumer_data_pending(id);
915
916 health_code_update();
917
918 /* Send back returned value to session daemon */
919 ret = lttcomm_send_unix_sock(sock, &ret, sizeof(ret));
920 if (ret < 0) {
921 PERROR("send data pending ret code");
922 goto error_fatal;
923 }
924
925 /*
926 * No need to send back a status message since the data pending
927 * returned value is the response.
928 */
929 break;
930 }
931 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
932 {
933 struct lttng_consumer_channel *channel;
934 uint64_t key = msg.u.snapshot_channel.key;
935
936 channel = consumer_find_channel(key);
937 if (!channel) {
938 ERR("Channel %" PRIu64 " not found", key);
939 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
940 } else {
941 pthread_mutex_lock(&channel->lock);
942 if (msg.u.snapshot_channel.metadata == 1) {
943 ret = lttng_kconsumer_snapshot_metadata(channel, key,
944 msg.u.snapshot_channel.pathname,
945 msg.u.snapshot_channel.relayd_id, ctx);
946 if (ret < 0) {
947 ERR("Snapshot metadata failed");
948 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
949 }
950 } else {
951 ret = lttng_kconsumer_snapshot_channel(channel, key,
952 msg.u.snapshot_channel.pathname,
953 msg.u.snapshot_channel.relayd_id,
954 msg.u.snapshot_channel.nb_packets_per_stream,
955 ctx);
956 if (ret < 0) {
957 ERR("Snapshot channel failed");
958 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
959 }
960 }
961 pthread_mutex_unlock(&channel->lock);
962 }
963 health_code_update();
964
965 ret = consumer_send_status_msg(sock, ret_code);
966 if (ret < 0) {
967 /* Somehow, the session daemon is not responding anymore. */
968 goto end_nosignal;
969 }
970 break;
971 }
972 case LTTNG_CONSUMER_DESTROY_CHANNEL:
973 {
974 uint64_t key = msg.u.destroy_channel.key;
975 struct lttng_consumer_channel *channel;
976
977 channel = consumer_find_channel(key);
978 if (!channel) {
979 ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
980 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
981 }
982
983 health_code_update();
984
985 ret = consumer_send_status_msg(sock, ret_code);
986 if (ret < 0) {
987 /* Somehow, the session daemon is not responding anymore. */
988 goto end_destroy_channel;
989 }
990
991 health_code_update();
992
993 /* Stop right now if no channel was found. */
994 if (!channel) {
995 goto end_destroy_channel;
996 }
997
998 /*
999 * This command should ONLY be issued for channel with streams set in
1000 * no monitor mode.
1001 */
1002 assert(!channel->monitor);
1003
1004 /*
1005 * The refcount should ALWAYS be 0 in the case of a channel in no
1006 * monitor mode.
1007 */
1008 assert(!uatomic_sub_return(&channel->refcount, 1));
1009
1010 consumer_del_channel(channel);
1011 end_destroy_channel:
1012 goto end_nosignal;
1013 }
1014 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1015 {
1016 ssize_t ret;
1017 uint64_t count;
1018 struct lttng_consumer_channel *channel;
1019 uint64_t id = msg.u.discarded_events.session_id;
1020 uint64_t key = msg.u.discarded_events.channel_key;
1021
1022 DBG("Kernel consumer discarded events command for session id %"
1023 PRIu64 ", channel key %" PRIu64, id, key);
1024
1025 channel = consumer_find_channel(key);
1026 if (!channel) {
1027 ERR("Kernel consumer discarded events channel %"
1028 PRIu64 " not found", key);
1029 count = 0;
1030 } else {
1031 count = channel->discarded_events;
1032 }
1033
1034 health_code_update();
1035
1036 /* Send back returned value to session daemon */
1037 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1038 if (ret < 0) {
1039 PERROR("send discarded events");
1040 goto error_fatal;
1041 }
1042
1043 break;
1044 }
1045 case LTTNG_CONSUMER_LOST_PACKETS:
1046 {
1047 ssize_t ret;
1048 uint64_t count;
1049 struct lttng_consumer_channel *channel;
1050 uint64_t id = msg.u.lost_packets.session_id;
1051 uint64_t key = msg.u.lost_packets.channel_key;
1052
1053 DBG("Kernel consumer lost packets command for session id %"
1054 PRIu64 ", channel key %" PRIu64, id, key);
1055
1056 channel = consumer_find_channel(key);
1057 if (!channel) {
1058 ERR("Kernel consumer lost packets channel %"
1059 PRIu64 " not found", key);
1060 count = 0;
1061 } else {
1062 count = channel->lost_packets;
1063 }
1064
1065 health_code_update();
1066
1067 /* Send back returned value to session daemon */
1068 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1069 if (ret < 0) {
1070 PERROR("send lost packets");
1071 goto error_fatal;
1072 }
1073
1074 break;
1075 }
1076 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
1077 {
1078 int channel_monitor_pipe;
1079
1080 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1081 /* Successfully received the command's type. */
1082 ret = consumer_send_status_msg(sock, ret_code);
1083 if (ret < 0) {
1084 goto error_fatal;
1085 }
1086
1087 ret = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe,
1088 1);
1089 if (ret != sizeof(channel_monitor_pipe)) {
1090 ERR("Failed to receive channel monitor pipe");
1091 goto error_fatal;
1092 }
1093
1094 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
1095 ret = consumer_timer_thread_set_channel_monitor_pipe(
1096 channel_monitor_pipe);
1097 if (!ret) {
1098 int flags;
1099
1100 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1101 /* Set the pipe as non-blocking. */
1102 ret = fcntl(channel_monitor_pipe, F_GETFL, 0);
1103 if (ret == -1) {
1104 PERROR("fcntl get flags of the channel monitoring pipe");
1105 goto error_fatal;
1106 }
1107 flags = ret;
1108
1109 ret = fcntl(channel_monitor_pipe, F_SETFL,
1110 flags | O_NONBLOCK);
1111 if (ret == -1) {
1112 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1113 goto error_fatal;
1114 }
1115 DBG("Channel monitor pipe set as non-blocking");
1116 } else {
1117 ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
1118 }
1119 ret = consumer_send_status_msg(sock, ret_code);
1120 if (ret < 0) {
1121 goto error_fatal;
1122 }
1123 break;
1124 }
1125 case LTTNG_CONSUMER_ROTATE_CHANNEL:
1126 {
1127 struct lttng_consumer_channel *channel;
1128 uint64_t key = msg.u.rotate_channel.key;
1129
1130 DBG("Consumer rotate channel %" PRIu64, key);
1131
1132 channel = consumer_find_channel(key);
1133 if (!channel) {
1134 ERR("Channel %" PRIu64 " not found", key);
1135 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1136 } else {
1137 /*
1138 * Sample the rotate position of all the streams in this channel.
1139 */
1140 ret = lttng_consumer_rotate_channel(channel, key,
1141 msg.u.rotate_channel.relayd_id,
1142 msg.u.rotate_channel.metadata,
1143 ctx);
1144 if (ret < 0) {
1145 ERR("Rotate channel failed");
1146 ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
1147 }
1148
1149 health_code_update();
1150 }
1151 ret = consumer_send_status_msg(sock, ret_code);
1152 if (ret < 0) {
1153 /* Somehow, the session daemon is not responding anymore. */
1154 goto error_rotate_channel;
1155 }
1156 if (channel) {
1157 /* Rotate the streams that are ready right now. */
1158 ret = lttng_consumer_rotate_ready_streams(
1159 channel, key, ctx);
1160 if (ret < 0) {
1161 ERR("Rotate ready streams failed");
1162 }
1163 }
1164 break;
1165 error_rotate_channel:
1166 goto end_nosignal;
1167 }
1168 case LTTNG_CONSUMER_CLEAR_CHANNEL:
1169 {
1170 struct lttng_consumer_channel *channel;
1171 uint64_t key = msg.u.clear_channel.key;
1172
1173 channel = consumer_find_channel(key);
1174 if (!channel) {
1175 DBG("Channel %" PRIu64 " not found", key);
1176 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1177 } else {
1178 ret = lttng_consumer_clear_channel(channel);
1179 if (ret) {
1180 ERR("Clear channel failed");
1181 ret_code = ret;
1182 }
1183
1184 health_code_update();
1185 }
1186 ret = consumer_send_status_msg(sock, ret_code);
1187 if (ret < 0) {
1188 /* Somehow, the session daemon is not responding anymore. */
1189 goto end_nosignal;
1190 }
1191
1192 break;
1193 }
1194 case LTTNG_CONSUMER_INIT:
1195 {
1196 ret_code = lttng_consumer_init_command(ctx,
1197 msg.u.init.sessiond_uuid);
1198 health_code_update();
1199 ret = consumer_send_status_msg(sock, ret_code);
1200 if (ret < 0) {
1201 /* Somehow, the session daemon is not responding anymore. */
1202 goto end_nosignal;
1203 }
1204 break;
1205 }
1206 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
1207 {
1208 const struct lttng_credentials credentials = {
1209 .uid = msg.u.create_trace_chunk.credentials.value.uid,
1210 .gid = msg.u.create_trace_chunk.credentials.value.gid,
1211 };
1212 const bool is_local_trace =
1213 !msg.u.create_trace_chunk.relayd_id.is_set;
1214 const uint64_t relayd_id =
1215 msg.u.create_trace_chunk.relayd_id.value;
1216 const char *chunk_override_name =
1217 *msg.u.create_trace_chunk.override_name ?
1218 msg.u.create_trace_chunk.override_name :
1219 NULL;
1220 struct lttng_directory_handle *chunk_directory_handle = NULL;
1221
1222 /*
1223 * The session daemon will only provide a chunk directory file
1224 * descriptor for local traces.
1225 */
1226 if (is_local_trace) {
1227 int chunk_dirfd;
1228
1229 /* Acnowledge the reception of the command. */
1230 ret = consumer_send_status_msg(sock,
1231 LTTCOMM_CONSUMERD_SUCCESS);
1232 if (ret < 0) {
1233 /* Somehow, the session daemon is not responding anymore. */
1234 goto end_nosignal;
1235 }
1236
1237 ret = lttcomm_recv_fds_unix_sock(sock, &chunk_dirfd, 1);
1238 if (ret != sizeof(chunk_dirfd)) {
1239 ERR("Failed to receive trace chunk directory file descriptor");
1240 goto error_fatal;
1241 }
1242
1243 DBG("Received trace chunk directory fd (%d)",
1244 chunk_dirfd);
1245 chunk_directory_handle = lttng_directory_handle_create_from_dirfd(
1246 chunk_dirfd);
1247 if (!chunk_directory_handle) {
1248 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1249 if (close(chunk_dirfd)) {
1250 PERROR("Failed to close chunk directory file descriptor");
1251 }
1252 goto error_fatal;
1253 }
1254 }
1255
1256 ret_code = lttng_consumer_create_trace_chunk(
1257 !is_local_trace ? &relayd_id : NULL,
1258 msg.u.create_trace_chunk.session_id,
1259 msg.u.create_trace_chunk.chunk_id,
1260 (time_t) msg.u.create_trace_chunk
1261 .creation_timestamp,
1262 chunk_override_name,
1263 msg.u.create_trace_chunk.credentials.is_set ?
1264 &credentials :
1265 NULL,
1266 chunk_directory_handle);
1267 lttng_directory_handle_put(chunk_directory_handle);
1268 goto end_msg_sessiond;
1269 }
1270 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
1271 {
1272 enum lttng_trace_chunk_command_type close_command =
1273 msg.u.close_trace_chunk.close_command.value;
1274 const uint64_t relayd_id =
1275 msg.u.close_trace_chunk.relayd_id.value;
1276 struct lttcomm_consumer_close_trace_chunk_reply reply;
1277 char path[LTTNG_PATH_MAX];
1278
1279 ret_code = lttng_consumer_close_trace_chunk(
1280 msg.u.close_trace_chunk.relayd_id.is_set ?
1281 &relayd_id :
1282 NULL,
1283 msg.u.close_trace_chunk.session_id,
1284 msg.u.close_trace_chunk.chunk_id,
1285 (time_t) msg.u.close_trace_chunk.close_timestamp,
1286 msg.u.close_trace_chunk.close_command.is_set ?
1287 &close_command :
1288 NULL, path);
1289 reply.ret_code = ret_code;
1290 reply.path_length = strlen(path) + 1;
1291 ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
1292 if (ret != sizeof(reply)) {
1293 goto error_fatal;
1294 }
1295 ret = lttcomm_send_unix_sock(sock, path, reply.path_length);
1296 if (ret != reply.path_length) {
1297 goto error_fatal;
1298 }
1299 goto end_nosignal;
1300 }
1301 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
1302 {
1303 const uint64_t relayd_id =
1304 msg.u.trace_chunk_exists.relayd_id.value;
1305
1306 ret_code = lttng_consumer_trace_chunk_exists(
1307 msg.u.trace_chunk_exists.relayd_id.is_set ?
1308 &relayd_id : NULL,
1309 msg.u.trace_chunk_exists.session_id,
1310 msg.u.trace_chunk_exists.chunk_id);
1311 goto end_msg_sessiond;
1312 }
1313 default:
1314 goto end_nosignal;
1315 }
1316
1317 end_nosignal:
1318 /*
1319 * Return 1 to indicate success since the 0 value can be a socket
1320 * shutdown during the recv() or send() call.
1321 */
1322 ret = 1;
1323 goto end;
1324 error_fatal:
1325 /* This will issue a consumer stop. */
1326 ret = -1;
1327 goto end;
1328 end_msg_sessiond:
1329 /*
1330 * The returned value here is not useful since either way we'll return 1 to
1331 * the caller because the session daemon socket management is done
1332 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1333 */
1334 ret = consumer_send_status_msg(sock, ret_code);
1335 if (ret < 0) {
1336 goto error_fatal;
1337 }
1338 ret = 1;
1339 end:
1340 health_code_update();
1341 rcu_read_unlock();
1342 return ret;
1343 }
1344
1345 /*
1346 * Sync metadata meaning request them to the session daemon and snapshot to the
1347 * metadata thread can consumer them.
1348 *
1349 * Metadata stream lock MUST be acquired.
1350 *
1351 * Return 0 if new metadatda is available, EAGAIN if the metadata stream
1352 * is empty or a negative value on error.
1353 */
1354 int lttng_kconsumer_sync_metadata(struct lttng_consumer_stream *metadata)
1355 {
1356 int ret;
1357
1358 assert(metadata);
1359
1360 ret = kernctl_buffer_flush(metadata->wait_fd);
1361 if (ret < 0) {
1362 ERR("Failed to flush kernel stream");
1363 goto end;
1364 }
1365
1366 ret = kernctl_snapshot(metadata->wait_fd);
1367 if (ret < 0) {
1368 if (ret != -EAGAIN) {
1369 ERR("Sync metadata, taking kernel snapshot failed.");
1370 goto end;
1371 }
1372 DBG("Sync metadata, no new kernel metadata");
1373 /* No new metadata, exit. */
1374 ret = ENODATA;
1375 goto end;
1376 }
1377
1378 end:
1379 return ret;
1380 }
1381
1382 static
1383 int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
1384 struct stream_subbuffer *subbuf)
1385 {
1386 int ret;
1387
1388 ret = kernctl_get_subbuf_size(
1389 stream->wait_fd, &subbuf->info.data.subbuf_size);
1390 if (ret) {
1391 goto end;
1392 }
1393
1394 ret = kernctl_get_padded_subbuf_size(
1395 stream->wait_fd, &subbuf->info.data.padded_subbuf_size);
1396 if (ret) {
1397 goto end;
1398 }
1399
1400 end:
1401 return ret;
1402 }
1403
1404 static
1405 int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
1406 struct stream_subbuffer *subbuf)
1407 {
1408 int ret;
1409
1410 ret = extract_common_subbuffer_info(stream, subbuf);
1411 if (ret) {
1412 goto end;
1413 }
1414
1415 ret = kernctl_get_metadata_version(
1416 stream->wait_fd, &subbuf->info.metadata.version);
1417 if (ret) {
1418 goto end;
1419 }
1420
1421 end:
1422 return ret;
1423 }
1424
/*
 * Fill the sub-buffer info of a data stream: common size fields, packet
 * geometry, timestamps, discarded-event count, and — when supported by the
 * tracer — the sequence number and stream instance id.
 *
 * Returns 0 on success, a negative kernctl error code on failure.
 */
static
int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = kernctl_get_packet_size(
			stream->wait_fd, &subbuf->info.data.packet_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = kernctl_get_content_size(
			stream->wait_fd, &subbuf->info.data.content_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = kernctl_get_timestamp_begin(
			stream->wait_fd, &subbuf->info.data.timestamp_begin);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = kernctl_get_timestamp_end(
			stream->wait_fd, &subbuf->info.data.timestamp_end);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = kernctl_get_events_discarded(
			stream->wait_fd, &subbuf->info.data.events_discarded);
	if (ret) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	/*
	 * Optional field: -ENOTTY means the tracer does not implement this
	 * ioctl (older LTTng-modules) and is not treated as a failure.
	 */
	ret = kernctl_get_sequence_number(stream->wait_fd,
			&subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = kernctl_get_stream_id(
			stream->wait_fd, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	/* Optional field: same -ENOTTY tolerance as the sequence number. */
	ret = kernctl_get_instance_id(stream->wait_fd,
			&subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}
end:
	return ret;
}
1504
1505 static
1506 int get_subbuffer_common(struct lttng_consumer_stream *stream,
1507 struct stream_subbuffer *subbuffer)
1508 {
1509 int ret;
1510
1511 ret = kernctl_get_next_subbuf(stream->wait_fd);
1512 if (ret) {
1513 goto end;
1514 }
1515
1516 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
1517 stream, subbuffer);
1518 end:
1519 return ret;
1520 }
1521
1522 static
1523 int get_next_subbuffer_splice(struct lttng_consumer_stream *stream,
1524 struct stream_subbuffer *subbuffer)
1525 {
1526 int ret;
1527
1528 ret = get_subbuffer_common(stream, subbuffer);
1529 if (ret) {
1530 goto end;
1531 }
1532
1533 subbuffer->buffer.fd = stream->wait_fd;
1534 end:
1535 return ret;
1536 }
1537
1538 static
1539 int get_next_subbuffer_mmap(struct lttng_consumer_stream *stream,
1540 struct stream_subbuffer *subbuffer)
1541 {
1542 int ret;
1543 const char *addr;
1544
1545 ret = get_subbuffer_common(stream, subbuffer);
1546 if (ret) {
1547 goto end;
1548 }
1549
1550 ret = get_current_subbuf_addr(stream, &addr);
1551 if (ret) {
1552 goto end;
1553 }
1554
1555 subbuffer->buffer.buffer = lttng_buffer_view_init(
1556 addr, 0, subbuffer->info.data.padded_subbuf_size);
1557 end:
1558 return ret;
1559 }
1560
1561 static
1562 int put_next_subbuffer(struct lttng_consumer_stream *stream,
1563 struct stream_subbuffer *subbuffer)
1564 {
1565 const int ret = kernctl_put_next_subbuf(stream->wait_fd);
1566
1567 if (ret) {
1568 if (ret == -EFAULT) {
1569 PERROR("Error in unreserving sub buffer");
1570 } else if (ret == -EIO) {
1571 /* Should never happen with newer LTTng versions */
1572 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1573 }
1574 }
1575
1576 return ret;
1577 }
1578
1579 static void lttng_kconsumer_set_stream_ops(
1580 struct lttng_consumer_stream *stream)
1581 {
1582 if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
1583 stream->read_subbuffer_ops.get_next_subbuffer =
1584 get_next_subbuffer_mmap;
1585 } else {
1586 stream->read_subbuffer_ops.get_next_subbuffer =
1587 get_next_subbuffer_splice;
1588 }
1589
1590 if (stream->metadata_flag) {
1591 stream->read_subbuffer_ops.extract_subbuffer_info =
1592 extract_metadata_subbuffer_info;
1593 } else {
1594 stream->read_subbuffer_ops.extract_subbuffer_info =
1595 extract_data_subbuffer_info;
1596 if (stream->chan->is_live) {
1597 stream->read_subbuffer_ops.send_live_beacon =
1598 consumer_flush_kernel_index;
1599 }
1600 }
1601
1602 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
1603 }
1604
1605 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
1606 {
1607 int ret;
1608
1609 assert(stream);
1610
1611 /*
1612 * Don't create anything if this is set for streaming or if there is
1613 * no current trace chunk on the parent channel.
1614 */
1615 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
1616 stream->chan->trace_chunk) {
1617 ret = consumer_stream_create_output_files(stream, true);
1618 if (ret) {
1619 goto error;
1620 }
1621 }
1622
1623 if (stream->output == LTTNG_EVENT_MMAP) {
1624 /* get the len of the mmap region */
1625 unsigned long mmap_len;
1626
1627 ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
1628 if (ret != 0) {
1629 PERROR("kernctl_get_mmap_len");
1630 goto error_close_fd;
1631 }
1632 stream->mmap_len = (size_t) mmap_len;
1633
1634 stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ,
1635 MAP_PRIVATE, stream->wait_fd, 0);
1636 if (stream->mmap_base == MAP_FAILED) {
1637 PERROR("Error mmaping");
1638 ret = -1;
1639 goto error_close_fd;
1640 }
1641 }
1642
1643 lttng_kconsumer_set_stream_ops(stream);
1644
1645 /* we return 0 to let the library handle the FD internally */
1646 return 0;
1647
1648 error_close_fd:
1649 if (stream->out_fd >= 0) {
1650 int err;
1651
1652 err = close(stream->out_fd);
1653 assert(!err);
1654 stream->out_fd = -1;
1655 }
1656 error:
1657 return ret;
1658 }
1659
1660 /*
1661 * Check if data is still being extracted from the buffers for a specific
1662 * stream. Consumer data lock MUST be acquired before calling this function
1663 * and the stream lock.
1664 *
1665 * Return 1 if the traced data are still getting read else 0 meaning that the
1666 * data is available for trace viewer reading.
1667 */
1668 int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
1669 {
1670 int ret;
1671
1672 assert(stream);
1673
1674 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1675 ret = 0;
1676 goto end;
1677 }
1678
1679 ret = kernctl_get_next_subbuf(stream->wait_fd);
1680 if (ret == 0) {
1681 /* There is still data so let's put back this subbuffer. */
1682 ret = kernctl_put_subbuf(stream->wait_fd);
1683 assert(ret == 0);
1684 ret = 1; /* Data is pending */
1685 goto end;
1686 }
1687
1688 /* Data is NOT pending and ready to be read. */
1689 ret = 0;
1690
1691 end:
1692 return ret;
1693 }
This page took 0.104905 seconds and 4 git commands to generate.