Fix: consumer: snapshot: assertion on subsequent snapshot
[lttng-tools.git] / src / common / kernel-consumer / kernel-consumer.c
1 /*
2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #define _LGPL_SOURCE
11 #include <assert.h>
12 #include <poll.h>
13 #include <pthread.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <sys/mman.h>
17 #include <sys/socket.h>
18 #include <sys/types.h>
19 #include <inttypes.h>
20 #include <unistd.h>
21 #include <sys/stat.h>
22 #include <stdint.h>
23
24 #include <bin/lttng-consumerd/health-consumerd.h>
25 #include <common/common.h>
26 #include <common/kernel-ctl/kernel-ctl.h>
27 #include <common/sessiond-comm/sessiond-comm.h>
28 #include <common/sessiond-comm/relayd.h>
29 #include <common/compat/fcntl.h>
30 #include <common/compat/endian.h>
31 #include <common/pipe.h>
32 #include <common/relayd/relayd.h>
33 #include <common/utils.h>
34 #include <common/consumer/consumer-stream.h>
35 #include <common/index/index.h>
36 #include <common/consumer/consumer-timer.h>
37 #include <common/optional.h>
38 #include <common/buffer-view.h>
39 #include <common/consumer/consumer.h>
40 #include <common/consumer/metadata-bucket.h>
41
42 #include "kernel-consumer.h"
43
44 extern struct lttng_consumer_global_data the_consumer_data;
45 extern int consumer_poll_timeout;
46
47 /*
48 * Take a snapshot for a specific fd
49 *
50 * Returns 0 on success, < 0 on error
51 */
52 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
53 {
54 int ret = 0;
55 int infd = stream->wait_fd;
56
57 ret = kernctl_snapshot(infd);
58 /*
59 * -EAGAIN is not an error, it just means that there is no data to
60 * be read.
61 */
62 if (ret != 0 && ret != -EAGAIN) {
63 PERROR("Getting sub-buffer snapshot.");
64 }
65
66 return ret;
67 }
68
69 /*
70 * Sample consumed and produced positions for a specific fd.
71 *
72 * Returns 0 on success, < 0 on error.
73 */
74 int lttng_kconsumer_sample_snapshot_positions(
75 struct lttng_consumer_stream *stream)
76 {
77 assert(stream);
78
79 return kernctl_snapshot_sample_positions(stream->wait_fd);
80 }
81
82 /*
83 * Get the produced position
84 *
85 * Returns 0 on success, < 0 on error
86 */
87 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
88 unsigned long *pos)
89 {
90 int ret;
91 int infd = stream->wait_fd;
92
93 ret = kernctl_snapshot_get_produced(infd, pos);
94 if (ret != 0) {
95 PERROR("kernctl_snapshot_get_produced");
96 }
97
98 return ret;
99 }
100
101 /*
102 * Get the consumerd position
103 *
104 * Returns 0 on success, < 0 on error
105 */
106 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
107 unsigned long *pos)
108 {
109 int ret;
110 int infd = stream->wait_fd;
111
112 ret = kernctl_snapshot_get_consumed(infd, pos);
113 if (ret != 0) {
114 PERROR("kernctl_snapshot_get_consumed");
115 }
116
117 return ret;
118 }
119
120 static
121 int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
122 const char **addr)
123 {
124 int ret;
125 unsigned long mmap_offset;
126 const char *mmap_base = stream->mmap_base;
127
128 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
129 if (ret < 0) {
130 PERROR("Failed to get mmap read offset");
131 goto error;
132 }
133
134 *addr = mmap_base + mmap_offset;
135 error:
136 return ret;
137 }
138
139 static void finalize_snapshot_stream(
140 struct lttng_consumer_stream *stream, uint64_t relayd_id)
141 {
142 ASSERT_LOCKED(stream->lock);
143
144 if (relayd_id == (uint64_t) -1ULL) {
145 if (stream->out_fd >= 0) {
146 const int ret = close(stream->out_fd);
147
148 if (ret < 0) {
149 PERROR("Failed to close stream snapshot output file descriptor");
150 }
151
152 stream->out_fd = -1;
153 }
154 } else {
155 close_relayd_stream(stream);
156 stream->net_seq_idx = (uint64_t) -1ULL;
157 }
158
159 lttng_trace_chunk_put(stream->trace_chunk);
160 stream->trace_chunk = NULL;
161 }
162
/*
 * Take a snapshot of all the streams of a channel.
 * RCU read-side lock must be held across this function to ensure existence of
 * channel.
 *
 * @channel:                channel to snapshot (only mmap output supported)
 * @key:                    channel key, used for logging only
 * @path:                   relayd path used when streaming to a relay daemon
 * @relayd_id:              relayd id, or (uint64_t) -1ULL for local output
 * @nb_packets_per_stream:  cap on packets recorded per stream (0 = all)
 * @ctx:                    consumer context (currently unused by this body)
 *
 * Returns 0 on success, < 0 on error
 */
static int lttng_kconsumer_snapshot_channel(
		struct lttng_consumer_channel *channel,
		uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_stream *stream;

	DBG("Kernel consumer snapshot channel %" PRIu64, key);

	/* Prevent channel modifications while we perform the snapshot.*/
	pthread_mutex_lock(&channel->lock);

	rcu_read_lock();

	/* Splice is not supported yet for channel snapshot. */
	if (channel->output != CONSUMER_CHANNEL_MMAP) {
		ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
				channel->name);
		ret = -1;
		goto end;
	}

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		unsigned long consumed_pos, produced_pos;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		assert(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto end_unlock;
		}
		/*
		 * A previous snapshot must have released this reference via
		 * finalize_snapshot_stream(); it is re-acquired per snapshot.
		 */
		assert(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		/*
		 * Assign the received relayd ID so we can use it for streaming. The streams
		 * are not visible to anyone so this is OK to change it.
		 */
		stream->net_seq_idx = relayd_id;
		channel->relayd_id = relayd_id;
		if (relayd_id != (uint64_t) -1ULL) {
			/* Remote output: announce the stream to the relay daemon. */
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				ERR("sending stream to relayd");
				goto error_finalize_stream;
			}
		} else {
			/* Local output: create the snapshot trace files. */
			ret = consumer_stream_create_output_files(stream,
					false);
			if (ret < 0) {
				goto error_finalize_stream;
			}
			DBG("Kernel consumer snapshot stream (%" PRIu64 ")",
					stream->key);
		}

		ret = kernctl_buffer_flush_empty(stream->wait_fd);
		if (ret < 0) {
			/*
			 * Doing a buffer flush which does not take into
			 * account empty packets. This is not perfect
			 * for stream intersection, but required as a
			 * fall-back when "flush_empty" is not
			 * implemented by lttng-modules.
			 */
			ret = kernctl_buffer_flush(stream->wait_fd);
			if (ret < 0) {
				ERR("Failed to flush kernel stream");
				goto error_finalize_stream;
			}
			/*
			 * NOTE(review): on fall-back flush success this jumps
			 * past the snapshot and past finalize_snapshot_stream()
			 * for this stream (leaving stream->trace_chunk set) and
			 * returns without visiting the remaining streams —
			 * confirm this early exit is intended.
			 */
			goto end_unlock;
		}

		ret = lttng_kconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking kernel snapshot");
			goto error_finalize_stream;
		}

		/* Sample the window of data to copy out of the ring buffer. */
		ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced kernel snapshot position");
			goto error_finalize_stream;
		}

		ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd kernel snapshot position");
			goto error_finalize_stream;
		}

		/*
		 * Move the start position forward so that at most
		 * nb_packets_per_stream packets are recorded.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

		/* Signed comparison handles position counter wrap-around. */
		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();
			DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);

			ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("kernctl_get_subbuf snapshot");
					goto error_finalize_stream;
				}
				/* Sub-buffer unavailable: skip it and account it as lost. */
				DBG("Kernel consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
					stream, &subbuf_view,
					padded_len - len);
			/*
			 * We write the padded len in local tracefiles but the data len
			 * when using a relay. Display the error but continue processing
			 * to try to release the subbuffer.
			 */
			if (relayd_id != (uint64_t) -1ULL) {
				if (read_len != len) {
					ERR("Error sending to the relay (ret: %zd != len: %lu)",
							read_len, len);
				}
			} else {
				if (read_len != padded_len) {
					ERR("Error writing to tracefile (ret: %zd != len: %lu)",
							read_len, padded_len);
				}
			}

			/* Release the sub-buffer back to the kernel tracer. */
			ret = kernctl_put_subbuf(stream->wait_fd);
			if (ret < 0) {
				ERR("Snapshot kernctl_put_subbuf");
				goto error_finalize_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Close outputs and release the trace chunk reference. */
		finalize_snapshot_stream(stream, relayd_id);
		pthread_mutex_unlock(&stream->lock);
	}

	/* All good! */
	ret = 0;
	goto end;

error_put_subbuf:
	/* Best-effort release before finalizing; preserve the original error. */
	ret = kernctl_put_subbuf(stream->wait_fd);
	if (ret < 0) {
		ERR("Snapshot kernctl_put_subbuf error path");
	}
error_finalize_stream:
	finalize_snapshot_stream(stream, relayd_id);
end_unlock:
	pthread_mutex_unlock(&stream->lock);
end:
	rcu_read_unlock();
	pthread_mutex_unlock(&channel->lock);
	return ret;
}
368
/*
 * Read the whole metadata available for a snapshot.
 * RCU read-side lock must be held across this function to ensure existence of
 * metadata_channel.
 *
 * @metadata_channel: channel whose metadata stream is drained
 * @key:              channel key, used for logging only
 * @path:             relayd path used when streaming to a relay daemon
 * @relayd_id:        relayd id, or (uint64_t) -1ULL for local output
 * @ctx:              consumer context, passed to the sub-buffer reader
 *
 * Note: the metadata stream is destroyed and detached from the channel on
 * both the success and error paths before returning.
 *
 * Returns 0 on success, < 0 on error
 */
static int lttng_kconsumer_snapshot_metadata(
		struct lttng_consumer_channel *metadata_channel,
		uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret, use_relayd = 0;
	ssize_t ret_read;
	struct lttng_consumer_stream *metadata_stream;

	assert(ctx);

	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	/* Serialize against the regular metadata consumption path. */
	metadata_stream->read_subbuffer_ops.lock(metadata_stream);
	assert(metadata_channel->trace_chunk);
	assert(metadata_stream->trace_chunk);

	/* Flag once that we have a valid relayd for the stream. */
	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	if (use_relayd) {
		/* Remote output: announce the stream to the relay daemon. */
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_snapshot;
		}
	} else {
		/* Local output: create the metadata snapshot file. */
		ret = consumer_stream_create_output_files(metadata_stream,
				false);
		if (ret < 0) {
			goto error_snapshot;
		}
	}

	/* Drain the metadata stream until no data is left. */
	do {
		health_code_update();

		ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret_read < 0) {
			ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
					ret_read);
			/* Propagate the (negative) read error as the return code. */
			ret = ret_read;
			goto error_snapshot;
		}
	} while (ret_read > 0);

	if (use_relayd) {
		close_relayd_stream(metadata_stream);
		metadata_stream->net_seq_idx = (uint64_t) -1ULL;
	} else {
		if (metadata_stream->out_fd >= 0) {
			ret = close(metadata_stream->out_fd);
			if (ret < 0) {
				PERROR("Kernel consumer snapshot metadata close out_fd");
				/*
				 * Don't go on error here since the snapshot was successful at this
				 * point but somehow the close failed.
				 */
			}
			metadata_stream->out_fd = -1;
			/* Release the stream's reference on the trace chunk. */
			lttng_trace_chunk_put(metadata_stream->trace_chunk);
			metadata_stream->trace_chunk = NULL;
		}
	}

	ret = 0;
error_snapshot:
	/* Common teardown: unlock, then tear down the one-shot metadata stream. */
	metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;
	rcu_read_unlock();
	return ret;
}
456
457 /*
458 * Receive command from session daemon and process it.
459 *
460 * Return 1 on success else a negative value or 0.
461 */
462 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
463 int sock, struct pollfd *consumer_sockpoll)
464 {
465 int ret_func;
466 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
467 struct lttcomm_consumer_msg msg;
468
469 health_code_update();
470
471 {
472 ssize_t ret_recv;
473
474 ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
475 if (ret_recv != sizeof(msg)) {
476 if (ret_recv > 0) {
477 lttng_consumer_send_error(ctx,
478 LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
479 ret_recv = -1;
480 }
481 return ret_recv;
482 }
483 }
484
485 health_code_update();
486
487 /* Deprecated command */
488 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
489
490 health_code_update();
491
492 /* relayd needs RCU read-side protection */
493 rcu_read_lock();
494
495 switch (msg.cmd_type) {
496 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
497 {
498 uint32_t major = msg.u.relayd_sock.major;
499 uint32_t minor = msg.u.relayd_sock.minor;
500 enum lttcomm_sock_proto protocol = (enum lttcomm_sock_proto)
501 msg.u.relayd_sock.relayd_socket_protocol;
502
503 /* Session daemon status message are handled in the following call. */
504 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
505 msg.u.relayd_sock.type, ctx, sock,
506 consumer_sockpoll, msg.u.relayd_sock.session_id,
507 msg.u.relayd_sock.relayd_session_id, major,
508 minor, protocol);
509 goto end_nosignal;
510 }
511 case LTTNG_CONSUMER_ADD_CHANNEL:
512 {
513 struct lttng_consumer_channel *new_channel;
514 int ret_send_status, ret_add_channel = 0;
515 const uint64_t chunk_id = msg.u.channel.chunk_id.value;
516
517 health_code_update();
518
519 /* First send a status message before receiving the fds. */
520 ret_send_status = consumer_send_status_msg(sock, ret_code);
521 if (ret_send_status < 0) {
522 /* Somehow, the session daemon is not responding anymore. */
523 goto error_fatal;
524 }
525
526 health_code_update();
527
528 DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
529 new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
530 msg.u.channel.session_id,
531 msg.u.channel.chunk_id.is_set ?
532 &chunk_id : NULL,
533 msg.u.channel.pathname,
534 msg.u.channel.name,
535 msg.u.channel.relayd_id, msg.u.channel.output,
536 msg.u.channel.tracefile_size,
537 msg.u.channel.tracefile_count, 0,
538 msg.u.channel.monitor,
539 msg.u.channel.live_timer_interval,
540 msg.u.channel.is_live,
541 NULL, NULL);
542 if (new_channel == NULL) {
543 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
544 goto end_nosignal;
545 }
546 new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
547 switch (msg.u.channel.output) {
548 case LTTNG_EVENT_SPLICE:
549 new_channel->output = CONSUMER_CHANNEL_SPLICE;
550 break;
551 case LTTNG_EVENT_MMAP:
552 new_channel->output = CONSUMER_CHANNEL_MMAP;
553 break;
554 default:
555 ERR("Channel output unknown %d", msg.u.channel.output);
556 goto end_nosignal;
557 }
558
559 /* Translate and save channel type. */
560 switch (msg.u.channel.type) {
561 case CONSUMER_CHANNEL_TYPE_DATA:
562 case CONSUMER_CHANNEL_TYPE_METADATA:
563 new_channel->type = msg.u.channel.type;
564 break;
565 default:
566 assert(0);
567 goto end_nosignal;
568 };
569
570 health_code_update();
571
572 if (ctx->on_recv_channel != NULL) {
573 int ret_recv_channel =
574 ctx->on_recv_channel(new_channel);
575 if (ret_recv_channel == 0) {
576 ret_add_channel = consumer_add_channel(
577 new_channel, ctx);
578 } else if (ret_recv_channel < 0) {
579 goto end_nosignal;
580 }
581 } else {
582 ret_add_channel =
583 consumer_add_channel(new_channel, ctx);
584 }
585 if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA &&
586 !ret_add_channel) {
587 int monitor_start_ret;
588
589 DBG("Consumer starting monitor timer");
590 consumer_timer_live_start(new_channel,
591 msg.u.channel.live_timer_interval);
592 monitor_start_ret = consumer_timer_monitor_start(
593 new_channel,
594 msg.u.channel.monitor_timer_interval);
595 if (monitor_start_ret < 0) {
596 ERR("Starting channel monitoring timer failed");
597 goto end_nosignal;
598 }
599 }
600
601 health_code_update();
602
603 /* If we received an error in add_channel, we need to report it. */
604 if (ret_add_channel < 0) {
605 ret_send_status = consumer_send_status_msg(
606 sock, ret_add_channel);
607 if (ret_send_status < 0) {
608 goto error_fatal;
609 }
610 goto end_nosignal;
611 }
612
613 goto end_nosignal;
614 }
615 case LTTNG_CONSUMER_ADD_STREAM:
616 {
617 int fd;
618 struct lttng_pipe *stream_pipe;
619 struct lttng_consumer_stream *new_stream;
620 struct lttng_consumer_channel *channel;
621 int alloc_ret = 0;
622 int ret_send_status, ret_poll, ret_get_max_subbuf_size;
623 ssize_t ret_pipe_write, ret_recv;
624
625 /*
626 * Get stream's channel reference. Needed when adding the stream to the
627 * global hash table.
628 */
629 channel = consumer_find_channel(msg.u.stream.channel_key);
630 if (!channel) {
631 /*
632 * We could not find the channel. Can happen if cpu hotplug
633 * happens while tearing down.
634 */
635 ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
636 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
637 }
638
639 health_code_update();
640
641 /* First send a status message before receiving the fds. */
642 ret_send_status = consumer_send_status_msg(sock, ret_code);
643 if (ret_send_status < 0) {
644 /* Somehow, the session daemon is not responding anymore. */
645 goto error_add_stream_fatal;
646 }
647
648 health_code_update();
649
650 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
651 /* Channel was not found. */
652 goto error_add_stream_nosignal;
653 }
654
655 /* Blocking call */
656 health_poll_entry();
657 ret_poll = lttng_consumer_poll_socket(consumer_sockpoll);
658 health_poll_exit();
659 if (ret_poll) {
660 goto error_add_stream_fatal;
661 }
662
663 health_code_update();
664
665 /* Get stream file descriptor from socket */
666 ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
667 if (ret_recv != sizeof(fd)) {
668 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
669 ret_func = ret_recv;
670 goto end;
671 }
672
673 health_code_update();
674
675 /*
676 * Send status code to session daemon only if the recv works. If the
677 * above recv() failed, the session daemon is notified through the
678 * error socket and the teardown is eventually done.
679 */
680 ret_send_status = consumer_send_status_msg(sock, ret_code);
681 if (ret_send_status < 0) {
682 /* Somehow, the session daemon is not responding anymore. */
683 goto error_add_stream_nosignal;
684 }
685
686 health_code_update();
687
688 pthread_mutex_lock(&channel->lock);
689 new_stream = consumer_stream_create(
690 channel,
691 channel->key,
692 fd,
693 channel->name,
694 channel->relayd_id,
695 channel->session_id,
696 channel->trace_chunk,
697 msg.u.stream.cpu,
698 &alloc_ret,
699 channel->type,
700 channel->monitor);
701 if (new_stream == NULL) {
702 switch (alloc_ret) {
703 case -ENOMEM:
704 case -EINVAL:
705 default:
706 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
707 break;
708 }
709 pthread_mutex_unlock(&channel->lock);
710 goto error_add_stream_nosignal;
711 }
712
713 new_stream->wait_fd = fd;
714 ret_get_max_subbuf_size = kernctl_get_max_subbuf_size(
715 new_stream->wait_fd, &new_stream->max_sb_size);
716 if (ret_get_max_subbuf_size < 0) {
717 pthread_mutex_unlock(&channel->lock);
718 ERR("Failed to get kernel maximal subbuffer size");
719 goto error_add_stream_nosignal;
720 }
721
722 consumer_stream_update_channel_attributes(new_stream,
723 channel);
724
725 /*
726 * We've just assigned the channel to the stream so increment the
727 * refcount right now. We don't need to increment the refcount for
728 * streams in no monitor because we handle manually the cleanup of
729 * those. It is very important to make sure there is NO prior
730 * consumer_del_stream() calls or else the refcount will be unbalanced.
731 */
732 if (channel->monitor) {
733 uatomic_inc(&new_stream->chan->refcount);
734 }
735
736 /*
737 * The buffer flush is done on the session daemon side for the kernel
738 * so no need for the stream "hangup_flush_done" variable to be
739 * tracked. This is important for a kernel stream since we don't rely
740 * on the flush state of the stream to read data. It's not the case for
741 * user space tracing.
742 */
743 new_stream->hangup_flush_done = 0;
744
745 health_code_update();
746
747 pthread_mutex_lock(&new_stream->lock);
748 if (ctx->on_recv_stream) {
749 int ret_recv_stream = ctx->on_recv_stream(new_stream);
750 if (ret_recv_stream < 0) {
751 pthread_mutex_unlock(&new_stream->lock);
752 pthread_mutex_unlock(&channel->lock);
753 consumer_stream_free(new_stream);
754 goto error_add_stream_nosignal;
755 }
756 }
757 health_code_update();
758
759 if (new_stream->metadata_flag) {
760 channel->metadata_stream = new_stream;
761 }
762
763 /* Do not monitor this stream. */
764 if (!channel->monitor) {
765 DBG("Kernel consumer add stream %s in no monitor mode with "
766 "relayd id %" PRIu64, new_stream->name,
767 new_stream->net_seq_idx);
768 cds_list_add(&new_stream->send_node, &channel->streams.head);
769 pthread_mutex_unlock(&new_stream->lock);
770 pthread_mutex_unlock(&channel->lock);
771 goto end_add_stream;
772 }
773
774 /* Send stream to relayd if the stream has an ID. */
775 if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
776 int ret_send_relayd_stream;
777
778 ret_send_relayd_stream = consumer_send_relayd_stream(
779 new_stream, new_stream->chan->pathname);
780 if (ret_send_relayd_stream < 0) {
781 pthread_mutex_unlock(&new_stream->lock);
782 pthread_mutex_unlock(&channel->lock);
783 consumer_stream_free(new_stream);
784 goto error_add_stream_nosignal;
785 }
786
787 /*
788 * If adding an extra stream to an already
789 * existing channel (e.g. cpu hotplug), we need
790 * to send the "streams_sent" command to relayd.
791 */
792 if (channel->streams_sent_to_relayd) {
793 int ret_send_relayd_streams_sent;
794
795 ret_send_relayd_streams_sent =
796 consumer_send_relayd_streams_sent(
797 new_stream->net_seq_idx);
798 if (ret_send_relayd_streams_sent < 0) {
799 pthread_mutex_unlock(&new_stream->lock);
800 pthread_mutex_unlock(&channel->lock);
801 goto error_add_stream_nosignal;
802 }
803 }
804 }
805 pthread_mutex_unlock(&new_stream->lock);
806 pthread_mutex_unlock(&channel->lock);
807
808 /* Get the right pipe where the stream will be sent. */
809 if (new_stream->metadata_flag) {
810 consumer_add_metadata_stream(new_stream);
811 stream_pipe = ctx->consumer_metadata_pipe;
812 } else {
813 consumer_add_data_stream(new_stream);
814 stream_pipe = ctx->consumer_data_pipe;
815 }
816
817 /* Visible to other threads */
818 new_stream->globally_visible = 1;
819
820 health_code_update();
821
822 ret_pipe_write = lttng_pipe_write(
823 stream_pipe, &new_stream, sizeof(new_stream));
824 if (ret_pipe_write < 0) {
825 ERR("Consumer write %s stream to pipe %d",
826 new_stream->metadata_flag ? "metadata" : "data",
827 lttng_pipe_get_writefd(stream_pipe));
828 if (new_stream->metadata_flag) {
829 consumer_del_stream_for_metadata(new_stream);
830 } else {
831 consumer_del_stream_for_data(new_stream);
832 }
833 goto error_add_stream_nosignal;
834 }
835
836 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
837 new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id);
838 end_add_stream:
839 break;
840 error_add_stream_nosignal:
841 goto end_nosignal;
842 error_add_stream_fatal:
843 goto error_fatal;
844 }
845 case LTTNG_CONSUMER_STREAMS_SENT:
846 {
847 struct lttng_consumer_channel *channel;
848 int ret_send_status;
849
850 /*
851 * Get stream's channel reference. Needed when adding the stream to the
852 * global hash table.
853 */
854 channel = consumer_find_channel(msg.u.sent_streams.channel_key);
855 if (!channel) {
856 /*
857 * We could not find the channel. Can happen if cpu hotplug
858 * happens while tearing down.
859 */
860 ERR("Unable to find channel key %" PRIu64,
861 msg.u.sent_streams.channel_key);
862 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
863 }
864
865 health_code_update();
866
867 /*
868 * Send status code to session daemon.
869 */
870 ret_send_status = consumer_send_status_msg(sock, ret_code);
871 if (ret_send_status < 0 ||
872 ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
873 /* Somehow, the session daemon is not responding anymore. */
874 goto error_streams_sent_nosignal;
875 }
876
877 health_code_update();
878
879 /*
880 * We should not send this message if we don't monitor the
881 * streams in this channel.
882 */
883 if (!channel->monitor) {
884 goto end_error_streams_sent;
885 }
886
887 health_code_update();
888 /* Send stream to relayd if the stream has an ID. */
889 if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
890 int ret_send_relay_streams;
891
892 ret_send_relay_streams = consumer_send_relayd_streams_sent(
893 msg.u.sent_streams.net_seq_idx);
894 if (ret_send_relay_streams < 0) {
895 goto error_streams_sent_nosignal;
896 }
897 channel->streams_sent_to_relayd = true;
898 }
899 end_error_streams_sent:
900 break;
901 error_streams_sent_nosignal:
902 goto end_nosignal;
903 }
904 case LTTNG_CONSUMER_UPDATE_STREAM:
905 {
906 rcu_read_unlock();
907 return -ENOSYS;
908 }
909 case LTTNG_CONSUMER_DESTROY_RELAYD:
910 {
911 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
912 struct consumer_relayd_sock_pair *relayd;
913 int ret_send_status;
914
915 DBG("Kernel consumer destroying relayd %" PRIu64, index);
916
917 /* Get relayd reference if exists. */
918 relayd = consumer_find_relayd(index);
919 if (relayd == NULL) {
920 DBG("Unable to find relayd %" PRIu64, index);
921 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
922 }
923
924 /*
925 * Each relayd socket pair has a refcount of stream attached to it
926 * which tells if the relayd is still active or not depending on the
927 * refcount value.
928 *
929 * This will set the destroy flag of the relayd object and destroy it
930 * if the refcount reaches zero when called.
931 *
932 * The destroy can happen either here or when a stream fd hangs up.
933 */
934 if (relayd) {
935 consumer_flag_relayd_for_destroy(relayd);
936 }
937
938 health_code_update();
939
940 ret_send_status = consumer_send_status_msg(sock, ret_code);
941 if (ret_send_status < 0) {
942 /* Somehow, the session daemon is not responding anymore. */
943 goto error_fatal;
944 }
945
946 goto end_nosignal;
947 }
948 case LTTNG_CONSUMER_DATA_PENDING:
949 {
950 int32_t ret_data_pending;
951 uint64_t id = msg.u.data_pending.session_id;
952 ssize_t ret_send;
953
954 DBG("Kernel consumer data pending command for id %" PRIu64, id);
955
956 ret_data_pending = consumer_data_pending(id);
957
958 health_code_update();
959
960 /* Send back returned value to session daemon */
961 ret_send = lttcomm_send_unix_sock(sock, &ret_data_pending,
962 sizeof(ret_data_pending));
963 if (ret_send < 0) {
964 PERROR("send data pending ret code");
965 goto error_fatal;
966 }
967
968 /*
969 * No need to send back a status message since the data pending
970 * returned value is the response.
971 */
972 break;
973 }
974 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
975 {
976 struct lttng_consumer_channel *channel;
977 uint64_t key = msg.u.snapshot_channel.key;
978 int ret_send_status;
979
980 channel = consumer_find_channel(key);
981 if (!channel) {
982 ERR("Channel %" PRIu64 " not found", key);
983 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
984 } else {
985 if (msg.u.snapshot_channel.metadata == 1) {
986 int ret_snapshot;
987
988 ret_snapshot = lttng_kconsumer_snapshot_metadata(
989 channel, key,
990 msg.u.snapshot_channel.pathname,
991 msg.u.snapshot_channel.relayd_id,
992 ctx);
993 if (ret_snapshot < 0) {
994 ERR("Snapshot metadata failed");
995 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
996 }
997 } else {
998 int ret_snapshot;
999
1000 ret_snapshot = lttng_kconsumer_snapshot_channel(
1001 channel, key,
1002 msg.u.snapshot_channel.pathname,
1003 msg.u.snapshot_channel.relayd_id,
1004 msg.u.snapshot_channel
1005 .nb_packets_per_stream,
1006 ctx);
1007 if (ret_snapshot < 0) {
1008 ERR("Snapshot channel failed");
1009 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
1010 }
1011 }
1012 }
1013 health_code_update();
1014
1015 ret_send_status = consumer_send_status_msg(sock, ret_code);
1016 if (ret_send_status < 0) {
1017 /* Somehow, the session daemon is not responding anymore. */
1018 goto end_nosignal;
1019 }
1020 break;
1021 }
1022 case LTTNG_CONSUMER_DESTROY_CHANNEL:
1023 {
1024 uint64_t key = msg.u.destroy_channel.key;
1025 struct lttng_consumer_channel *channel;
1026 int ret_send_status;
1027
1028 channel = consumer_find_channel(key);
1029 if (!channel) {
1030 ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
1031 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1032 }
1033
1034 health_code_update();
1035
1036 ret_send_status = consumer_send_status_msg(sock, ret_code);
1037 if (ret_send_status < 0) {
1038 /* Somehow, the session daemon is not responding anymore. */
1039 goto end_destroy_channel;
1040 }
1041
1042 health_code_update();
1043
1044 /* Stop right now if no channel was found. */
1045 if (!channel) {
1046 goto end_destroy_channel;
1047 }
1048
1049 /*
1050 * This command should ONLY be issued for channel with streams set in
1051 * no monitor mode.
1052 */
1053 assert(!channel->monitor);
1054
1055 /*
1056 * The refcount should ALWAYS be 0 in the case of a channel in no
1057 * monitor mode.
1058 */
1059 assert(!uatomic_sub_return(&channel->refcount, 1));
1060
1061 consumer_del_channel(channel);
1062 end_destroy_channel:
1063 goto end_nosignal;
1064 }
1065 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1066 {
1067 ssize_t ret;
1068 uint64_t count;
1069 struct lttng_consumer_channel *channel;
1070 uint64_t id = msg.u.discarded_events.session_id;
1071 uint64_t key = msg.u.discarded_events.channel_key;
1072
1073 DBG("Kernel consumer discarded events command for session id %"
1074 PRIu64 ", channel key %" PRIu64, id, key);
1075
1076 channel = consumer_find_channel(key);
1077 if (!channel) {
1078 ERR("Kernel consumer discarded events channel %"
1079 PRIu64 " not found", key);
1080 count = 0;
1081 } else {
1082 count = channel->discarded_events;
1083 }
1084
1085 health_code_update();
1086
1087 /* Send back returned value to session daemon */
1088 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1089 if (ret < 0) {
1090 PERROR("send discarded events");
1091 goto error_fatal;
1092 }
1093
1094 break;
1095 }
1096 case LTTNG_CONSUMER_LOST_PACKETS:
1097 {
1098 ssize_t ret;
1099 uint64_t count;
1100 struct lttng_consumer_channel *channel;
1101 uint64_t id = msg.u.lost_packets.session_id;
1102 uint64_t key = msg.u.lost_packets.channel_key;
1103
1104 DBG("Kernel consumer lost packets command for session id %"
1105 PRIu64 ", channel key %" PRIu64, id, key);
1106
1107 channel = consumer_find_channel(key);
1108 if (!channel) {
1109 ERR("Kernel consumer lost packets channel %"
1110 PRIu64 " not found", key);
1111 count = 0;
1112 } else {
1113 count = channel->lost_packets;
1114 }
1115
1116 health_code_update();
1117
1118 /* Send back returned value to session daemon */
1119 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1120 if (ret < 0) {
1121 PERROR("send lost packets");
1122 goto error_fatal;
1123 }
1124
1125 break;
1126 }
1127 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
1128 {
1129 int channel_monitor_pipe;
1130 int ret_send_status, ret_set_channel_monitor_pipe;
1131 ssize_t ret_recv;
1132
1133 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1134 /* Successfully received the command's type. */
1135 ret_send_status = consumer_send_status_msg(sock, ret_code);
1136 if (ret_send_status < 0) {
1137 goto error_fatal;
1138 }
1139
1140 ret_recv = lttcomm_recv_fds_unix_sock(
1141 sock, &channel_monitor_pipe, 1);
1142 if (ret_recv != sizeof(channel_monitor_pipe)) {
1143 ERR("Failed to receive channel monitor pipe");
1144 goto error_fatal;
1145 }
1146
1147 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
1148 ret_set_channel_monitor_pipe =
1149 consumer_timer_thread_set_channel_monitor_pipe(
1150 channel_monitor_pipe);
1151 if (!ret_set_channel_monitor_pipe) {
1152 int flags;
1153 int ret_fcntl;
1154
1155 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1156 /* Set the pipe as non-blocking. */
1157 ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
1158 if (ret_fcntl == -1) {
1159 PERROR("fcntl get flags of the channel monitoring pipe");
1160 goto error_fatal;
1161 }
1162 flags = ret_fcntl;
1163
1164 ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL,
1165 flags | O_NONBLOCK);
1166 if (ret_fcntl == -1) {
1167 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1168 goto error_fatal;
1169 }
1170 DBG("Channel monitor pipe set as non-blocking");
1171 } else {
1172 ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
1173 }
1174 ret_send_status = consumer_send_status_msg(sock, ret_code);
1175 if (ret_send_status < 0) {
1176 goto error_fatal;
1177 }
1178 break;
1179 }
1180 case LTTNG_CONSUMER_ROTATE_CHANNEL:
1181 {
1182 struct lttng_consumer_channel *channel;
1183 uint64_t key = msg.u.rotate_channel.key;
1184 int ret_send_status;
1185
1186 DBG("Consumer rotate channel %" PRIu64, key);
1187
1188 channel = consumer_find_channel(key);
1189 if (!channel) {
1190 ERR("Channel %" PRIu64 " not found", key);
1191 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1192 } else {
1193 /*
1194 * Sample the rotate position of all the streams in this channel.
1195 */
1196 int ret_rotate_channel;
1197
1198 ret_rotate_channel = lttng_consumer_rotate_channel(
1199 channel, key,
1200 msg.u.rotate_channel.relayd_id,
1201 msg.u.rotate_channel.metadata, ctx);
1202 if (ret_rotate_channel < 0) {
1203 ERR("Rotate channel failed");
1204 ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
1205 }
1206
1207 health_code_update();
1208 }
1209
1210 ret_send_status = consumer_send_status_msg(sock, ret_code);
1211 if (ret_send_status < 0) {
1212 /* Somehow, the session daemon is not responding anymore. */
1213 goto error_rotate_channel;
1214 }
1215 if (channel) {
1216 /* Rotate the streams that are ready right now. */
1217 int ret_rotate;
1218
1219 ret_rotate = lttng_consumer_rotate_ready_streams(
1220 channel, key, ctx);
1221 if (ret_rotate < 0) {
1222 ERR("Rotate ready streams failed");
1223 }
1224 }
1225 break;
1226 error_rotate_channel:
1227 goto end_nosignal;
1228 }
1229 case LTTNG_CONSUMER_CLEAR_CHANNEL:
1230 {
1231 struct lttng_consumer_channel *channel;
1232 uint64_t key = msg.u.clear_channel.key;
1233 int ret_send_status;
1234
1235 channel = consumer_find_channel(key);
1236 if (!channel) {
1237 DBG("Channel %" PRIu64 " not found", key);
1238 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1239 } else {
1240 int ret_clear_channel;
1241
1242 ret_clear_channel =
1243 lttng_consumer_clear_channel(channel);
1244 if (ret_clear_channel) {
1245 ERR("Clear channel failed");
1246 ret_code = ret_clear_channel;
1247 }
1248
1249 health_code_update();
1250 }
1251
1252 ret_send_status = consumer_send_status_msg(sock, ret_code);
1253 if (ret_send_status < 0) {
1254 /* Somehow, the session daemon is not responding anymore. */
1255 goto end_nosignal;
1256 }
1257
1258 break;
1259 }
1260 case LTTNG_CONSUMER_INIT:
1261 {
1262 int ret_send_status;
1263
1264 ret_code = lttng_consumer_init_command(ctx,
1265 msg.u.init.sessiond_uuid);
1266 health_code_update();
1267 ret_send_status = consumer_send_status_msg(sock, ret_code);
1268 if (ret_send_status < 0) {
1269 /* Somehow, the session daemon is not responding anymore. */
1270 goto end_nosignal;
1271 }
1272 break;
1273 }
1274 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
1275 {
1276 const struct lttng_credentials credentials = {
1277 .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid),
1278 .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid),
1279 };
1280 const bool is_local_trace =
1281 !msg.u.create_trace_chunk.relayd_id.is_set;
1282 const uint64_t relayd_id =
1283 msg.u.create_trace_chunk.relayd_id.value;
1284 const char *chunk_override_name =
1285 *msg.u.create_trace_chunk.override_name ?
1286 msg.u.create_trace_chunk.override_name :
1287 NULL;
1288 struct lttng_directory_handle *chunk_directory_handle = NULL;
1289
1290 /*
1291 * The session daemon will only provide a chunk directory file
1292 * descriptor for local traces.
1293 */
1294 if (is_local_trace) {
1295 int chunk_dirfd;
1296 int ret_send_status;
1297 ssize_t ret_recv;
1298
1299 /* Acnowledge the reception of the command. */
1300 ret_send_status = consumer_send_status_msg(
1301 sock, LTTCOMM_CONSUMERD_SUCCESS);
1302 if (ret_send_status < 0) {
1303 /* Somehow, the session daemon is not responding anymore. */
1304 goto end_nosignal;
1305 }
1306
1307 ret_recv = lttcomm_recv_fds_unix_sock(
1308 sock, &chunk_dirfd, 1);
1309 if (ret_recv != sizeof(chunk_dirfd)) {
1310 ERR("Failed to receive trace chunk directory file descriptor");
1311 goto error_fatal;
1312 }
1313
1314 DBG("Received trace chunk directory fd (%d)",
1315 chunk_dirfd);
1316 chunk_directory_handle = lttng_directory_handle_create_from_dirfd(
1317 chunk_dirfd);
1318 if (!chunk_directory_handle) {
1319 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1320 if (close(chunk_dirfd)) {
1321 PERROR("Failed to close chunk directory file descriptor");
1322 }
1323 goto error_fatal;
1324 }
1325 }
1326
1327 ret_code = lttng_consumer_create_trace_chunk(
1328 !is_local_trace ? &relayd_id : NULL,
1329 msg.u.create_trace_chunk.session_id,
1330 msg.u.create_trace_chunk.chunk_id,
1331 (time_t) msg.u.create_trace_chunk
1332 .creation_timestamp,
1333 chunk_override_name,
1334 msg.u.create_trace_chunk.credentials.is_set ?
1335 &credentials :
1336 NULL,
1337 chunk_directory_handle);
1338 lttng_directory_handle_put(chunk_directory_handle);
1339 goto end_msg_sessiond;
1340 }
1341 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
1342 {
1343 enum lttng_trace_chunk_command_type close_command =
1344 msg.u.close_trace_chunk.close_command.value;
1345 const uint64_t relayd_id =
1346 msg.u.close_trace_chunk.relayd_id.value;
1347 struct lttcomm_consumer_close_trace_chunk_reply reply;
1348 char path[LTTNG_PATH_MAX];
1349 ssize_t ret_send;
1350
1351 ret_code = lttng_consumer_close_trace_chunk(
1352 msg.u.close_trace_chunk.relayd_id.is_set ?
1353 &relayd_id :
1354 NULL,
1355 msg.u.close_trace_chunk.session_id,
1356 msg.u.close_trace_chunk.chunk_id,
1357 (time_t) msg.u.close_trace_chunk.close_timestamp,
1358 msg.u.close_trace_chunk.close_command.is_set ?
1359 &close_command :
1360 NULL, path);
1361 reply.ret_code = ret_code;
1362 reply.path_length = strlen(path) + 1;
1363 ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
1364 if (ret_send != sizeof(reply)) {
1365 goto error_fatal;
1366 }
1367 ret_send = lttcomm_send_unix_sock(
1368 sock, path, reply.path_length);
1369 if (ret_send != reply.path_length) {
1370 goto error_fatal;
1371 }
1372 goto end_nosignal;
1373 }
1374 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
1375 {
1376 const uint64_t relayd_id =
1377 msg.u.trace_chunk_exists.relayd_id.value;
1378
1379 ret_code = lttng_consumer_trace_chunk_exists(
1380 msg.u.trace_chunk_exists.relayd_id.is_set ?
1381 &relayd_id : NULL,
1382 msg.u.trace_chunk_exists.session_id,
1383 msg.u.trace_chunk_exists.chunk_id);
1384 goto end_msg_sessiond;
1385 }
1386 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
1387 {
1388 const uint64_t key = msg.u.open_channel_packets.key;
1389 struct lttng_consumer_channel *channel =
1390 consumer_find_channel(key);
1391
1392 if (channel) {
1393 pthread_mutex_lock(&channel->lock);
1394 ret_code = lttng_consumer_open_channel_packets(channel);
1395 pthread_mutex_unlock(&channel->lock);
1396 } else {
1397 WARN("Channel %" PRIu64 " not found", key);
1398 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1399 }
1400
1401 health_code_update();
1402 goto end_msg_sessiond;
1403 }
1404 default:
1405 goto end_nosignal;
1406 }
1407
1408 end_nosignal:
1409 /*
1410 * Return 1 to indicate success since the 0 value can be a socket
1411 * shutdown during the recv() or send() call.
1412 */
1413 ret_func = 1;
1414 goto end;
1415 error_fatal:
1416 /* This will issue a consumer stop. */
1417 ret_func = -1;
1418 goto end;
1419 end_msg_sessiond:
1420 /*
1421 * The returned value here is not useful since either way we'll return 1 to
1422 * the caller because the session daemon socket management is done
1423 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1424 */
1425 {
1426 int ret_send_status;
1427
1428 ret_send_status = consumer_send_status_msg(sock, ret_code);
1429 if (ret_send_status < 0) {
1430 goto error_fatal;
1431 }
1432 }
1433
1434 ret_func = 1;
1435
1436 end:
1437 health_code_update();
1438 rcu_read_unlock();
1439 return ret_func;
1440 }
1441
1442 /*
1443 * Sync metadata meaning request them to the session daemon and snapshot to the
1444 * metadata thread can consumer them.
1445 *
1446 * Metadata stream lock MUST be acquired.
1447 */
1448 enum sync_metadata_status lttng_kconsumer_sync_metadata(
1449 struct lttng_consumer_stream *metadata)
1450 {
1451 int ret;
1452 enum sync_metadata_status status;
1453
1454 assert(metadata);
1455
1456 ret = kernctl_buffer_flush(metadata->wait_fd);
1457 if (ret < 0) {
1458 ERR("Failed to flush kernel stream");
1459 status = SYNC_METADATA_STATUS_ERROR;
1460 goto end;
1461 }
1462
1463 ret = kernctl_snapshot(metadata->wait_fd);
1464 if (ret < 0) {
1465 if (errno == EAGAIN) {
1466 /* No new metadata, exit. */
1467 DBG("Sync metadata, no new kernel metadata");
1468 status = SYNC_METADATA_STATUS_NO_DATA;
1469 } else {
1470 ERR("Sync metadata, taking kernel snapshot failed.");
1471 status = SYNC_METADATA_STATUS_ERROR;
1472 }
1473 } else {
1474 status = SYNC_METADATA_STATUS_NEW_DATA;
1475 }
1476
1477 end:
1478 return status;
1479 }
1480
1481 static
1482 int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
1483 struct stream_subbuffer *subbuf)
1484 {
1485 int ret;
1486
1487 ret = kernctl_get_subbuf_size(
1488 stream->wait_fd, &subbuf->info.data.subbuf_size);
1489 if (ret) {
1490 goto end;
1491 }
1492
1493 ret = kernctl_get_padded_subbuf_size(
1494 stream->wait_fd, &subbuf->info.data.padded_subbuf_size);
1495 if (ret) {
1496 goto end;
1497 }
1498
1499 end:
1500 return ret;
1501 }
1502
1503 static
1504 int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
1505 struct stream_subbuffer *subbuf)
1506 {
1507 int ret;
1508
1509 ret = extract_common_subbuffer_info(stream, subbuf);
1510 if (ret) {
1511 goto end;
1512 }
1513
1514 ret = kernctl_get_metadata_version(
1515 stream->wait_fd, &subbuf->info.metadata.version);
1516 if (ret) {
1517 goto end;
1518 }
1519
1520 end:
1521 return ret;
1522 }
1523
1524 static
1525 int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
1526 struct stream_subbuffer *subbuf)
1527 {
1528 int ret;
1529
1530 ret = extract_common_subbuffer_info(stream, subbuf);
1531 if (ret) {
1532 goto end;
1533 }
1534
1535 ret = kernctl_get_packet_size(
1536 stream->wait_fd, &subbuf->info.data.packet_size);
1537 if (ret < 0) {
1538 PERROR("Failed to get sub-buffer packet size");
1539 goto end;
1540 }
1541
1542 ret = kernctl_get_content_size(
1543 stream->wait_fd, &subbuf->info.data.content_size);
1544 if (ret < 0) {
1545 PERROR("Failed to get sub-buffer content size");
1546 goto end;
1547 }
1548
1549 ret = kernctl_get_timestamp_begin(
1550 stream->wait_fd, &subbuf->info.data.timestamp_begin);
1551 if (ret < 0) {
1552 PERROR("Failed to get sub-buffer begin timestamp");
1553 goto end;
1554 }
1555
1556 ret = kernctl_get_timestamp_end(
1557 stream->wait_fd, &subbuf->info.data.timestamp_end);
1558 if (ret < 0) {
1559 PERROR("Failed to get sub-buffer end timestamp");
1560 goto end;
1561 }
1562
1563 ret = kernctl_get_events_discarded(
1564 stream->wait_fd, &subbuf->info.data.events_discarded);
1565 if (ret) {
1566 PERROR("Failed to get sub-buffer events discarded count");
1567 goto end;
1568 }
1569
1570 ret = kernctl_get_sequence_number(stream->wait_fd,
1571 &subbuf->info.data.sequence_number.value);
1572 if (ret) {
1573 /* May not be supported by older LTTng-modules. */
1574 if (ret != -ENOTTY) {
1575 PERROR("Failed to get sub-buffer sequence number");
1576 goto end;
1577 }
1578 } else {
1579 subbuf->info.data.sequence_number.is_set = true;
1580 }
1581
1582 ret = kernctl_get_stream_id(
1583 stream->wait_fd, &subbuf->info.data.stream_id);
1584 if (ret < 0) {
1585 PERROR("Failed to get stream id");
1586 goto end;
1587 }
1588
1589 ret = kernctl_get_instance_id(stream->wait_fd,
1590 &subbuf->info.data.stream_instance_id.value);
1591 if (ret) {
1592 /* May not be supported by older LTTng-modules. */
1593 if (ret != -ENOTTY) {
1594 PERROR("Failed to get stream instance id");
1595 goto end;
1596 }
1597 } else {
1598 subbuf->info.data.stream_instance_id.is_set = true;
1599 }
1600 end:
1601 return ret;
1602 }
1603
1604 static
1605 enum get_next_subbuffer_status get_subbuffer_common(
1606 struct lttng_consumer_stream *stream,
1607 struct stream_subbuffer *subbuffer)
1608 {
1609 int ret;
1610 enum get_next_subbuffer_status status;
1611
1612 ret = kernctl_get_next_subbuf(stream->wait_fd);
1613 switch (ret) {
1614 case 0:
1615 status = GET_NEXT_SUBBUFFER_STATUS_OK;
1616 break;
1617 case -ENODATA:
1618 case -EAGAIN:
1619 /*
1620 * The caller only expects -ENODATA when there is no data to
1621 * read, but the kernel tracer returns -EAGAIN when there is
1622 * currently no data for a non-finalized stream, and -ENODATA
1623 * when there is no data for a finalized stream. Those can be
1624 * combined into a -ENODATA return value.
1625 */
1626 status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
1627 goto end;
1628 default:
1629 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1630 goto end;
1631 }
1632
1633 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
1634 stream, subbuffer);
1635 if (ret) {
1636 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1637 }
1638 end:
1639 return status;
1640 }
1641
1642 static
1643 enum get_next_subbuffer_status get_next_subbuffer_splice(
1644 struct lttng_consumer_stream *stream,
1645 struct stream_subbuffer *subbuffer)
1646 {
1647 const enum get_next_subbuffer_status status =
1648 get_subbuffer_common(stream, subbuffer);
1649
1650 if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
1651 goto end;
1652 }
1653
1654 subbuffer->buffer.fd = stream->wait_fd;
1655 end:
1656 return status;
1657 }
1658
1659 static
1660 enum get_next_subbuffer_status get_next_subbuffer_mmap(
1661 struct lttng_consumer_stream *stream,
1662 struct stream_subbuffer *subbuffer)
1663 {
1664 int ret;
1665 enum get_next_subbuffer_status status;
1666 const char *addr;
1667
1668 status = get_subbuffer_common(stream, subbuffer);
1669 if (status != GET_NEXT_SUBBUFFER_STATUS_OK) {
1670 goto end;
1671 }
1672
1673 ret = get_current_subbuf_addr(stream, &addr);
1674 if (ret) {
1675 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1676 goto end;
1677 }
1678
1679 subbuffer->buffer.buffer = lttng_buffer_view_init(
1680 addr, 0, subbuffer->info.data.padded_subbuf_size);
1681 end:
1682 return status;
1683 }
1684
1685 static
1686 enum get_next_subbuffer_status get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream,
1687 struct stream_subbuffer *subbuffer)
1688 {
1689 int ret;
1690 const char *addr;
1691 bool coherent;
1692 enum get_next_subbuffer_status status;
1693
1694 ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd,
1695 &coherent);
1696 if (ret) {
1697 goto end;
1698 }
1699
1700 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
1701 stream, subbuffer);
1702 if (ret) {
1703 goto end;
1704 }
1705
1706 LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
1707
1708 ret = get_current_subbuf_addr(stream, &addr);
1709 if (ret) {
1710 goto end;
1711 }
1712
1713 subbuffer->buffer.buffer = lttng_buffer_view_init(
1714 addr, 0, subbuffer->info.data.padded_subbuf_size);
1715 DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
1716 subbuffer->info.metadata.padded_subbuf_size,
1717 coherent ? "true" : "false");
1718 end:
1719 /*
1720 * The caller only expects -ENODATA when there is no data to read, but
1721 * the kernel tracer returns -EAGAIN when there is currently no data
1722 * for a non-finalized stream, and -ENODATA when there is no data for a
1723 * finalized stream. Those can be combined into a -ENODATA return value.
1724 */
1725 switch (ret) {
1726 case 0:
1727 status = GET_NEXT_SUBBUFFER_STATUS_OK;
1728 break;
1729 case -ENODATA:
1730 case -EAGAIN:
1731 /*
1732 * The caller only expects -ENODATA when there is no data to
1733 * read, but the kernel tracer returns -EAGAIN when there is
1734 * currently no data for a non-finalized stream, and -ENODATA
1735 * when there is no data for a finalized stream. Those can be
1736 * combined into a -ENODATA return value.
1737 */
1738 status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
1739 break;
1740 default:
1741 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
1742 break;
1743 }
1744
1745 return status;
1746 }
1747
1748 static
1749 int put_next_subbuffer(struct lttng_consumer_stream *stream,
1750 struct stream_subbuffer *subbuffer)
1751 {
1752 const int ret = kernctl_put_next_subbuf(stream->wait_fd);
1753
1754 if (ret) {
1755 if (ret == -EFAULT) {
1756 PERROR("Error in unreserving sub buffer");
1757 } else if (ret == -EIO) {
1758 /* Should never happen with newer LTTng versions */
1759 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1760 }
1761 }
1762
1763 return ret;
1764 }
1765
1766 static
1767 bool is_get_next_check_metadata_available(int tracer_fd)
1768 {
1769 const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL);
1770 const bool available = ret != -ENOTTY;
1771
1772 if (ret == 0) {
1773 /* get succeeded, make sure to put the subbuffer. */
1774 kernctl_put_subbuf(tracer_fd);
1775 }
1776
1777 return available;
1778 }
1779
1780 static
1781 int signal_metadata(struct lttng_consumer_stream *stream,
1782 struct lttng_consumer_local_data *ctx)
1783 {
1784 ASSERT_LOCKED(stream->metadata_rdv_lock);
1785 return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
1786 }
1787
1788 static
1789 int lttng_kconsumer_set_stream_ops(
1790 struct lttng_consumer_stream *stream)
1791 {
1792 int ret = 0;
1793
1794 if (stream->metadata_flag && stream->chan->is_live) {
1795 DBG("Attempting to enable metadata bucketization for live consumers");
1796 if (is_get_next_check_metadata_available(stream->wait_fd)) {
1797 DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
1798 stream->read_subbuffer_ops.get_next_subbuffer =
1799 get_next_subbuffer_metadata_check;
1800 ret = consumer_stream_enable_metadata_bucketization(
1801 stream);
1802 if (ret) {
1803 goto end;
1804 }
1805 } else {
1806 /*
1807 * The kernel tracer version is too old to indicate
1808 * when the metadata stream has reached a "coherent"
1809 * (parseable) point.
1810 *
1811 * This means that a live viewer may see an incoherent
1812 * sequence of metadata and fail to parse it.
1813 */
1814 WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
1815 metadata_bucket_destroy(stream->metadata_bucket);
1816 stream->metadata_bucket = NULL;
1817 }
1818
1819 stream->read_subbuffer_ops.on_sleep = signal_metadata;
1820 }
1821
1822 if (!stream->read_subbuffer_ops.get_next_subbuffer) {
1823 if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
1824 stream->read_subbuffer_ops.get_next_subbuffer =
1825 get_next_subbuffer_mmap;
1826 } else {
1827 stream->read_subbuffer_ops.get_next_subbuffer =
1828 get_next_subbuffer_splice;
1829 }
1830 }
1831
1832 if (stream->metadata_flag) {
1833 stream->read_subbuffer_ops.extract_subbuffer_info =
1834 extract_metadata_subbuffer_info;
1835 } else {
1836 stream->read_subbuffer_ops.extract_subbuffer_info =
1837 extract_data_subbuffer_info;
1838 if (stream->chan->is_live) {
1839 stream->read_subbuffer_ops.send_live_beacon =
1840 consumer_flush_kernel_index;
1841 }
1842 }
1843
1844 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
1845 end:
1846 return ret;
1847 }
1848
/*
 * Prepare a newly-received kernel stream for consumption: create its
 * output files (local traces only) and, for mmap output, map the ring
 * buffer, then install the stream's read_subbuffer operations.
 *
 * Returns 0 on success so the library keeps handling the FD internally;
 * returns a negative value on error.
 */
int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Don't create anything if this is set for streaming or if there is
	 * no current trace chunk on the parent channel.
	 */
	if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
			stream->chan->trace_chunk) {
		ret = consumer_stream_create_output_files(stream, true);
		if (ret) {
			goto error;
		}
	}

	if (stream->output == LTTNG_EVENT_MMAP) {
		/* get the len of the mmap region */
		unsigned long mmap_len;

		ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
		if (ret != 0) {
			PERROR("kernctl_get_mmap_len");
			goto error_close_fd;
		}
		stream->mmap_len = (size_t) mmap_len;

		/* Read-only, private mapping of the tracer's ring buffer. */
		stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ,
				MAP_PRIVATE, stream->wait_fd, 0);
		if (stream->mmap_base == MAP_FAILED) {
			PERROR("Error mmaping");
			ret = -1;
			goto error_close_fd;
		}
	}

	ret = lttng_kconsumer_set_stream_ops(stream);
	if (ret) {
		goto error_close_fd;
	}

	/* we return 0 to let the library handle the FD internally */
	return 0;

error_close_fd:
	/* Undo the output-file creation done above, if any. */
	if (stream->out_fd >= 0) {
		int err;

		err = close(stream->out_fd);
		assert(!err);
		stream->out_fd = -1;
	}
error:
	return ret;
}
1906
1907 /*
1908 * Check if data is still being extracted from the buffers for a specific
1909 * stream. Consumer data lock MUST be acquired before calling this function
1910 * and the stream lock.
1911 *
1912 * Return 1 if the traced data are still getting read else 0 meaning that the
1913 * data is available for trace viewer reading.
1914 */
1915 int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
1916 {
1917 int ret;
1918
1919 assert(stream);
1920
1921 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1922 ret = 0;
1923 goto end;
1924 }
1925
1926 ret = kernctl_get_next_subbuf(stream->wait_fd);
1927 if (ret == 0) {
1928 /* There is still data so let's put back this subbuffer. */
1929 ret = kernctl_put_subbuf(stream->wait_fd);
1930 assert(ret == 0);
1931 ret = 1; /* Data is pending */
1932 goto end;
1933 }
1934
1935 /* Data is NOT pending and ready to be read. */
1936 ret = 0;
1937
1938 end:
1939 return ret;
1940 }
This page took 0.110676 seconds and 4 git commands to generate.