Clean-up: consumer: prepend `the_` to global variable
[lttng-tools.git] / src / common / kernel-consumer / kernel-consumer.c
1 /*
2 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #define _LGPL_SOURCE
11 #include <assert.h>
12 #include <poll.h>
13 #include <pthread.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <sys/mman.h>
17 #include <sys/socket.h>
18 #include <sys/types.h>
19 #include <inttypes.h>
20 #include <unistd.h>
21 #include <sys/stat.h>
22 #include <stdint.h>
23
24 #include <bin/lttng-consumerd/health-consumerd.h>
25 #include <common/common.h>
26 #include <common/kernel-ctl/kernel-ctl.h>
27 #include <common/sessiond-comm/sessiond-comm.h>
28 #include <common/sessiond-comm/relayd.h>
29 #include <common/compat/fcntl.h>
30 #include <common/compat/endian.h>
31 #include <common/pipe.h>
32 #include <common/relayd/relayd.h>
33 #include <common/utils.h>
34 #include <common/consumer/consumer-stream.h>
35 #include <common/index/index.h>
36 #include <common/consumer/consumer-timer.h>
37 #include <common/optional.h>
38 #include <common/buffer-view.h>
39 #include <common/consumer/consumer.h>
40 #include <common/consumer/metadata-bucket.h>
41
42 #include "kernel-consumer.h"
43
44 extern struct lttng_consumer_global_data the_consumer_data;
45 extern int consumer_poll_timeout;
46
47 /*
48 * Take a snapshot for a specific fd
49 *
50 * Returns 0 on success, < 0 on error
51 */
52 int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
53 {
54 int ret = 0;
55 int infd = stream->wait_fd;
56
57 ret = kernctl_snapshot(infd);
58 /*
59 * -EAGAIN is not an error, it just means that there is no data to
60 * be read.
61 */
62 if (ret != 0 && ret != -EAGAIN) {
63 PERROR("Getting sub-buffer snapshot.");
64 }
65
66 return ret;
67 }
68
69 /*
70 * Sample consumed and produced positions for a specific fd.
71 *
72 * Returns 0 on success, < 0 on error.
73 */
74 int lttng_kconsumer_sample_snapshot_positions(
75 struct lttng_consumer_stream *stream)
76 {
77 assert(stream);
78
79 return kernctl_snapshot_sample_positions(stream->wait_fd);
80 }
81
82 /*
83 * Get the produced position
84 *
85 * Returns 0 on success, < 0 on error
86 */
87 int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
88 unsigned long *pos)
89 {
90 int ret;
91 int infd = stream->wait_fd;
92
93 ret = kernctl_snapshot_get_produced(infd, pos);
94 if (ret != 0) {
95 PERROR("kernctl_snapshot_get_produced");
96 }
97
98 return ret;
99 }
100
101 /*
102 * Get the consumerd position
103 *
104 * Returns 0 on success, < 0 on error
105 */
106 int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
107 unsigned long *pos)
108 {
109 int ret;
110 int infd = stream->wait_fd;
111
112 ret = kernctl_snapshot_get_consumed(infd, pos);
113 if (ret != 0) {
114 PERROR("kernctl_snapshot_get_consumed");
115 }
116
117 return ret;
118 }
119
120 static
121 int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
122 const char **addr)
123 {
124 int ret;
125 unsigned long mmap_offset;
126 const char *mmap_base = stream->mmap_base;
127
128 ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
129 if (ret < 0) {
130 PERROR("Failed to get mmap read offset");
131 goto error;
132 }
133
134 *addr = mmap_base + mmap_offset;
135 error:
136 return ret;
137 }
138
/*
 * Take a snapshot of all the streams of a channel: flush each stream, sample
 * its consumed/produced positions, then copy the sub-buffers between those
 * positions to the snapshot output (local tracefile or relayd).
 *
 * RCU read-side lock must be held across this function to ensure existence of
 * channel. The channel lock must be held by the caller.
 *
 * relayd_id of (uint64_t) -1ULL means "no relayd": write local output files.
 * nb_packets_per_stream caps how far back the copy starts (0 = whole buffer,
 * as interpreted by consumer_get_consume_start_pos()).
 *
 * Returns 0 on success, < 0 on error.
 */
static int lttng_kconsumer_snapshot_channel(
		struct lttng_consumer_channel *channel,
		uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_consumer_stream *stream;

	DBG("Kernel consumer snapshot channel %" PRIu64, key);

	rcu_read_lock();

	/* Splice is not supported yet for channel snapshot. */
	if (channel->output != CONSUMER_CHANNEL_MMAP) {
		ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",
				channel->name);
		ret = -1;
		goto end;
	}

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		unsigned long consumed_pos, produced_pos;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		assert(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto end_unlock;
		}
		/* The reference acquired above is released at the end of this iteration. */
		assert(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		/*
		 * Assign the received relayd ID so we can use it for streaming. The streams
		 * are not visible to anyone so this is OK to change it.
		 */
		stream->net_seq_idx = relayd_id;
		channel->relayd_id = relayd_id;
		if (relayd_id != (uint64_t) -1ULL) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				ERR("sending stream to relayd");
				goto end_unlock;
			}
		} else {
			/* Local snapshot: create the on-disk output files. */
			ret = consumer_stream_create_output_files(stream,
					false);
			if (ret < 0) {
				goto end_unlock;
			}
			DBG("Kernel consumer snapshot stream (%" PRIu64 ")",
					stream->key);
		}

		ret = kernctl_buffer_flush_empty(stream->wait_fd);
		if (ret < 0) {
			/*
			 * Doing a buffer flush which does not take into
			 * account empty packets. This is not perfect
			 * for stream intersection, but required as a
			 * fall-back when "flush_empty" is not
			 * implemented by lttng-modules.
			 */
			ret = kernctl_buffer_flush(stream->wait_fd);
			if (ret < 0) {
				ERR("Failed to flush kernel stream");
				goto end_unlock;
			}
			/*
			 * NOTE(review): a *successful* fall-back flush also jumps to
			 * end_unlock, which skips the position sampling and copy for
			 * this stream and aborts the loop for the remaining streams
			 * while returning 0 — confirm against upstream whether this
			 * is intentional.
			 */
			goto end_unlock;
		}

		ret = lttng_kconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking kernel snapshot");
			goto end_unlock;
		}

		ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced kernel snapshot position");
			goto end_unlock;
		}

		ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd kernel snapshot position");
			goto end_unlock;
		}

		/*
		 * Move the start position forward so that at most
		 * nb_packets_per_stream packets are copied.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

		/* Signed comparison handles position counter wrap-around. */
		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();
			DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);

			ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("kernctl_get_subbuf snapshot");
					goto end_unlock;
				}
				/* Sub-buffer unavailable: skip it and account a lost packet. */
				DBG("Kernel consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
			if (ret < 0) {
				ERR("Snapshot kernctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
					stream, &subbuf_view,
					padded_len - len);
			/*
			 * We write the padded len in local tracefiles but the data len
			 * when using a relay. Display the error but continue processing
			 * to try to release the subbuffer.
			 */
			if (relayd_id != (uint64_t) -1ULL) {
				if (read_len != len) {
					ERR("Error sending to the relay (ret: %zd != len: %lu)",
							read_len, len);
				}
			} else {
				if (read_len != padded_len) {
					ERR("Error writing to tracefile (ret: %zd != len: %lu)",
							read_len, padded_len);
				}
			}

			ret = kernctl_put_subbuf(stream->wait_fd);
			if (ret < 0) {
				ERR("Snapshot kernctl_put_subbuf");
				goto end_unlock;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Per-stream teardown: close the snapshot output. */
		if (relayd_id == (uint64_t) -1ULL) {
			if (stream->out_fd >= 0) {
				ret = close(stream->out_fd);
				if (ret < 0) {
					PERROR("Kernel consumer snapshot close out_fd");
					goto end_unlock;
				}
				stream->out_fd = -1;
			}
		} else {
			close_relayd_stream(stream);
			stream->net_seq_idx = (uint64_t) -1ULL;
		}
		/* Drop the trace chunk reference taken at the top of the loop. */
		lttng_trace_chunk_put(stream->trace_chunk);
		stream->trace_chunk = NULL;
		pthread_mutex_unlock(&stream->lock);
	}

	/* All good! */
	ret = 0;
	goto end;

error_put_subbuf:
	/*
	 * NOTE(review): this overwrites the error code with the result of
	 * kernctl_put_subbuf(); if the put succeeds the function returns 0
	 * despite having taken an error path — verify against upstream.
	 */
	ret = kernctl_put_subbuf(stream->wait_fd);
	if (ret < 0) {
		ERR("Snapshot kernctl_put_subbuf error path");
	}
end_unlock:
	pthread_mutex_unlock(&stream->lock);
end:
	rcu_read_unlock();
	return ret;
}
352
/*
 * Read the whole metadata available for a snapshot and write it to the
 * snapshot output (local tracefile or relayd), then destroy the metadata
 * stream.
 *
 * RCU read-side lock must be held across this function to ensure existence of
 * metadata_channel. The channel lock must be held by the caller.
 *
 * relayd_id of (uint64_t) -1ULL means "no relayd": write a local output file.
 *
 * Returns 0 on success, < 0 on error.
 */
static int lttng_kconsumer_snapshot_metadata(
		struct lttng_consumer_channel *metadata_channel,
		uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret, use_relayd = 0;
	ssize_t ret_read;
	struct lttng_consumer_stream *metadata_stream;

	assert(ctx);

	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	pthread_mutex_lock(&metadata_stream->lock);
	assert(metadata_channel->trace_chunk);
	assert(metadata_stream->trace_chunk);

	/* Flag once that we have a valid relayd for the stream. */
	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	if (use_relayd) {
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_snapshot;
		}
	} else {
		/* Local snapshot: create the on-disk output file. */
		ret = consumer_stream_create_output_files(metadata_stream,
				false);
		if (ret < 0) {
			goto error_snapshot;
		}
	}

	/* Drain the metadata stream until no more data is returned. */
	do {
		health_code_update();

		ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret_read < 0) {
			if (ret_read != -EAGAIN) {
				ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
						ret_read);
				ret = ret_read;
				goto error_snapshot;
			}
			/* ret_read is negative at this point so we will exit the loop. */
			continue;
		}
	} while (ret_read >= 0);

	/* Close the snapshot output. */
	if (use_relayd) {
		close_relayd_stream(metadata_stream);
		metadata_stream->net_seq_idx = (uint64_t) -1ULL;
	} else {
		if (metadata_stream->out_fd >= 0) {
			ret = close(metadata_stream->out_fd);
			if (ret < 0) {
				PERROR("Kernel consumer snapshot metadata close out_fd");
				/*
				 * Don't go on error here since the snapshot was successful at this
				 * point but somehow the close failed.
				 */
			}
			metadata_stream->out_fd = -1;
			lttng_trace_chunk_put(metadata_stream->trace_chunk);
			metadata_stream->trace_chunk = NULL;
		}
	}

	ret = 0;
error_snapshot:
	/*
	 * The metadata stream is torn down on both the success and error
	 * paths: unlock, remove it from the channel's stream list, destroy
	 * it and clear the channel's reference to it.
	 */
	pthread_mutex_unlock(&metadata_stream->lock);
	cds_list_del(&metadata_stream->send_node);
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;
	rcu_read_unlock();
	return ret;
}
445
446 /*
447 * Receive command from session daemon and process it.
448 *
449 * Return 1 on success else a negative value or 0.
450 */
451 int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
452 int sock, struct pollfd *consumer_sockpoll)
453 {
454 int ret_func;
455 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
456 struct lttcomm_consumer_msg msg;
457
458 health_code_update();
459
460 {
461 ssize_t ret_recv;
462
463 ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
464 if (ret_recv != sizeof(msg)) {
465 if (ret_recv > 0) {
466 lttng_consumer_send_error(ctx,
467 LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
468 ret_recv = -1;
469 }
470 return ret_recv;
471 }
472 }
473
474 health_code_update();
475
476 /* Deprecated command */
477 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
478
479 health_code_update();
480
481 /* relayd needs RCU read-side protection */
482 rcu_read_lock();
483
484 switch (msg.cmd_type) {
485 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
486 {
487 /* Session daemon status message are handled in the following call. */
488 consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
489 msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
490 &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
491 msg.u.relayd_sock.relayd_session_id);
492 goto end_nosignal;
493 }
494 case LTTNG_CONSUMER_ADD_CHANNEL:
495 {
496 struct lttng_consumer_channel *new_channel;
497 int ret_send_status, ret_add_channel;
498 const uint64_t chunk_id = msg.u.channel.chunk_id.value;
499
500 health_code_update();
501
502 /* First send a status message before receiving the fds. */
503 ret_send_status = consumer_send_status_msg(sock, ret_code);
504 if (ret_send_status < 0) {
505 /* Somehow, the session daemon is not responding anymore. */
506 goto error_fatal;
507 }
508
509 health_code_update();
510
511 DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
512 new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
513 msg.u.channel.session_id,
514 msg.u.channel.chunk_id.is_set ?
515 &chunk_id : NULL,
516 msg.u.channel.pathname,
517 msg.u.channel.name,
518 msg.u.channel.relayd_id, msg.u.channel.output,
519 msg.u.channel.tracefile_size,
520 msg.u.channel.tracefile_count, 0,
521 msg.u.channel.monitor,
522 msg.u.channel.live_timer_interval,
523 msg.u.channel.is_live,
524 NULL, NULL);
525 if (new_channel == NULL) {
526 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
527 goto end_nosignal;
528 }
529 new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
530 switch (msg.u.channel.output) {
531 case LTTNG_EVENT_SPLICE:
532 new_channel->output = CONSUMER_CHANNEL_SPLICE;
533 break;
534 case LTTNG_EVENT_MMAP:
535 new_channel->output = CONSUMER_CHANNEL_MMAP;
536 break;
537 default:
538 ERR("Channel output unknown %d", msg.u.channel.output);
539 goto end_nosignal;
540 }
541
542 /* Translate and save channel type. */
543 switch (msg.u.channel.type) {
544 case CONSUMER_CHANNEL_TYPE_DATA:
545 case CONSUMER_CHANNEL_TYPE_METADATA:
546 new_channel->type = msg.u.channel.type;
547 break;
548 default:
549 assert(0);
550 goto end_nosignal;
551 };
552
553 health_code_update();
554
555 if (ctx->on_recv_channel != NULL) {
556 int ret_recv_channel =
557 ctx->on_recv_channel(new_channel);
558 if (ret_recv_channel == 0) {
559 ret_add_channel = consumer_add_channel(
560 new_channel, ctx);
561 } else if (ret_recv_channel < 0) {
562 goto end_nosignal;
563 }
564 } else {
565 ret_add_channel =
566 consumer_add_channel(new_channel, ctx);
567 }
568 if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA &&
569 !ret_add_channel) {
570 int monitor_start_ret;
571
572 DBG("Consumer starting monitor timer");
573 consumer_timer_live_start(new_channel,
574 msg.u.channel.live_timer_interval);
575 monitor_start_ret = consumer_timer_monitor_start(
576 new_channel,
577 msg.u.channel.monitor_timer_interval);
578 if (monitor_start_ret < 0) {
579 ERR("Starting channel monitoring timer failed");
580 goto end_nosignal;
581 }
582 }
583
584 health_code_update();
585
586 /* If we received an error in add_channel, we need to report it. */
587 if (ret_add_channel < 0) {
588 ret_send_status = consumer_send_status_msg(
589 sock, ret_add_channel);
590 if (ret_send_status < 0) {
591 goto error_fatal;
592 }
593 goto end_nosignal;
594 }
595
596 goto end_nosignal;
597 }
598 case LTTNG_CONSUMER_ADD_STREAM:
599 {
600 int fd;
601 struct lttng_pipe *stream_pipe;
602 struct lttng_consumer_stream *new_stream;
603 struct lttng_consumer_channel *channel;
604 int alloc_ret = 0;
605 int ret_send_status, ret_poll, ret_get_max_subbuf_size;
606 ssize_t ret_pipe_write, ret_recv;
607
608 /*
609 * Get stream's channel reference. Needed when adding the stream to the
610 * global hash table.
611 */
612 channel = consumer_find_channel(msg.u.stream.channel_key);
613 if (!channel) {
614 /*
615 * We could not find the channel. Can happen if cpu hotplug
616 * happens while tearing down.
617 */
618 ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
619 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
620 }
621
622 health_code_update();
623
624 /* First send a status message before receiving the fds. */
625 ret_send_status = consumer_send_status_msg(sock, ret_code);
626 if (ret_send_status < 0) {
627 /* Somehow, the session daemon is not responding anymore. */
628 goto error_add_stream_fatal;
629 }
630
631 health_code_update();
632
633 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
634 /* Channel was not found. */
635 goto error_add_stream_nosignal;
636 }
637
638 /* Blocking call */
639 health_poll_entry();
640 ret_poll = lttng_consumer_poll_socket(consumer_sockpoll);
641 health_poll_exit();
642 if (ret_poll) {
643 goto error_add_stream_fatal;
644 }
645
646 health_code_update();
647
648 /* Get stream file descriptor from socket */
649 ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
650 if (ret_recv != sizeof(fd)) {
651 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
652 ret_func = ret_recv;
653 goto end;
654 }
655
656 health_code_update();
657
658 /*
659 * Send status code to session daemon only if the recv works. If the
660 * above recv() failed, the session daemon is notified through the
661 * error socket and the teardown is eventually done.
662 */
663 ret_send_status = consumer_send_status_msg(sock, ret_code);
664 if (ret_send_status < 0) {
665 /* Somehow, the session daemon is not responding anymore. */
666 goto error_add_stream_nosignal;
667 }
668
669 health_code_update();
670
671 pthread_mutex_lock(&channel->lock);
672 new_stream = consumer_stream_create(
673 channel,
674 channel->key,
675 fd,
676 channel->name,
677 channel->relayd_id,
678 channel->session_id,
679 channel->trace_chunk,
680 msg.u.stream.cpu,
681 &alloc_ret,
682 channel->type,
683 channel->monitor);
684 if (new_stream == NULL) {
685 switch (alloc_ret) {
686 case -ENOMEM:
687 case -EINVAL:
688 default:
689 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
690 break;
691 }
692 pthread_mutex_unlock(&channel->lock);
693 goto error_add_stream_nosignal;
694 }
695
696 new_stream->wait_fd = fd;
697 ret_get_max_subbuf_size = kernctl_get_max_subbuf_size(
698 new_stream->wait_fd, &new_stream->max_sb_size);
699 if (ret_get_max_subbuf_size < 0) {
700 pthread_mutex_unlock(&channel->lock);
701 ERR("Failed to get kernel maximal subbuffer size");
702 goto error_add_stream_nosignal;
703 }
704
705 consumer_stream_update_channel_attributes(new_stream,
706 channel);
707
708 /*
709 * We've just assigned the channel to the stream so increment the
710 * refcount right now. We don't need to increment the refcount for
711 * streams in no monitor because we handle manually the cleanup of
712 * those. It is very important to make sure there is NO prior
713 * consumer_del_stream() calls or else the refcount will be unbalanced.
714 */
715 if (channel->monitor) {
716 uatomic_inc(&new_stream->chan->refcount);
717 }
718
719 /*
720 * The buffer flush is done on the session daemon side for the kernel
721 * so no need for the stream "hangup_flush_done" variable to be
722 * tracked. This is important for a kernel stream since we don't rely
723 * on the flush state of the stream to read data. It's not the case for
724 * user space tracing.
725 */
726 new_stream->hangup_flush_done = 0;
727
728 health_code_update();
729
730 pthread_mutex_lock(&new_stream->lock);
731 if (ctx->on_recv_stream) {
732 int ret_recv_stream = ctx->on_recv_stream(new_stream);
733 if (ret_recv_stream < 0) {
734 pthread_mutex_unlock(&new_stream->lock);
735 pthread_mutex_unlock(&channel->lock);
736 consumer_stream_free(new_stream);
737 goto error_add_stream_nosignal;
738 }
739 }
740 health_code_update();
741
742 if (new_stream->metadata_flag) {
743 channel->metadata_stream = new_stream;
744 }
745
746 /* Do not monitor this stream. */
747 if (!channel->monitor) {
748 DBG("Kernel consumer add stream %s in no monitor mode with "
749 "relayd id %" PRIu64, new_stream->name,
750 new_stream->net_seq_idx);
751 cds_list_add(&new_stream->send_node, &channel->streams.head);
752 pthread_mutex_unlock(&new_stream->lock);
753 pthread_mutex_unlock(&channel->lock);
754 goto end_add_stream;
755 }
756
757 /* Send stream to relayd if the stream has an ID. */
758 if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
759 int ret_send_relayd_stream;
760
761 ret_send_relayd_stream = consumer_send_relayd_stream(
762 new_stream, new_stream->chan->pathname);
763 if (ret_send_relayd_stream < 0) {
764 pthread_mutex_unlock(&new_stream->lock);
765 pthread_mutex_unlock(&channel->lock);
766 consumer_stream_free(new_stream);
767 goto error_add_stream_nosignal;
768 }
769
770 /*
771 * If adding an extra stream to an already
772 * existing channel (e.g. cpu hotplug), we need
773 * to send the "streams_sent" command to relayd.
774 */
775 if (channel->streams_sent_to_relayd) {
776 int ret_send_relayd_streams_sent;
777
778 ret_send_relayd_streams_sent =
779 consumer_send_relayd_streams_sent(
780 new_stream->net_seq_idx);
781 if (ret_send_relayd_streams_sent < 0) {
782 pthread_mutex_unlock(&new_stream->lock);
783 pthread_mutex_unlock(&channel->lock);
784 goto error_add_stream_nosignal;
785 }
786 }
787 }
788 pthread_mutex_unlock(&new_stream->lock);
789 pthread_mutex_unlock(&channel->lock);
790
791 /* Get the right pipe where the stream will be sent. */
792 if (new_stream->metadata_flag) {
793 consumer_add_metadata_stream(new_stream);
794 stream_pipe = ctx->consumer_metadata_pipe;
795 } else {
796 consumer_add_data_stream(new_stream);
797 stream_pipe = ctx->consumer_data_pipe;
798 }
799
800 /* Visible to other threads */
801 new_stream->globally_visible = 1;
802
803 health_code_update();
804
805 ret_pipe_write = lttng_pipe_write(
806 stream_pipe, &new_stream, sizeof(new_stream));
807 if (ret_pipe_write < 0) {
808 ERR("Consumer write %s stream to pipe %d",
809 new_stream->metadata_flag ? "metadata" : "data",
810 lttng_pipe_get_writefd(stream_pipe));
811 if (new_stream->metadata_flag) {
812 consumer_del_stream_for_metadata(new_stream);
813 } else {
814 consumer_del_stream_for_data(new_stream);
815 }
816 goto error_add_stream_nosignal;
817 }
818
819 DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
820 new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id);
821 end_add_stream:
822 break;
823 error_add_stream_nosignal:
824 goto end_nosignal;
825 error_add_stream_fatal:
826 goto error_fatal;
827 }
828 case LTTNG_CONSUMER_STREAMS_SENT:
829 {
830 struct lttng_consumer_channel *channel;
831 int ret_send_status;
832
833 /*
834 * Get stream's channel reference. Needed when adding the stream to the
835 * global hash table.
836 */
837 channel = consumer_find_channel(msg.u.sent_streams.channel_key);
838 if (!channel) {
839 /*
840 * We could not find the channel. Can happen if cpu hotplug
841 * happens while tearing down.
842 */
843 ERR("Unable to find channel key %" PRIu64,
844 msg.u.sent_streams.channel_key);
845 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
846 }
847
848 health_code_update();
849
850 /*
851 * Send status code to session daemon.
852 */
853 ret_send_status = consumer_send_status_msg(sock, ret_code);
854 if (ret_send_status < 0 ||
855 ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
856 /* Somehow, the session daemon is not responding anymore. */
857 goto error_streams_sent_nosignal;
858 }
859
860 health_code_update();
861
862 /*
863 * We should not send this message if we don't monitor the
864 * streams in this channel.
865 */
866 if (!channel->monitor) {
867 goto end_error_streams_sent;
868 }
869
870 health_code_update();
871 /* Send stream to relayd if the stream has an ID. */
872 if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
873 int ret_send_relay_streams;
874
875 ret_send_relay_streams = consumer_send_relayd_streams_sent(
876 msg.u.sent_streams.net_seq_idx);
877 if (ret_send_relay_streams < 0) {
878 goto error_streams_sent_nosignal;
879 }
880 channel->streams_sent_to_relayd = true;
881 }
882 end_error_streams_sent:
883 break;
884 error_streams_sent_nosignal:
885 goto end_nosignal;
886 }
887 case LTTNG_CONSUMER_UPDATE_STREAM:
888 {
889 rcu_read_unlock();
890 return -ENOSYS;
891 }
892 case LTTNG_CONSUMER_DESTROY_RELAYD:
893 {
894 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
895 struct consumer_relayd_sock_pair *relayd;
896 int ret_send_status;
897
898 DBG("Kernel consumer destroying relayd %" PRIu64, index);
899
900 /* Get relayd reference if exists. */
901 relayd = consumer_find_relayd(index);
902 if (relayd == NULL) {
903 DBG("Unable to find relayd %" PRIu64, index);
904 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
905 }
906
907 /*
908 * Each relayd socket pair has a refcount of stream attached to it
909 * which tells if the relayd is still active or not depending on the
910 * refcount value.
911 *
912 * This will set the destroy flag of the relayd object and destroy it
913 * if the refcount reaches zero when called.
914 *
915 * The destroy can happen either here or when a stream fd hangs up.
916 */
917 if (relayd) {
918 consumer_flag_relayd_for_destroy(relayd);
919 }
920
921 health_code_update();
922
923 ret_send_status = consumer_send_status_msg(sock, ret_code);
924 if (ret_send_status < 0) {
925 /* Somehow, the session daemon is not responding anymore. */
926 goto error_fatal;
927 }
928
929 goto end_nosignal;
930 }
931 case LTTNG_CONSUMER_DATA_PENDING:
932 {
933 int32_t ret_data_pending;
934 uint64_t id = msg.u.data_pending.session_id;
935 ssize_t ret_send;
936
937 DBG("Kernel consumer data pending command for id %" PRIu64, id);
938
939 ret_data_pending = consumer_data_pending(id);
940
941 health_code_update();
942
943 /* Send back returned value to session daemon */
944 ret_send = lttcomm_send_unix_sock(sock, &ret_data_pending,
945 sizeof(ret_data_pending));
946 if (ret_send < 0) {
947 PERROR("send data pending ret code");
948 goto error_fatal;
949 }
950
951 /*
952 * No need to send back a status message since the data pending
953 * returned value is the response.
954 */
955 break;
956 }
957 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
958 {
959 struct lttng_consumer_channel *channel;
960 uint64_t key = msg.u.snapshot_channel.key;
961 int ret_send_status;
962
963 channel = consumer_find_channel(key);
964 if (!channel) {
965 ERR("Channel %" PRIu64 " not found", key);
966 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
967 } else {
968 pthread_mutex_lock(&channel->lock);
969 if (msg.u.snapshot_channel.metadata == 1) {
970 int ret_snapshot;
971
972 ret_snapshot = lttng_kconsumer_snapshot_metadata(
973 channel, key,
974 msg.u.snapshot_channel.pathname,
975 msg.u.snapshot_channel.relayd_id,
976 ctx);
977 if (ret_snapshot < 0) {
978 ERR("Snapshot metadata failed");
979 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
980 }
981 } else {
982 int ret_snapshot;
983
984 ret_snapshot = lttng_kconsumer_snapshot_channel(
985 channel, key,
986 msg.u.snapshot_channel.pathname,
987 msg.u.snapshot_channel.relayd_id,
988 msg.u.snapshot_channel
989 .nb_packets_per_stream,
990 ctx);
991 if (ret_snapshot < 0) {
992 ERR("Snapshot channel failed");
993 ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
994 }
995 }
996 pthread_mutex_unlock(&channel->lock);
997 }
998 health_code_update();
999
1000 ret_send_status = consumer_send_status_msg(sock, ret_code);
1001 if (ret_send_status < 0) {
1002 /* Somehow, the session daemon is not responding anymore. */
1003 goto end_nosignal;
1004 }
1005 break;
1006 }
1007 case LTTNG_CONSUMER_DESTROY_CHANNEL:
1008 {
1009 uint64_t key = msg.u.destroy_channel.key;
1010 struct lttng_consumer_channel *channel;
1011 int ret_send_status;
1012
1013 channel = consumer_find_channel(key);
1014 if (!channel) {
1015 ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
1016 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1017 }
1018
1019 health_code_update();
1020
1021 ret_send_status = consumer_send_status_msg(sock, ret_code);
1022 if (ret_send_status < 0) {
1023 /* Somehow, the session daemon is not responding anymore. */
1024 goto end_destroy_channel;
1025 }
1026
1027 health_code_update();
1028
1029 /* Stop right now if no channel was found. */
1030 if (!channel) {
1031 goto end_destroy_channel;
1032 }
1033
1034 /*
1035 * This command should ONLY be issued for channel with streams set in
1036 * no monitor mode.
1037 */
1038 assert(!channel->monitor);
1039
1040 /*
1041 * The refcount should ALWAYS be 0 in the case of a channel in no
1042 * monitor mode.
1043 */
1044 assert(!uatomic_sub_return(&channel->refcount, 1));
1045
1046 consumer_del_channel(channel);
1047 end_destroy_channel:
1048 goto end_nosignal;
1049 }
1050 case LTTNG_CONSUMER_DISCARDED_EVENTS:
1051 {
1052 ssize_t ret;
1053 uint64_t count;
1054 struct lttng_consumer_channel *channel;
1055 uint64_t id = msg.u.discarded_events.session_id;
1056 uint64_t key = msg.u.discarded_events.channel_key;
1057
1058 DBG("Kernel consumer discarded events command for session id %"
1059 PRIu64 ", channel key %" PRIu64, id, key);
1060
1061 channel = consumer_find_channel(key);
1062 if (!channel) {
1063 ERR("Kernel consumer discarded events channel %"
1064 PRIu64 " not found", key);
1065 count = 0;
1066 } else {
1067 count = channel->discarded_events;
1068 }
1069
1070 health_code_update();
1071
1072 /* Send back returned value to session daemon */
1073 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1074 if (ret < 0) {
1075 PERROR("send discarded events");
1076 goto error_fatal;
1077 }
1078
1079 break;
1080 }
1081 case LTTNG_CONSUMER_LOST_PACKETS:
1082 {
1083 ssize_t ret;
1084 uint64_t count;
1085 struct lttng_consumer_channel *channel;
1086 uint64_t id = msg.u.lost_packets.session_id;
1087 uint64_t key = msg.u.lost_packets.channel_key;
1088
1089 DBG("Kernel consumer lost packets command for session id %"
1090 PRIu64 ", channel key %" PRIu64, id, key);
1091
1092 channel = consumer_find_channel(key);
1093 if (!channel) {
1094 ERR("Kernel consumer lost packets channel %"
1095 PRIu64 " not found", key);
1096 count = 0;
1097 } else {
1098 count = channel->lost_packets;
1099 }
1100
1101 health_code_update();
1102
1103 /* Send back returned value to session daemon */
1104 ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
1105 if (ret < 0) {
1106 PERROR("send lost packets");
1107 goto error_fatal;
1108 }
1109
1110 break;
1111 }
1112 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
1113 {
1114 int channel_monitor_pipe;
1115 int ret_send_status, ret_set_channel_monitor_pipe;
1116 ssize_t ret_recv;
1117
1118 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1119 /* Successfully received the command's type. */
1120 ret_send_status = consumer_send_status_msg(sock, ret_code);
1121 if (ret_send_status < 0) {
1122 goto error_fatal;
1123 }
1124
1125 ret_recv = lttcomm_recv_fds_unix_sock(
1126 sock, &channel_monitor_pipe, 1);
1127 if (ret_recv != sizeof(channel_monitor_pipe)) {
1128 ERR("Failed to receive channel monitor pipe");
1129 goto error_fatal;
1130 }
1131
1132 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
1133 ret_set_channel_monitor_pipe =
1134 consumer_timer_thread_set_channel_monitor_pipe(
1135 channel_monitor_pipe);
1136 if (!ret_set_channel_monitor_pipe) {
1137 int flags;
1138 int ret_fcntl;
1139
1140 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1141 /* Set the pipe as non-blocking. */
1142 ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
1143 if (ret_fcntl == -1) {
1144 PERROR("fcntl get flags of the channel monitoring pipe");
1145 goto error_fatal;
1146 }
1147 flags = ret_fcntl;
1148
1149 ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL,
1150 flags | O_NONBLOCK);
1151 if (ret_fcntl == -1) {
1152 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
1153 goto error_fatal;
1154 }
1155 DBG("Channel monitor pipe set as non-blocking");
1156 } else {
1157 ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
1158 }
1159 ret_send_status = consumer_send_status_msg(sock, ret_code);
1160 if (ret_send_status < 0) {
1161 goto error_fatal;
1162 }
1163 break;
1164 }
1165 case LTTNG_CONSUMER_ROTATE_CHANNEL:
1166 {
1167 struct lttng_consumer_channel *channel;
1168 uint64_t key = msg.u.rotate_channel.key;
1169 int ret_send_status;
1170
1171 DBG("Consumer rotate channel %" PRIu64, key);
1172
1173 channel = consumer_find_channel(key);
1174 if (!channel) {
1175 ERR("Channel %" PRIu64 " not found", key);
1176 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1177 } else {
1178 /*
1179 * Sample the rotate position of all the streams in this channel.
1180 */
1181 int ret_rotate_channel;
1182
1183 ret_rotate_channel = lttng_consumer_rotate_channel(
1184 channel, key,
1185 msg.u.rotate_channel.relayd_id,
1186 msg.u.rotate_channel.metadata, ctx);
1187 if (ret_rotate_channel < 0) {
1188 ERR("Rotate channel failed");
1189 ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
1190 }
1191
1192 health_code_update();
1193 }
1194
1195 ret_send_status = consumer_send_status_msg(sock, ret_code);
1196 if (ret_send_status < 0) {
1197 /* Somehow, the session daemon is not responding anymore. */
1198 goto error_rotate_channel;
1199 }
1200 if (channel) {
1201 /* Rotate the streams that are ready right now. */
1202 int ret_rotate;
1203
1204 ret_rotate = lttng_consumer_rotate_ready_streams(
1205 channel, key, ctx);
1206 if (ret_rotate < 0) {
1207 ERR("Rotate ready streams failed");
1208 }
1209 }
1210 break;
1211 error_rotate_channel:
1212 goto end_nosignal;
1213 }
1214 case LTTNG_CONSUMER_CLEAR_CHANNEL:
1215 {
1216 struct lttng_consumer_channel *channel;
1217 uint64_t key = msg.u.clear_channel.key;
1218 int ret_send_status;
1219
1220 channel = consumer_find_channel(key);
1221 if (!channel) {
1222 DBG("Channel %" PRIu64 " not found", key);
1223 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1224 } else {
1225 int ret_clear_channel;
1226
1227 ret_clear_channel =
1228 lttng_consumer_clear_channel(channel);
1229 if (ret_clear_channel) {
1230 ERR("Clear channel failed");
1231 ret_code = ret_clear_channel;
1232 }
1233
1234 health_code_update();
1235 }
1236
1237 ret_send_status = consumer_send_status_msg(sock, ret_code);
1238 if (ret_send_status < 0) {
1239 /* Somehow, the session daemon is not responding anymore. */
1240 goto end_nosignal;
1241 }
1242
1243 break;
1244 }
1245 case LTTNG_CONSUMER_INIT:
1246 {
1247 int ret_send_status;
1248
1249 ret_code = lttng_consumer_init_command(ctx,
1250 msg.u.init.sessiond_uuid);
1251 health_code_update();
1252 ret_send_status = consumer_send_status_msg(sock, ret_code);
1253 if (ret_send_status < 0) {
1254 /* Somehow, the session daemon is not responding anymore. */
1255 goto end_nosignal;
1256 }
1257 break;
1258 }
1259 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
1260 {
1261 const struct lttng_credentials credentials = {
1262 .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid),
1263 .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid),
1264 };
1265 const bool is_local_trace =
1266 !msg.u.create_trace_chunk.relayd_id.is_set;
1267 const uint64_t relayd_id =
1268 msg.u.create_trace_chunk.relayd_id.value;
1269 const char *chunk_override_name =
1270 *msg.u.create_trace_chunk.override_name ?
1271 msg.u.create_trace_chunk.override_name :
1272 NULL;
1273 struct lttng_directory_handle *chunk_directory_handle = NULL;
1274
1275 /*
1276 * The session daemon will only provide a chunk directory file
1277 * descriptor for local traces.
1278 */
1279 if (is_local_trace) {
1280 int chunk_dirfd;
1281 int ret_send_status;
1282 ssize_t ret_recv;
1283
			/* Acknowledge the reception of the command. */
1285 ret_send_status = consumer_send_status_msg(
1286 sock, LTTCOMM_CONSUMERD_SUCCESS);
1287 if (ret_send_status < 0) {
1288 /* Somehow, the session daemon is not responding anymore. */
1289 goto end_nosignal;
1290 }
1291
1292 ret_recv = lttcomm_recv_fds_unix_sock(
1293 sock, &chunk_dirfd, 1);
1294 if (ret_recv != sizeof(chunk_dirfd)) {
1295 ERR("Failed to receive trace chunk directory file descriptor");
1296 goto error_fatal;
1297 }
1298
1299 DBG("Received trace chunk directory fd (%d)",
1300 chunk_dirfd);
1301 chunk_directory_handle = lttng_directory_handle_create_from_dirfd(
1302 chunk_dirfd);
1303 if (!chunk_directory_handle) {
1304 ERR("Failed to initialize chunk directory handle from directory file descriptor");
1305 if (close(chunk_dirfd)) {
1306 PERROR("Failed to close chunk directory file descriptor");
1307 }
1308 goto error_fatal;
1309 }
1310 }
1311
1312 ret_code = lttng_consumer_create_trace_chunk(
1313 !is_local_trace ? &relayd_id : NULL,
1314 msg.u.create_trace_chunk.session_id,
1315 msg.u.create_trace_chunk.chunk_id,
1316 (time_t) msg.u.create_trace_chunk
1317 .creation_timestamp,
1318 chunk_override_name,
1319 msg.u.create_trace_chunk.credentials.is_set ?
1320 &credentials :
1321 NULL,
1322 chunk_directory_handle);
1323 lttng_directory_handle_put(chunk_directory_handle);
1324 goto end_msg_sessiond;
1325 }
1326 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
1327 {
1328 enum lttng_trace_chunk_command_type close_command =
1329 msg.u.close_trace_chunk.close_command.value;
1330 const uint64_t relayd_id =
1331 msg.u.close_trace_chunk.relayd_id.value;
1332 struct lttcomm_consumer_close_trace_chunk_reply reply;
1333 char path[LTTNG_PATH_MAX];
1334 ssize_t ret_send;
1335
1336 ret_code = lttng_consumer_close_trace_chunk(
1337 msg.u.close_trace_chunk.relayd_id.is_set ?
1338 &relayd_id :
1339 NULL,
1340 msg.u.close_trace_chunk.session_id,
1341 msg.u.close_trace_chunk.chunk_id,
1342 (time_t) msg.u.close_trace_chunk.close_timestamp,
1343 msg.u.close_trace_chunk.close_command.is_set ?
1344 &close_command :
1345 NULL, path);
1346 reply.ret_code = ret_code;
1347 reply.path_length = strlen(path) + 1;
1348 ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
1349 if (ret_send != sizeof(reply)) {
1350 goto error_fatal;
1351 }
1352 ret_send = lttcomm_send_unix_sock(
1353 sock, path, reply.path_length);
1354 if (ret_send != reply.path_length) {
1355 goto error_fatal;
1356 }
1357 goto end_nosignal;
1358 }
1359 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
1360 {
1361 const uint64_t relayd_id =
1362 msg.u.trace_chunk_exists.relayd_id.value;
1363
1364 ret_code = lttng_consumer_trace_chunk_exists(
1365 msg.u.trace_chunk_exists.relayd_id.is_set ?
1366 &relayd_id : NULL,
1367 msg.u.trace_chunk_exists.session_id,
1368 msg.u.trace_chunk_exists.chunk_id);
1369 goto end_msg_sessiond;
1370 }
1371 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
1372 {
1373 const uint64_t key = msg.u.open_channel_packets.key;
1374 struct lttng_consumer_channel *channel =
1375 consumer_find_channel(key);
1376
1377 if (channel) {
1378 pthread_mutex_lock(&channel->lock);
1379 ret_code = lttng_consumer_open_channel_packets(channel);
1380 pthread_mutex_unlock(&channel->lock);
1381 } else {
1382 WARN("Channel %" PRIu64 " not found", key);
1383 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
1384 }
1385
1386 health_code_update();
1387 goto end_msg_sessiond;
1388 }
1389 default:
1390 goto end_nosignal;
1391 }
1392
1393 end_nosignal:
1394 /*
1395 * Return 1 to indicate success since the 0 value can be a socket
1396 * shutdown during the recv() or send() call.
1397 */
1398 ret_func = 1;
1399 goto end;
1400 error_fatal:
1401 /* This will issue a consumer stop. */
1402 ret_func = -1;
1403 goto end;
1404 end_msg_sessiond:
1405 /*
1406 * The returned value here is not useful since either way we'll return 1 to
1407 * the caller because the session daemon socket management is done
1408 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
1409 */
1410 {
1411 int ret_send_status;
1412
1413 ret_send_status = consumer_send_status_msg(sock, ret_code);
1414 if (ret_send_status < 0) {
1415 goto error_fatal;
1416 }
1417 }
1418
1419 ret_func = 1;
1420
1421 end:
1422 health_code_update();
1423 rcu_read_unlock();
1424 return ret_func;
1425 }
1426
1427 /*
1428 * Sync metadata meaning request them to the session daemon and snapshot to the
1429 * metadata thread can consumer them.
1430 *
1431 * Metadata stream lock MUST be acquired.
1432 */
1433 enum sync_metadata_status lttng_kconsumer_sync_metadata(
1434 struct lttng_consumer_stream *metadata)
1435 {
1436 int ret;
1437 enum sync_metadata_status status;
1438
1439 assert(metadata);
1440
1441 ret = kernctl_buffer_flush(metadata->wait_fd);
1442 if (ret < 0) {
1443 ERR("Failed to flush kernel stream");
1444 status = SYNC_METADATA_STATUS_ERROR;
1445 goto end;
1446 }
1447
1448 ret = kernctl_snapshot(metadata->wait_fd);
1449 if (ret < 0) {
1450 if (errno == EAGAIN) {
1451 /* No new metadata, exit. */
1452 DBG("Sync metadata, no new kernel metadata");
1453 status = SYNC_METADATA_STATUS_NO_DATA;
1454 } else {
1455 ERR("Sync metadata, taking kernel snapshot failed.");
1456 status = SYNC_METADATA_STATUS_ERROR;
1457 }
1458 } else {
1459 status = SYNC_METADATA_STATUS_NEW_DATA;
1460 }
1461
1462 end:
1463 return status;
1464 }
1465
1466 static
1467 int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
1468 struct stream_subbuffer *subbuf)
1469 {
1470 int ret;
1471
1472 ret = kernctl_get_subbuf_size(
1473 stream->wait_fd, &subbuf->info.data.subbuf_size);
1474 if (ret) {
1475 goto end;
1476 }
1477
1478 ret = kernctl_get_padded_subbuf_size(
1479 stream->wait_fd, &subbuf->info.data.padded_subbuf_size);
1480 if (ret) {
1481 goto end;
1482 }
1483
1484 end:
1485 return ret;
1486 }
1487
1488 static
1489 int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
1490 struct stream_subbuffer *subbuf)
1491 {
1492 int ret;
1493
1494 ret = extract_common_subbuffer_info(stream, subbuf);
1495 if (ret) {
1496 goto end;
1497 }
1498
1499 ret = kernctl_get_metadata_version(
1500 stream->wait_fd, &subbuf->info.metadata.version);
1501 if (ret) {
1502 goto end;
1503 }
1504
1505 end:
1506 return ret;
1507 }
1508
/*
 * Extract the information of a data sub-buffer: common sizes, packet and
 * content sizes, begin/end timestamps, discarded event count and, when the
 * tracer supports it, the sequence number and stream instance id.
 *
 * Returns 0 on success, a kernctl error code otherwise (the partially
 * filled 'subbuf' must then be discarded by the caller).
 */
static
int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuf)
{
	int ret;

	/* Sub-buffer size and padded size, shared with metadata streams. */
	ret = extract_common_subbuffer_info(stream, subbuf);
	if (ret) {
		goto end;
	}

	ret = kernctl_get_packet_size(
			stream->wait_fd, &subbuf->info.data.packet_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer packet size");
		goto end;
	}

	ret = kernctl_get_content_size(
			stream->wait_fd, &subbuf->info.data.content_size);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer content size");
		goto end;
	}

	ret = kernctl_get_timestamp_begin(
			stream->wait_fd, &subbuf->info.data.timestamp_begin);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer begin timestamp");
		goto end;
	}

	ret = kernctl_get_timestamp_end(
			stream->wait_fd, &subbuf->info.data.timestamp_end);
	if (ret < 0) {
		PERROR("Failed to get sub-buffer end timestamp");
		goto end;
	}

	ret = kernctl_get_events_discarded(
			stream->wait_fd, &subbuf->info.data.events_discarded);
	if (ret) {
		PERROR("Failed to get sub-buffer events discarded count");
		goto end;
	}

	/* Optional: only set when the tracer implements the ioctl. */
	ret = kernctl_get_sequence_number(stream->wait_fd,
			&subbuf->info.data.sequence_number.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get sub-buffer sequence number");
			goto end;
		}
	} else {
		subbuf->info.data.sequence_number.is_set = true;
	}

	ret = kernctl_get_stream_id(
			stream->wait_fd, &subbuf->info.data.stream_id);
	if (ret < 0) {
		PERROR("Failed to get stream id");
		goto end;
	}

	/* Optional: same tracer-support caveat as the sequence number. */
	ret = kernctl_get_instance_id(stream->wait_fd,
			&subbuf->info.data.stream_instance_id.value);
	if (ret) {
		/* May not be supported by older LTTng-modules. */
		if (ret != -ENOTTY) {
			PERROR("Failed to get stream instance id");
			goto end;
		}
	} else {
		subbuf->info.data.stream_instance_id.is_set = true;
	}
end:
	return ret;
}
1588
1589 static
1590 int get_subbuffer_common(struct lttng_consumer_stream *stream,
1591 struct stream_subbuffer *subbuffer)
1592 {
1593 int ret;
1594
1595 ret = kernctl_get_next_subbuf(stream->wait_fd);
1596 if (ret) {
1597 goto end;
1598 }
1599
1600 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
1601 stream, subbuffer);
1602 end:
1603 return ret;
1604 }
1605
1606 static
1607 int get_next_subbuffer_splice(struct lttng_consumer_stream *stream,
1608 struct stream_subbuffer *subbuffer)
1609 {
1610 int ret;
1611
1612 ret = get_subbuffer_common(stream, subbuffer);
1613 if (ret) {
1614 goto end;
1615 }
1616
1617 subbuffer->buffer.fd = stream->wait_fd;
1618 end:
1619 return ret;
1620 }
1621
1622 static
1623 int get_next_subbuffer_mmap(struct lttng_consumer_stream *stream,
1624 struct stream_subbuffer *subbuffer)
1625 {
1626 int ret;
1627 const char *addr;
1628
1629 ret = get_subbuffer_common(stream, subbuffer);
1630 if (ret) {
1631 goto end;
1632 }
1633
1634 ret = get_current_subbuf_addr(stream, &addr);
1635 if (ret) {
1636 goto end;
1637 }
1638
1639 subbuffer->buffer.buffer = lttng_buffer_view_init(
1640 addr, 0, subbuffer->info.data.padded_subbuf_size);
1641 end:
1642 return ret;
1643 }
1644
/*
 * Reserve the next metadata sub-buffer and extract its information, also
 * querying the tracer for whether the metadata contained is "coherent"
 * (i.e. parseable on its own by a live viewer).
 *
 * Returns 0 on success, the kernctl error code otherwise.
 */
static
int get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream,
		struct stream_subbuffer *subbuffer)
{
	int ret;
	const char *addr;
	bool coherent;

	ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd,
			&coherent);
	if (ret) {
		goto end;
	}

	ret = stream->read_subbuffer_ops.extract_subbuffer_info(
			stream, subbuffer);
	if (ret) {
		goto end;
	}

	LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);

	ret = get_current_subbuf_addr(stream, &addr);
	if (ret) {
		goto end;
	}

	/*
	 * NOTE(review): the view is sized with info.data.padded_subbuf_size
	 * while the DBG below reads info.metadata.padded_subbuf_size; this
	 * presumably relies on both union members sharing their leading
	 * layout — confirm against struct stream_subbuffer.
	 */
	subbuffer->buffer.buffer = lttng_buffer_view_init(
			addr, 0, subbuffer->info.data.padded_subbuf_size);
	DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
			subbuffer->info.metadata.padded_subbuf_size,
			coherent ? "true" : "false");
end:
	return ret;
}
1680
1681 static
1682 int put_next_subbuffer(struct lttng_consumer_stream *stream,
1683 struct stream_subbuffer *subbuffer)
1684 {
1685 const int ret = kernctl_put_next_subbuf(stream->wait_fd);
1686
1687 if (ret) {
1688 if (ret == -EFAULT) {
1689 PERROR("Error in unreserving sub buffer");
1690 } else if (ret == -EIO) {
1691 /* Should never happen with newer LTTng versions */
1692 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
1693 }
1694 }
1695
1696 return ret;
1697 }
1698
1699 static
1700 bool is_get_next_check_metadata_available(int tracer_fd)
1701 {
1702 return kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL) !=
1703 -ENOTTY;
1704 }
1705
1706 static
1707 int lttng_kconsumer_set_stream_ops(
1708 struct lttng_consumer_stream *stream)
1709 {
1710 int ret = 0;
1711
1712 if (stream->metadata_flag && stream->chan->is_live) {
1713 DBG("Attempting to enable metadata bucketization for live consumers");
1714 if (is_get_next_check_metadata_available(stream->wait_fd)) {
1715 DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
1716 stream->read_subbuffer_ops.get_next_subbuffer =
1717 get_next_subbuffer_metadata_check;
1718 ret = consumer_stream_enable_metadata_bucketization(
1719 stream);
1720 if (ret) {
1721 goto end;
1722 }
1723 } else {
1724 /*
1725 * The kernel tracer version is too old to indicate
1726 * when the metadata stream has reached a "coherent"
1727 * (parseable) point.
1728 *
1729 * This means that a live viewer may see an incoherent
1730 * sequence of metadata and fail to parse it.
1731 */
1732 WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
1733 metadata_bucket_destroy(stream->metadata_bucket);
1734 stream->metadata_bucket = NULL;
1735 }
1736 }
1737
1738 if (!stream->read_subbuffer_ops.get_next_subbuffer) {
1739 if (stream->chan->output == CONSUMER_CHANNEL_MMAP) {
1740 stream->read_subbuffer_ops.get_next_subbuffer =
1741 get_next_subbuffer_mmap;
1742 } else {
1743 stream->read_subbuffer_ops.get_next_subbuffer =
1744 get_next_subbuffer_splice;
1745 }
1746 }
1747
1748 if (stream->metadata_flag) {
1749 stream->read_subbuffer_ops.extract_subbuffer_info =
1750 extract_metadata_subbuffer_info;
1751 } else {
1752 stream->read_subbuffer_ops.extract_subbuffer_info =
1753 extract_data_subbuffer_info;
1754 if (stream->chan->is_live) {
1755 stream->read_subbuffer_ops.send_live_beacon =
1756 consumer_flush_kernel_index;
1757 }
1758 }
1759
1760 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
1761 end:
1762 return ret;
1763 }
1764
1765 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
1766 {
1767 int ret;
1768
1769 assert(stream);
1770
1771 /*
1772 * Don't create anything if this is set for streaming or if there is
1773 * no current trace chunk on the parent channel.
1774 */
1775 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
1776 stream->chan->trace_chunk) {
1777 ret = consumer_stream_create_output_files(stream, true);
1778 if (ret) {
1779 goto error;
1780 }
1781 }
1782
1783 if (stream->output == LTTNG_EVENT_MMAP) {
1784 /* get the len of the mmap region */
1785 unsigned long mmap_len;
1786
1787 ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len);
1788 if (ret != 0) {
1789 PERROR("kernctl_get_mmap_len");
1790 goto error_close_fd;
1791 }
1792 stream->mmap_len = (size_t) mmap_len;
1793
1794 stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ,
1795 MAP_PRIVATE, stream->wait_fd, 0);
1796 if (stream->mmap_base == MAP_FAILED) {
1797 PERROR("Error mmaping");
1798 ret = -1;
1799 goto error_close_fd;
1800 }
1801 }
1802
1803 ret = lttng_kconsumer_set_stream_ops(stream);
1804 if (ret) {
1805 goto error_close_fd;
1806 }
1807
1808 /* we return 0 to let the library handle the FD internally */
1809 return 0;
1810
1811 error_close_fd:
1812 if (stream->out_fd >= 0) {
1813 int err;
1814
1815 err = close(stream->out_fd);
1816 assert(!err);
1817 stream->out_fd = -1;
1818 }
1819 error:
1820 return ret;
1821 }
1822
1823 /*
1824 * Check if data is still being extracted from the buffers for a specific
1825 * stream. Consumer data lock MUST be acquired before calling this function
1826 * and the stream lock.
1827 *
1828 * Return 1 if the traced data are still getting read else 0 meaning that the
1829 * data is available for trace viewer reading.
1830 */
1831 int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream)
1832 {
1833 int ret;
1834
1835 assert(stream);
1836
1837 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
1838 ret = 0;
1839 goto end;
1840 }
1841
1842 ret = kernctl_get_next_subbuf(stream->wait_fd);
1843 if (ret == 0) {
1844 /* There is still data so let's put back this subbuffer. */
1845 ret = kernctl_put_subbuf(stream->wait_fd);
1846 assert(ret == 0);
1847 ret = 1; /* Data is pending */
1848 goto end;
1849 }
1850
1851 /* Data is NOT pending and ready to be read. */
1852 ret = 0;
1853
1854 end:
1855 return ret;
1856 }
This page took 0.108185 seconds and 4 git commands to generate.