bf0208f1d67adb0dd92e840b03a0c43910771b5d
[lttng-tools.git] / src / common / ust-consumer / ust-consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #define _LGPL_SOURCE
21 #include <assert.h>
22 #include <lttng/ust-ctl.h>
23 #include <poll.h>
24 #include <pthread.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <sys/mman.h>
28 #include <sys/socket.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <inttypes.h>
32 #include <unistd.h>
33 #include <urcu/list.h>
34 #include <signal.h>
35
36 #include <bin/lttng-consumerd/health-consumerd.h>
37 #include <common/common.h>
38 #include <common/sessiond-comm/sessiond-comm.h>
39 #include <common/relayd/relayd.h>
40 #include <common/compat/fcntl.h>
41 #include <common/compat/endian.h>
42 #include <common/consumer-metadata-cache.h>
43 #include <common/consumer-stream.h>
44 #include <common/consumer-timer.h>
45 #include <common/utils.h>
46 #include <common/index/index.h>
47
48 #include "ust-consumer.h"
49
50 extern struct lttng_consumer_global_data consumer_data;
51 extern int consumer_poll_timeout;
52 extern volatile int consumer_quit;
53
54 /*
55 * Free channel object and all streams associated with it. This MUST be used
56 * only and only if the channel has _NEVER_ been added to the global channel
57 * hash table.
58 */
59 static void destroy_channel(struct lttng_consumer_channel *channel)
60 {
61 struct lttng_consumer_stream *stream, *stmp;
62
63 assert(channel);
64
65 DBG("UST consumer cleaning stream list");
66
67 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
68 send_node) {
69
70 health_code_update();
71
72 cds_list_del(&stream->send_node);
73 ustctl_destroy_stream(stream->ustream);
74 free(stream);
75 }
76
77 /*
78 * If a channel is available meaning that was created before the streams
79 * were, delete it.
80 */
81 if (channel->uchan) {
82 lttng_ustconsumer_del_channel(channel);
83 }
84 free(channel);
85 }
86
87 /*
88 * Add channel to internal consumer state.
89 *
90 * Returns 0 on success or else a negative value.
91 */
92 static int add_channel(struct lttng_consumer_channel *channel,
93 struct lttng_consumer_local_data *ctx)
94 {
95 int ret = 0;
96
97 assert(channel);
98 assert(ctx);
99
100 if (ctx->on_recv_channel != NULL) {
101 ret = ctx->on_recv_channel(channel);
102 if (ret == 0) {
103 ret = consumer_add_channel(channel, ctx);
104 } else if (ret < 0) {
105 /* Most likely an ENOMEM. */
106 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
107 goto error;
108 }
109 } else {
110 ret = consumer_add_channel(channel, ctx);
111 }
112
113 DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);
114
115 error:
116 return ret;
117 }
118
119 /*
120 * Allocate and return a consumer channel object.
121 */
122 static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
123 const char *pathname, const char *name, uid_t uid, gid_t gid,
124 uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
125 uint64_t tracefile_size, uint64_t tracefile_count,
126 uint64_t session_id_per_pid, unsigned int monitor,
127 unsigned int live_timer_interval)
128 {
129 assert(pathname);
130 assert(name);
131
132 return consumer_allocate_channel(key, session_id, pathname, name, uid,
133 gid, relayd_id, output, tracefile_size,
134 tracefile_count, session_id_per_pid, monitor, live_timer_interval);
135 }
136
137 /*
138 * Allocate and return a consumer stream object. If _alloc_ret is not NULL, the
139 * error value if applicable is set in it else it is kept untouched.
140 *
141 * Return NULL on error else the newly allocated stream object.
142 */
143 static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
144 struct lttng_consumer_channel *channel,
145 struct lttng_consumer_local_data *ctx, int *_alloc_ret)
146 {
147 int alloc_ret;
148 struct lttng_consumer_stream *stream = NULL;
149
150 assert(channel);
151 assert(ctx);
152
153 stream = consumer_allocate_stream(channel->key,
154 key,
155 LTTNG_CONSUMER_ACTIVE_STREAM,
156 channel->name,
157 channel->uid,
158 channel->gid,
159 channel->relayd_id,
160 channel->session_id,
161 cpu,
162 &alloc_ret,
163 channel->type,
164 channel->monitor);
165 if (stream == NULL) {
166 switch (alloc_ret) {
167 case -ENOENT:
168 /*
169 * We could not find the channel. Can happen if cpu hotplug
170 * happens while tearing down.
171 */
172 DBG3("Could not find channel");
173 break;
174 case -ENOMEM:
175 case -EINVAL:
176 default:
177 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
178 break;
179 }
180 goto error;
181 }
182
183 stream->chan = channel;
184
185 error:
186 if (_alloc_ret) {
187 *_alloc_ret = alloc_ret;
188 }
189 return stream;
190 }
191
/*
 * Send the given stream pointer to the corresponding thread.
 *
 * The stream is registered in the global metadata or data hash table
 * BEFORE being written to the thread's wakeup pipe; once globally_visible
 * is set, ownership has left the channel's local send list.
 *
 * Returns 0 on success else a negative value.
 */
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_pipe *stream_pipe;

	/* Get the right pipe where the stream will be sent. */
	if (stream->metadata_flag) {
		ret = consumer_add_metadata_stream(stream);
		if (ret) {
			ERR("Consumer add metadata stream %" PRIu64 " failed.",
					stream->key);
			goto error;
		}
		stream_pipe = ctx->consumer_metadata_pipe;
	} else {
		ret = consumer_add_data_stream(stream);
		if (ret) {
			ERR("Consumer add stream %" PRIu64 " failed.",
					stream->key);
			goto error;
		}
		stream_pipe = ctx->consumer_data_pipe;
	}

	/*
	 * From this point on, the stream's ownership has been moved away from
	 * the channel and becomes globally visible.
	 */
	stream->globally_visible = 1;

	/* Wake the matching thread: the pointer itself is the message. */
	ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
	if (ret < 0) {
		ERR("Consumer write %s stream to pipe %d",
				stream->metadata_flag ? "metadata" : "data",
				lttng_pipe_get_writefd(stream_pipe));
		/* Undo the hash table registration performed above. */
		if (stream->metadata_flag) {
			consumer_del_stream_for_metadata(stream);
		} else {
			consumer_del_stream_for_data(stream);
		}
	}
error:
	return ret;
}
242
243 /*
244 * Create streams for the given channel using liblttng-ust-ctl.
245 *
246 * Return 0 on success else a negative value.
247 */
248 static int create_ust_streams(struct lttng_consumer_channel *channel,
249 struct lttng_consumer_local_data *ctx)
250 {
251 int ret, cpu = 0;
252 struct ustctl_consumer_stream *ustream;
253 struct lttng_consumer_stream *stream;
254
255 assert(channel);
256 assert(ctx);
257
258 /*
259 * While a stream is available from ustctl. When NULL is returned, we've
260 * reached the end of the possible stream for the channel.
261 */
262 while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
263 int wait_fd;
264 int ust_metadata_pipe[2];
265
266 health_code_update();
267
268 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
269 ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
270 if (ret < 0) {
271 ERR("Create ust metadata poll pipe");
272 goto error;
273 }
274 wait_fd = ust_metadata_pipe[0];
275 } else {
276 wait_fd = ustctl_stream_get_wait_fd(ustream);
277 }
278
279 /* Allocate consumer stream object. */
280 stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
281 if (!stream) {
282 goto error_alloc;
283 }
284 stream->ustream = ustream;
285 /*
286 * Store it so we can save multiple function calls afterwards since
287 * this value is used heavily in the stream threads. This is UST
288 * specific so this is why it's done after allocation.
289 */
290 stream->wait_fd = wait_fd;
291
292 /*
293 * Increment channel refcount since the channel reference has now been
294 * assigned in the allocation process above.
295 */
296 if (stream->chan->monitor) {
297 uatomic_inc(&stream->chan->refcount);
298 }
299
300 /*
301 * Order is important this is why a list is used. On error, the caller
302 * should clean this list.
303 */
304 cds_list_add_tail(&stream->send_node, &channel->streams.head);
305
306 ret = ustctl_get_max_subbuf_size(stream->ustream,
307 &stream->max_sb_size);
308 if (ret < 0) {
309 ERR("ustctl_get_max_subbuf_size failed for stream %s",
310 stream->name);
311 goto error;
312 }
313
314 /* Do actions once stream has been received. */
315 if (ctx->on_recv_stream) {
316 ret = ctx->on_recv_stream(stream);
317 if (ret < 0) {
318 goto error;
319 }
320 }
321
322 DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
323 stream->name, stream->key, stream->relayd_stream_id);
324
325 /* Set next CPU stream. */
326 channel->streams.count = ++cpu;
327
328 /* Keep stream reference when creating metadata. */
329 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
330 channel->metadata_stream = stream;
331 stream->ust_metadata_poll_pipe[0] = ust_metadata_pipe[0];
332 stream->ust_metadata_poll_pipe[1] = ust_metadata_pipe[1];
333 }
334 }
335
336 return 0;
337
338 error:
339 error_alloc:
340 return ret;
341 }
342
343 /*
344 * Create an UST channel with the given attributes and send it to the session
345 * daemon using the ust ctl API.
346 *
347 * Return 0 on success or else a negative value.
348 */
349 static int create_ust_channel(struct ustctl_consumer_channel_attr *attr,
350 struct ustctl_consumer_channel **chanp)
351 {
352 int ret;
353 struct ustctl_consumer_channel *channel;
354
355 assert(attr);
356 assert(chanp);
357
358 DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
359 "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
360 "switch_timer_interval: %u, read_timer_interval: %u, "
361 "output: %d, type: %d", attr->overwrite, attr->subbuf_size,
362 attr->num_subbuf, attr->switch_timer_interval,
363 attr->read_timer_interval, attr->output, attr->type);
364
365 channel = ustctl_create_channel(attr);
366 if (!channel) {
367 ret = -1;
368 goto error_create;
369 }
370
371 *chanp = channel;
372
373 return 0;
374
375 error_create:
376 return ret;
377 }
378
379 /*
380 * Send a single given stream to the session daemon using the sock.
381 *
382 * Return 0 on success else a negative value.
383 */
384 static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
385 {
386 int ret;
387
388 assert(stream);
389 assert(sock >= 0);
390
391 DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
392
393 /* Send stream to session daemon. */
394 ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
395 if (ret < 0) {
396 goto error;
397 }
398
399 error:
400 return ret;
401 }
402
/*
 * Send channel to sessiond.
 *
 * If the channel streams to a relayd (relayd_id set), every stream is first
 * registered with the relayd. A relayd failure is reported through
 * *relayd_error and an LTTCOMM_CONSUMERD_RELAYD_FAIL status, which is still
 * sent to the sessiond before bailing out.
 *
 * Return 0 on success or else a negative value.
 */
static int send_sessiond_channel(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_consumer_stream *stream;
	uint64_t net_seq_idx = -1ULL;

	assert(channel);
	assert(ctx);
	assert(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

			health_code_update();

			/* Try to send the stream to the relayd if one is available. */
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here probably due to a
				 * communicaton error on the socket.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			}
			/* Remember the relayd index of the first stream seen. */
			if (net_seq_idx == -1ULL) {
				net_seq_idx = stream->net_seq_idx;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		/*
		 * Either the session daemon is not responding or the relayd died so we
		 * stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	ret = ustctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

		health_code_update();

		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = ustctl_send_stream_to_sessiond(sock, NULL);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	/* A relayd failure takes precedence over whatever ret holds. */
	if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		ret = -1;
	}
	return ret;
}
494
495 /*
496 * Creates a channel and streams and add the channel it to the channel internal
497 * state. The created stream must ONLY be sent once the GET_CHANNEL command is
498 * received.
499 *
500 * Return 0 on success or else, a negative value is returned and the channel
501 * MUST be destroyed by consumer_del_channel().
502 */
503 static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
504 struct lttng_consumer_channel *channel,
505 struct ustctl_consumer_channel_attr *attr)
506 {
507 int ret;
508
509 assert(ctx);
510 assert(channel);
511 assert(attr);
512
513 /*
514 * This value is still used by the kernel consumer since for the kernel,
515 * the stream ownership is not IN the consumer so we need to have the
516 * number of left stream that needs to be initialized so we can know when
517 * to delete the channel (see consumer.c).
518 *
519 * As for the user space tracer now, the consumer creates and sends the
520 * stream to the session daemon which only sends them to the application
521 * once every stream of a channel is received making this value useless
522 * because we they will be added to the poll thread before the application
523 * receives them. This ensures that a stream can not hang up during
524 * initilization of a channel.
525 */
526 channel->nb_init_stream_left = 0;
527
528 /* The reply msg status is handled in the following call. */
529 ret = create_ust_channel(attr, &channel->uchan);
530 if (ret < 0) {
531 goto end;
532 }
533
534 channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
535
536 /*
537 * For the snapshots (no monitor), we create the metadata streams
538 * on demand, not during the channel creation.
539 */
540 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
541 ret = 0;
542 goto end;
543 }
544
545 /* Open all streams for this channel. */
546 ret = create_ust_streams(channel, ctx);
547 if (ret < 0) {
548 goto end;
549 }
550
551 end:
552 return ret;
553 }
554
/*
 * Send all stream of a channel to the right thread handling it.
 *
 * Each stream is unlinked from the channel's send list whether the hand-off
 * succeeded or not; on success the receiving thread owns the stream.
 *
 * On error, return a negative value else 0 on success.
 */
static int send_streams_to_thread(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *stream, *stmp;

	assert(channel);
	assert(ctx);

	/* Send streams to the corresponding thread. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		/* Sending the stream to the thread. */
		ret = send_stream_to_thread(stream, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			/* Remove node from the channel stream list. */
			cds_list_del(&stream->send_node);
			goto error;
		}

		/* Remove node from the channel stream list. */
		cds_list_del(&stream->send_node);

	}

error:
	return ret;
}
595
/*
 * Flush channel's streams using the given key to retrieve the channel.
 *
 * The channel is looked up under the RCU read lock, then every stream
 * indexed under its channel id in stream_per_chan_id_ht is flushed.
 *
 * Return 0 on success else an LTTng error code.
 */
static int flush_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		/*
		 * NOTE(review): second argument appears to be the "producer
		 * active" flag of the ustctl flush API — confirm against
		 * lttng/ust-ctl.h.
		 */
		ustctl_flush_buffer(stream->ustream, 1);
	}
error:
	rcu_read_unlock();
	return ret;
}
634
/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int close_metadata(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issue a delete because
		 * the endpoint point of the stream hung up. There is no way the
		 * session daemon can know about it thus use a DBG instead of an actual
		 * error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	/* Lock order: global consumer data lock first, then the channel lock. */
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/*
	 * The channel may have been unlinked from the hash table concurrently;
	 * re-check under the locks before touching it.
	 */
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
		goto error_unlock;
	}

	lttng_ustconsumer_close_metadata(channel);

error_unlock:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
error:
	return ret;
}
676
/*
 * Hand the metadata channel's stream over to the metadata thread, after
 * registering it with the relayd when one is configured.
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret;
	struct lttng_consumer_channel *metadata;

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
		/* Tell the relayd this session's streams are all announced. */
		ret = consumer_send_relayd_streams_sent(
				metadata->metadata_stream->net_seq_idx);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}
	}

	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}
	/* List MUST be empty after or else it could be reused. */
	assert(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete metadata channel on error. At this point, the metadata stream can
	 * NOT be monitored by the metadata thread thus having the guarantee that
	 * the stream is still in the local stream list of the channel. This call
	 * will make sure to clean that list.
	 */
	consumer_stream_destroy(metadata->metadata_stream, NULL);
	cds_list_del(&metadata->metadata_stream->send_node);
	metadata->metadata_stream = NULL;
error_no_stream:
end:
	return ret;
}
761
/*
 * Snapshot the whole metadata.
 *
 * Creates a throw-away metadata stream (no-monitor channels have none),
 * drains the entire metadata cache to the relayd or a local file, then
 * destroys the stream so the next snapshot starts fresh.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_channel *metadata_channel;
	struct lttng_consumer_stream *metadata_stream;

	assert(path);
	assert(ctx);

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	metadata_channel = consumer_find_channel(key);
	if (!metadata_channel) {
		ERR("UST snapshot metadata channel not found for key %" PRIu64,
				key);
		ret = -1;
		goto error;
	}
	assert(!metadata_channel->monitor);

	health_code_update();

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	if (relayd_id != (uint64_t) -1ULL) {
		/* Network output: register the stream with the relayd. */
		metadata_stream->net_seq_idx = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
		if (ret < 0) {
			goto error_stream;
		}
	} else {
		/* Local output: create the on-disk trace file. */
		ret = utils_create_stream_file(path, metadata_stream->name,
				metadata_stream->chan->tracefile_size,
				metadata_stream->tracefile_count_current,
				metadata_stream->uid, metadata_stream->gid, NULL);
		if (ret < 0) {
			goto error_stream;
		}
		metadata_stream->out_fd = ret;
		metadata_stream->tracefile_size_current = 0;
	}

	/* Drain until the read reports no more data (ret == 0) or fails. */
	do {
		health_code_update();

		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
		if (ret < 0) {
			goto error_stream;
		}
	} while (ret > 0);

error_stream:
	/*
	 * Clean up the stream completly because the next snapshot will use a new
	 * metadata stream.
	 */
	consumer_stream_destroy(metadata_stream, NULL);
	cds_list_del(&metadata_stream->send_node);
	metadata_channel->metadata_stream = NULL;

error:
	rcu_read_unlock();
	return ret;
}
856
/*
 * Take a snapshot of all the stream of a channel.
 *
 * For each stream of the (non-monitored) channel: register it with the
 * relayd or create a local trace file, flush the ring buffer, capture the
 * produced/consumed positions, then copy every sub-buffer in that window
 * out via mmap. The stream is closed afterwards so it can be reused on the
 * next snapshot.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream, struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;

	assert(path);
	assert(ctx);

	rcu_read_lock();

	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	channel = consumer_find_channel(key);
	if (!channel) {
		ERR("UST snapshot channel not found for key %" PRIu64, key);
		ret = -1;
		goto error;
	}
	assert(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

		health_code_update();

		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		stream->net_seq_idx = relayd_id;

		if (use_relayd) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_unlock;
			}
		} else {
			ret = utils_create_stream_file(path, stream->name,
					stream->chan->tracefile_size,
					stream->tracefile_count_current,
					stream->uid, stream->gid, NULL);
			if (ret < 0) {
				goto error_unlock;
			}
			stream->out_fd = ret;
			stream->tracefile_size_current = 0;

			DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
					stream->name, stream->key);
		}
		/*
		 * NOTE(review): streams_sent is issued once per stream inside
		 * this loop rather than once after all streams — verify the
		 * relayd protocol tolerates repeated notifications.
		 */
		if (relayd_id != -1ULL) {
			ret = consumer_send_relayd_streams_sent(relayd_id);
			if (ret < 0) {
				goto error_unlock;
			}
		}

		ustctl_flush_buffer(stream->ustream, 1);

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd UST snapshot position");
			goto error_unlock;
		}

		/*
		 * The original value is sent back if max stream size is larger than
		 * the possible size of the snapshot. Also, we assume that the session
		 * daemon should never send a maximum stream size that is lower than
		 * subbuffer size.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

		/* Copy every sub-buffer between the two captured positions. */
		while (consumed_pos < produced_pos) {
			ssize_t read_len;
			unsigned long len, padded_len;

			health_code_update();

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("ustctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				/* Sub-buffer not available: skip one and retry. */
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				continue;
			}

			ret = ustctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot ustctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
					padded_len - len, NULL);
			/*
			 * Relayd output expects exactly the payload; local files
			 * get the padded sub-buffer. Anything else is an error.
			 */
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = ustctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot ustctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	rcu_read_unlock();
	return 0;

error_put_subbuf:
	if (ustctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot ustctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
error:
	rcu_read_unlock();
	return ret;
}
1026
/*
 * Receive the metadata updates from the sessiond.
 *
 * Reads 'len' bytes from 'sock', writes them into the channel's metadata
 * cache at 'offset' and, when 'wait' is non-zero, blocks until the cache
 * has been flushed up to offset + len.
 *
 * Returns an lttcomm status code (LTTCOMM_CONSUMERD_SUCCESS on success),
 * or the negative value from the socket read when it fails.
 */
int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
		uint64_t len, struct lttng_consumer_channel *channel,
		int timer, int wait)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	char *metadata_str;

	DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

	/*
	 * NOTE(review): len == 0 makes this a zero-size allocation which may
	 * return NULL and be reported as ENOMEM; presumably the sessiond never
	 * sends an empty payload — confirm.
	 */
	metadata_str = zmalloc(len * sizeof(char));
	if (!metadata_str) {
		PERROR("zmalloc metadata string");
		ret_code = LTTCOMM_CONSUMERD_ENOMEM;
		goto end;
	}

	health_code_update();

	/* Receive metadata string. */
	ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
	if (ret < 0) {
		/* Session daemon is dead so return gracefully. */
		ret_code = ret;
		goto end_free;
	}

	health_code_update();

	/* Cache writes are serialized by the metadata cache lock. */
	pthread_mutex_lock(&channel->metadata_cache->lock);
	ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
	if (ret < 0) {
		/* Unable to handle metadata. Notify session daemon. */
		ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		/*
		 * Skip metadata flush on write error since the offset and len might
		 * not have been updated which could create an infinite loop below when
		 * waiting for the metadata cache to be flushed.
		 */
		pthread_mutex_unlock(&channel->metadata_cache->lock);
		goto end_free;
	}
	pthread_mutex_unlock(&channel->metadata_cache->lock);

	if (!wait) {
		goto end_free;
	}
	/* Poll until the cache has been flushed up to the end of this write. */
	while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
		DBG("Waiting for metadata to be flushed");

		health_code_update();

		usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
	}

end_free:
	free(metadata_str);
end:
	return ret_code;
}
1089
1090 /*
1091 * Receive command from session daemon and process it.
1092 *
1093 * Return 1 on success else a negative value or 0.
1094 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	ssize_t ret;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	/*
	 * Channel being built by LTTNG_CONSUMER_ASK_CHANNEL_CREATION; kept at
	 * function scope so end_channel_error can destroy it on failure.
	 */
	struct lttng_consumer_channel *channel = NULL;

	health_code_update();

	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %zd (expects %zu)",
			ret, sizeof(msg));
		/*
		 * The ret value might 0 meaning an orderly shutdown but this is ok
		 * since the caller handles this.
		 */
		if (ret > 0) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
			ret = -1;
		}
		return ret;
	}

	health_code_update();

	/* deprecated */
	assert(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side lock */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Session daemon status message are handled in the following call. */
		ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		}

		/*
		 * Each relayd socket pair has a refcount of stream attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		if (relayd) {
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		/* No longer supported; release the read lock before returning. */
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		/* Shadows the outer ssize_t ret within this case only. */
		int ret, is_data_pending;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret < 0) {
			DBG("Error when sending the data pending ret code: %d", ret);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret;
		struct ustctl_consumer_channel_attr attr;

		/* Create a plain object and reserve a channel key. */
		channel = allocate_channel(msg.u.ask_channel.session_id,
				msg.u.ask_channel.pathname, msg.u.ask_channel.name,
				msg.u.ask_channel.uid, msg.u.ask_channel.gid,
				msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval);
		if (!channel) {
			goto end_channel_error;
		}

		/*
		 * Assign UST application UID to the channel. This value is ignored for
		 * per PID buffers. This is specific to UST thus setting this after the
		 * allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));

		/* Match channel buffer type to the UST abi. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
		default:
			attr.output = LTTNG_UST_MMAP;
			break;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_CHAN_METADATA;
			break;
		default:
			assert(0);
			goto error_fatal;
		};

		health_code_update();

		ret = ask_channel(ctx, sock, channel, &attr);
		if (ret < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
			ret = consumer_metadata_cache_allocate(channel);
			if (ret < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			/*
			 * The switch timer drives metadata flushing from the
			 * consumer side; clear the interval so UST does not
			 * also arm its own timer.
			 */
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to session daemon. This way, all streams must
		 * be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret = add_channel(channel, ctx);
		if (ret < 0) {
			/* Undo the timer/cache setup done just above before tearing down. */
			if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret = consumer_send_status_channel(sock, channel);
		if (ret < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		/* Shadows the function-scope 'channel'; lookup only, no ownership. */
		struct lttng_consumer_channel *channel;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_msg_sessiond;
		}

		health_code_update();

		/* Send everything to sessiond. */
		ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send to the relayd the stream so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_nosignal;
			}
			/*
			 * The communicaton was broken hence there is a bad state between
			 * the consumer and sessiond so stop everything.
			 */
			goto error_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!channel->monitor) {
			goto end_msg_sessiond;
		}

		ret = send_streams_to_thread(channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		assert(cds_list_empty(&channel->streams.head));
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		struct lttng_consumer_channel *channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		channel = consumer_find_channel(key);
		if (!channel) {
			/*
			 * This is possible if the metadata creation on the consumer side
			 * is in flight vis-a-vis a concurrent push metadata from the
			 * session daemon. Simply return that the channel failed and the
			 * session daemon will handle that message correctly considering
			 * that this race is acceptable thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_msg_sessiond;
		}

		health_code_update();

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto error_fatal;
		}

		health_code_update();

		ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
				len, channel, 0, 1);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_fatal;
		} else {
			ret_code = ret;
			goto end_msg_sessiond;
		}
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		/* Snapshot either the metadata channel or a data channel. */
		if (msg.u.snapshot_channel.metadata) {
			ret = snapshot_metadata(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					ctx);
			if (ret < 0) {
				ERR("Snapshot metadata failed");
				ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
			}
		} else {
			ret = snapshot_channel(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.nb_packets_per_stream,
					ctx);
			if (ret < 0) {
				ERR("Snapshot channel failed");
				ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			}
		}

		health_code_update();
		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		health_code_update();
		break;
	}
	default:
		break;
	}

/* Exit without replying a status message to the session daemon. */
end_nosignal:
	rcu_read_unlock();

	health_code_update();

	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	return 1;

/* Reply ret_code to the session daemon, then report success to the caller. */
end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		goto error_fatal;
	}
	rcu_read_unlock();

	health_code_update();

	return 1;
/* Channel creation failed: destroy the partial channel and notify sessiond. */
end_channel_error:
	if (channel) {
		/*
		 * Free channel here since no one has a reference to it. We don't
		 * free after that because a stream can store this pointer.
		 */
		destroy_channel(channel);
	}
	/* We have to send a status channel message indicating an error. */
	ret = consumer_send_status_channel(sock, NULL);
	if (ret < 0) {
		/* Stop everything if session daemon can not be notified. */
		goto error_fatal;
	}
	rcu_read_unlock();

	health_code_update();

	return 1;
error_fatal:
	rcu_read_unlock();
	/* This will issue a consumer stop. */
	return -1;
}
1565
1566 /*
1567 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
1568 * compiled out, we isolate it in this library.
1569 */
1570 int lttng_ustctl_get_mmap_read_offset(struct lttng_consumer_stream *stream,
1571 unsigned long *off)
1572 {
1573 assert(stream);
1574 assert(stream->ustream);
1575
1576 return ustctl_get_mmap_read_offset(stream->ustream, off);
1577 }
1578
1579 /*
1580 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
1581 * compiled out, we isolate it in this library.
1582 */
1583 void *lttng_ustctl_get_mmap_base(struct lttng_consumer_stream *stream)
1584 {
1585 assert(stream);
1586 assert(stream->ustream);
1587
1588 return ustctl_get_mmap_base(stream->ustream);
1589 }
1590
1591 /*
1592 * Take a snapshot for a specific fd
1593 *
1594 * Returns 0 on success, < 0 on error
1595 */
1596 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
1597 {
1598 assert(stream);
1599 assert(stream->ustream);
1600
1601 return ustctl_snapshot(stream->ustream);
1602 }
1603
1604 /*
1605 * Get the produced position
1606 *
1607 * Returns 0 on success, < 0 on error
1608 */
1609 int lttng_ustconsumer_get_produced_snapshot(
1610 struct lttng_consumer_stream *stream, unsigned long *pos)
1611 {
1612 assert(stream);
1613 assert(stream->ustream);
1614 assert(pos);
1615
1616 return ustctl_snapshot_get_produced(stream->ustream, pos);
1617 }
1618
1619 /*
1620 * Get the consumed position
1621 *
1622 * Returns 0 on success, < 0 on error
1623 */
1624 int lttng_ustconsumer_get_consumed_snapshot(
1625 struct lttng_consumer_stream *stream, unsigned long *pos)
1626 {
1627 assert(stream);
1628 assert(stream->ustream);
1629 assert(pos);
1630
1631 return ustctl_snapshot_get_consumed(stream->ustream, pos);
1632 }
1633
1634 void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
1635 int producer)
1636 {
1637 assert(stream);
1638 assert(stream->ustream);
1639
1640 ustctl_flush_buffer(stream->ustream, producer);
1641 }
1642
1643 int lttng_ustconsumer_get_current_timestamp(
1644 struct lttng_consumer_stream *stream, uint64_t *ts)
1645 {
1646 assert(stream);
1647 assert(stream->ustream);
1648 assert(ts);
1649
1650 return ustctl_get_current_timestamp(stream->ustream, ts);
1651 }
1652
1653 /*
1654 * Called when the stream signal the consumer that it has hang up.
1655 */
1656 void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
1657 {
1658 assert(stream);
1659 assert(stream->ustream);
1660
1661 ustctl_flush_buffer(stream->ustream, 0);
1662 stream->hangup_flush_done = 1;
1663 }
1664
1665 void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
1666 {
1667 assert(chan);
1668 assert(chan->uchan);
1669
1670 if (chan->switch_timer_enabled == 1) {
1671 consumer_timer_switch_stop(chan);
1672 }
1673 consumer_metadata_cache_destroy(chan);
1674 ustctl_destroy_channel(chan->uchan);
1675 }
1676
1677 void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
1678 {
1679 assert(stream);
1680 assert(stream->ustream);
1681
1682 if (stream->chan->switch_timer_enabled == 1) {
1683 consumer_timer_switch_stop(stream->chan);
1684 }
1685 ustctl_destroy_stream(stream->ustream);
1686 }
1687
1688 int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
1689 {
1690 assert(stream);
1691 assert(stream->ustream);
1692
1693 return ustctl_stream_get_wakeup_fd(stream->ustream);
1694 }
1695
1696 int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
1697 {
1698 assert(stream);
1699 assert(stream->ustream);
1700
1701 return ustctl_stream_close_wakeup_fd(stream->ustream);
1702 }
1703
1704 /*
1705 * Populate index values of a UST stream. Values are set in big endian order.
1706 *
1707 * Return 0 on success or else a negative value.
1708 */
1709 static int get_index_values(struct ctf_packet_index *index,
1710 struct ustctl_consumer_stream *ustream)
1711 {
1712 int ret;
1713
1714 ret = ustctl_get_timestamp_begin(ustream, &index->timestamp_begin);
1715 if (ret < 0) {
1716 PERROR("ustctl_get_timestamp_begin");
1717 goto error;
1718 }
1719 index->timestamp_begin = htobe64(index->timestamp_begin);
1720
1721 ret = ustctl_get_timestamp_end(ustream, &index->timestamp_end);
1722 if (ret < 0) {
1723 PERROR("ustctl_get_timestamp_end");
1724 goto error;
1725 }
1726 index->timestamp_end = htobe64(index->timestamp_end);
1727
1728 ret = ustctl_get_events_discarded(ustream, &index->events_discarded);
1729 if (ret < 0) {
1730 PERROR("ustctl_get_events_discarded");
1731 goto error;
1732 }
1733 index->events_discarded = htobe64(index->events_discarded);
1734
1735 ret = ustctl_get_content_size(ustream, &index->content_size);
1736 if (ret < 0) {
1737 PERROR("ustctl_get_content_size");
1738 goto error;
1739 }
1740 index->content_size = htobe64(index->content_size);
1741
1742 ret = ustctl_get_packet_size(ustream, &index->packet_size);
1743 if (ret < 0) {
1744 PERROR("ustctl_get_packet_size");
1745 goto error;
1746 }
1747 index->packet_size = htobe64(index->packet_size);
1748
1749 ret = ustctl_get_stream_id(ustream, &index->stream_id);
1750 if (ret < 0) {
1751 PERROR("ustctl_get_stream_id");
1752 goto error;
1753 }
1754 index->stream_id = htobe64(index->stream_id);
1755
1756 error:
1757 return ret;
1758 }
1759
1760 /*
1761 * Write up to one packet from the metadata cache to the channel.
1762 *
1763 * Returns the number of bytes pushed in the cache, or a negative value
1764 * on error.
1765 */
static
int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
{
	ssize_t write_len;
	int ret;

	/* The cache lock protects 'contiguous' and 'ust_metadata_pushed'. */
	pthread_mutex_lock(&stream->chan->metadata_cache->lock);
	if (stream->chan->metadata_cache->contiguous
			== stream->ust_metadata_pushed) {
		/* All contiguously available metadata was already pushed. */
		ret = 0;
		goto end;
	}

	/* Push at most one packet of the not-yet-pushed cached metadata. */
	write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
			&stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
			stream->chan->metadata_cache->contiguous
			- stream->ust_metadata_pushed);
	/* A zero-length write would mean we asked for nothing; ruled out above. */
	assert(write_len != 0);
	if (write_len < 0) {
		ERR("Writing one metadata packet");
		ret = -1;
		goto end;
	}
	stream->ust_metadata_pushed += write_len;

	/* The pushed position can never overtake the cached contiguous data. */
	assert(stream->chan->metadata_cache->contiguous >=
			stream->ust_metadata_pushed);
	ret = write_len;

end:
	pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
	return ret;
}
1799
1800
/*
 * Sync metadata: request it from the session daemon and snapshot the stream
 * so that the metadata thread can consume it.
 *
 * Metadata stream lock MUST be acquired.
 *
 * Return 0 if new metadata is available, EAGAIN if the metadata stream
 * is empty or a negative value on error.
 */
1810 int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
1811 struct lttng_consumer_stream *metadata)
1812 {
1813 int ret;
1814 int retry = 0;
1815
1816 assert(ctx);
1817 assert(metadata);
1818
1819 /*
1820 * Request metadata from the sessiond, but don't wait for the flush
1821 * because we locked the metadata thread.
1822 */
1823 ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
1824 if (ret < 0) {
1825 goto end;
1826 }
1827
1828 ret = commit_one_metadata_packet(metadata);
1829 if (ret <= 0) {
1830 goto end;
1831 } else if (ret > 0) {
1832 retry = 1;
1833 }
1834
1835 ustctl_flush_buffer(metadata->ustream, 1);
1836 ret = ustctl_snapshot(metadata->ustream);
1837 if (ret < 0) {
1838 if (errno != EAGAIN) {
1839 ERR("Sync metadata, taking UST snapshot");
1840 goto end;
1841 }
1842 DBG("No new metadata when syncing them.");
1843 /* No new metadata, exit. */
1844 ret = ENODATA;
1845 goto end;
1846 }
1847
1848 /*
1849 * After this flush, we still need to extract metadata.
1850 */
1851 if (retry) {
1852 ret = EAGAIN;
1853 }
1854
1855 end:
1856 return ret;
1857 }
1858
1859 /*
1860 * Return 0 on success else a negative value.
1861 */
static int notify_if_more_data(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct ustctl_consumer_stream *ustream;

	assert(stream);
	assert(ctx);

	ustream = stream->ustream;

	/*
	 * First, we are going to check if there is a new subbuffer available
	 * before reading the stream wait_fd.
	 */
	/* Get the next subbuffer */
	ret = ustctl_get_next_subbuf(ustream);
	if (ret) {
		/* No more data found, flag the stream. */
		stream->has_data = 0;
		ret = 0;
		goto end;
	}

	/* Probe only: release the subbuffer right away without consuming it. */
	ret = ustctl_put_subbuf(ustream);
	assert(!ret);

	/* This stream still has data. Flag it and wake up the data thread. */
	stream->has_data = 1;

	/* Only write to the pipe once; has_wakeup is cleared by the reader. */
	if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
		ssize_t writelen;

		writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
		if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			/* A full pipe (EAGAIN/EWOULDBLOCK) is fine; other errors are not. */
			ret = writelen;
			goto end;
		}

		/* The wake up pipe has been notified. */
		ctx->has_wakeup = 1;
	}
	ret = 0;

end:
	return ret;
}
1909
1910 /*
1911 * Read subbuffer from the given stream.
1912 *
1913 * Stream lock MUST be acquired.
1914 *
1915 * Return 0 on success else a negative value.
1916 */
int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	unsigned long len, subbuf_size, padding;
	int err, write_index = 1;
	long ret = 0;
	struct ustctl_consumer_stream *ustream;
	struct ctf_packet_index index;

	assert(stream);
	assert(stream->ustream);
	assert(ctx);

	DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
			stream->name);

	/* Ease our life for what's next. */
	ustream = stream->ustream;

	/*
	 * We can consume the 1 byte written into the wait_fd by UST. Don't trigger
	 * error if we cannot read this one byte (read returns 0), or if the error
	 * is EAGAIN or EWOULDBLOCK.
	 *
	 * This is only done when the stream is monitored by a thread, before the
	 * flush is done after a hangup and if the stream is not flagged with data
	 * since there might be nothing to consume in the wait fd but still have
	 * data available flagged by the consumer wake up pipe.
	 */
	if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
		char dummy;
		ssize_t readlen;

		readlen = lttng_read(stream->wait_fd, &dummy, 1);
		if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = readlen;
			goto end;
		}
	}

retry:
	/* Get the next subbuffer */
	err = ustctl_get_next_subbuf(ustream);
	if (err != 0) {
		/*
		 * Populate metadata info if the existing info has
		 * already been read.
		 */
		if (stream->metadata_flag) {
			/*
			 * For metadata, refill the ring buffer from the cache and
			 * retry acquiring a subbuffer; stop on error or empty cache.
			 */
			ret = commit_one_metadata_packet(stream);
			if (ret <= 0) {
				goto end;
			}
			ustctl_flush_buffer(stream->ustream, 1);
			goto retry;
		}

		ret = err;	/* ustctl_get_next_subbuf returns negative, caller expect positive. */
		/*
		 * This is a debug message even for single-threaded consumer,
		 * because poll() have more relaxed criterions than get subbuf,
		 * so get_subbuf may fail for short race windows where poll()
		 * would issue wakeups.
		 */
		DBG("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency) [ret: %d]", err);
		goto end;
	}
	assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);

	/* Data streams get a CTF packet index entry; metadata streams do not. */
	if (!stream->metadata_flag) {
		index.offset = htobe64(stream->out_fd_offset);
		ret = get_index_values(&index, ustream);
		if (ret < 0) {
			goto end;
		}
	} else {
		write_index = 0;
	}

	/* Get the full padded subbuffer size */
	err = ustctl_get_padded_subbuf_size(ustream, &len);
	assert(err == 0);

	/* Get subbuffer data size (without padding) */
	err = ustctl_get_subbuf_size(ustream, &subbuf_size);
	assert(err == 0);

	/* Make sure we don't get a subbuffer size bigger than the padded */
	assert(len >= subbuf_size);

	padding = len - subbuf_size;
	/* write the subbuffer to the tracefile */
	ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, subbuf_size, padding, &index);
	/*
	 * The mmap operation should write subbuf_size amount of data when network
	 * streaming or the full padding (len) size when we are _not_ streaming.
	 */
	if ((ret != subbuf_size && stream->net_seq_idx != (uint64_t) -1ULL) ||
			(ret != len && stream->net_seq_idx == (uint64_t) -1ULL)) {
		/*
		 * Display the error but continue processing to try to release the
		 * subbuffer. This is a DBG statement since any unexpected kill or
		 * signal, the application gets unregistered, relayd gets closed or
		 * anything that affects the buffer lifetime will trigger this error.
		 * So, for the sake of the user, don't print this error since it can
		 * happen and it is OK with the code flow.
		 */
		DBG("Error writing to tracefile "
				"(ret: %ld != len: %lu != subbuf_size: %lu)",
				ret, len, subbuf_size);
		write_index = 0;
	}
	err = ustctl_put_next_subbuf(ustream);
	assert(err == 0);

	/*
	 * This will consume the byte on the wait_fd if and only if there is not
	 * next subbuffer to be acquired.
	 */
	if (!stream->metadata_flag) {
		ret = notify_if_more_data(stream, ctx);
		if (ret < 0) {
			goto end;
		}
	}

	/* Write index if needed. */
	if (!write_index) {
		goto end;
	}

	if (stream->chan->live_timer_interval && !stream->metadata_flag) {
		/*
		 * In live, block until all the metadata is sent.
		 */
		err = consumer_stream_sync_metadata(ctx, stream->session_id);
		if (err < 0) {
			goto end;
		}
	}

	/* Metadata streams never reach here: write_index was cleared above. */
	assert(!stream->metadata_flag);
	err = consumer_stream_write_index(stream, &index);
	if (err < 0) {
		goto end;
	}

end:
	return ret;
}
2068
2069 /*
2070 * Called when a stream is created.
2071 *
2072 * Return 0 on success or else a negative value.
2073 */
2074 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
2075 {
2076 int ret;
2077
2078 assert(stream);
2079
2080 /* Don't create anything if this is set for streaming. */
2081 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
2082 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
2083 stream->chan->tracefile_size, stream->tracefile_count_current,
2084 stream->uid, stream->gid, NULL);
2085 if (ret < 0) {
2086 goto error;
2087 }
2088 stream->out_fd = ret;
2089 stream->tracefile_size_current = 0;
2090
2091 if (!stream->metadata_flag) {
2092 ret = index_create_file(stream->chan->pathname,
2093 stream->name, stream->uid, stream->gid,
2094 stream->chan->tracefile_size,
2095 stream->tracefile_count_current);
2096 if (ret < 0) {
2097 goto error;
2098 }
2099 stream->index_fd = ret;
2100 }
2101 }
2102 ret = 0;
2103
2104 error:
2105 return ret;
2106 }
2107
2108 /*
2109 * Check if data is still being extracted from the buffers for a specific
2110 * stream. Consumer data lock MUST be acquired before calling this function
2111 * and the stream lock.
2112 *
2113 * Return 1 if the traced data are still getting read else 0 meaning that the
2114 * data is available for trace viewer reading.
2115 */
2116 int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
2117 {
2118 int ret;
2119
2120 assert(stream);
2121 assert(stream->ustream);
2122
2123 DBG("UST consumer checking data pending");
2124
2125 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
2126 ret = 0;
2127 goto end;
2128 }
2129
2130 if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
2131 uint64_t contiguous, pushed;
2132
2133 /* Ease our life a bit. */
2134 contiguous = stream->chan->metadata_cache->contiguous;
2135 pushed = stream->ust_metadata_pushed;
2136
2137 /*
2138 * We can simply check whether all contiguously available data
2139 * has been pushed to the ring buffer, since the push operation
2140 * is performed within get_next_subbuf(), and because both
2141 * get_next_subbuf() and put_next_subbuf() are issued atomically
2142 * thanks to the stream lock within
2143 * lttng_ustconsumer_read_subbuffer(). This basically means that
2144 * whetnever ust_metadata_pushed is incremented, the associated
2145 * metadata has been consumed from the metadata stream.
2146 */
2147 DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
2148 contiguous, pushed);
2149 assert(((int64_t) (contiguous - pushed)) >= 0);
2150 if ((contiguous != pushed) ||
2151 (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
2152 ret = 1; /* Data is pending */
2153 goto end;
2154 }
2155 } else {
2156 ret = ustctl_get_next_subbuf(stream->ustream);
2157 if (ret == 0) {
2158 /*
2159 * There is still data so let's put back this
2160 * subbuffer.
2161 */
2162 ret = ustctl_put_subbuf(stream->ustream);
2163 assert(ret == 0);
2164 ret = 1; /* Data is pending */
2165 goto end;
2166 }
2167 }
2168
2169 /* Data is NOT pending so ready to be read. */
2170 ret = 0;
2171
2172 end:
2173 return ret;
2174 }
2175
2176 /*
2177 * Stop a given metadata channel timer if enabled and close the wait fd which
2178 * is the poll pipe of the metadata stream.
2179 *
2180 * This MUST be called with the metadata channel acquired.
2181 */
2182 void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
2183 {
2184 int ret;
2185
2186 assert(metadata);
2187 assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);
2188
2189 DBG("Closing metadata channel key %" PRIu64, metadata->key);
2190
2191 if (metadata->switch_timer_enabled == 1) {
2192 consumer_timer_switch_stop(metadata);
2193 }
2194
2195 if (!metadata->metadata_stream) {
2196 goto end;
2197 }
2198
2199 /*
2200 * Closing write side so the thread monitoring the stream wakes up if any
2201 * and clean the metadata stream.
2202 */
2203 if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
2204 ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
2205 if (ret < 0) {
2206 PERROR("closing metadata pipe write side");
2207 }
2208 metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
2209 }
2210
2211 end:
2212 return;
2213 }
2214
/*
 * Close every metadata stream wait fd of the metadata hash table. This
 * function MUST be used very carefully so not to run into a race between the
 * metadata thread handling streams and this function closing their wait fd.
 *
 * For UST, this is used when the session daemon hangs up. It's the metadata
 * producer, so calling this is safe because we are assured that no state change
 * can occur in the metadata thread for the streams in the hash table.
 */
void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(metadata_ht);
	assert(metadata_ht->ht);

	DBG("UST consumer closing all metadata streams");

	/* RCU read-side lock required by the lock-free hash table iteration. */
	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
			node.node) {

		health_code_update();

		/* The channel lock protects the close against concurrent timer use. */
		pthread_mutex_lock(&stream->chan->lock);
		lttng_ustconsumer_close_metadata(stream->chan);
		pthread_mutex_unlock(&stream->chan->lock);

	}
	rcu_read_unlock();
}
2247
2248 void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
2249 {
2250 int ret;
2251
2252 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
2253 if (ret < 0) {
2254 ERR("Unable to close wakeup fd");
2255 }
2256 }
2257
2258 /*
2259 * Please refer to consumer-timer.c before adding any lock within this
2260 * function or any of its callees. Timers have a very strict locking
2261 * semantic with respect to teardown. Failure to respect this semantic
2262 * introduces deadlocks.
2263 */
2264 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
2265 struct lttng_consumer_channel *channel, int timer, int wait)
2266 {
2267 struct lttcomm_metadata_request_msg request;
2268 struct lttcomm_consumer_msg msg;
2269 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
2270 uint64_t len, key, offset;
2271 int ret;
2272
2273 assert(channel);
2274 assert(channel->metadata_cache);
2275
2276 memset(&request, 0, sizeof(request));
2277
2278 /* send the metadata request to sessiond */
2279 switch (consumer_data.type) {
2280 case LTTNG_CONSUMER64_UST:
2281 request.bits_per_long = 64;
2282 break;
2283 case LTTNG_CONSUMER32_UST:
2284 request.bits_per_long = 32;
2285 break;
2286 default:
2287 request.bits_per_long = 0;
2288 break;
2289 }
2290
2291 request.session_id = channel->session_id;
2292 request.session_id_per_pid = channel->session_id_per_pid;
2293 /*
2294 * Request the application UID here so the metadata of that application can
2295 * be sent back. The channel UID corresponds to the user UID of the session
2296 * used for the rights on the stream file(s).
2297 */
2298 request.uid = channel->ust_app_uid;
2299 request.key = channel->key;
2300
2301 DBG("Sending metadata request to sessiond, session id %" PRIu64
2302 ", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
2303 request.session_id, request.session_id_per_pid, request.uid,
2304 request.key);
2305
2306 pthread_mutex_lock(&ctx->metadata_socket_lock);
2307
2308 health_code_update();
2309
2310 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
2311 sizeof(request));
2312 if (ret < 0) {
2313 ERR("Asking metadata to sessiond");
2314 goto end;
2315 }
2316
2317 health_code_update();
2318
2319 /* Receive the metadata from sessiond */
2320 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
2321 sizeof(msg));
2322 if (ret != sizeof(msg)) {
2323 DBG("Consumer received unexpected message size %d (expects %zu)",
2324 ret, sizeof(msg));
2325 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
2326 /*
2327 * The ret value might 0 meaning an orderly shutdown but this is ok
2328 * since the caller handles this.
2329 */
2330 goto end;
2331 }
2332
2333 health_code_update();
2334
2335 if (msg.cmd_type == LTTNG_ERR_UND) {
2336 /* No registry found */
2337 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
2338 ret_code);
2339 ret = 0;
2340 goto end;
2341 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
2342 ERR("Unexpected cmd_type received %d", msg.cmd_type);
2343 ret = -1;
2344 goto end;
2345 }
2346
2347 len = msg.u.push_metadata.len;
2348 key = msg.u.push_metadata.key;
2349 offset = msg.u.push_metadata.target_offset;
2350
2351 assert(key == channel->key);
2352 if (len == 0) {
2353 DBG("No new metadata to receive for key %" PRIu64, key);
2354 }
2355
2356 health_code_update();
2357
2358 /* Tell session daemon we are ready to receive the metadata. */
2359 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
2360 LTTCOMM_CONSUMERD_SUCCESS);
2361 if (ret < 0 || len == 0) {
2362 /*
2363 * Somehow, the session daemon is not responding anymore or there is
2364 * nothing to receive.
2365 */
2366 goto end;
2367 }
2368
2369 health_code_update();
2370
2371 ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
2372 key, offset, len, channel, timer, wait);
2373 if (ret >= 0) {
2374 /*
2375 * Only send the status msg if the sessiond is alive meaning a positive
2376 * ret code.
2377 */
2378 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
2379 }
2380 ret = 0;
2381
2382 end:
2383 health_code_update();
2384
2385 pthread_mutex_unlock(&ctx->metadata_socket_lock);
2386 return ret;
2387 }
2388
2389 /*
2390 * Return the ustctl call for the get stream id.
2391 */
2392 int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
2393 uint64_t *stream_id)
2394 {
2395 assert(stream);
2396 assert(stream_id);
2397
2398 return ustctl_get_stream_id(stream->ustream, stream_id);
2399 }