Fix: define _LGPL_SOURCE in C files
[lttng-tools.git] / src / common / ust-consumer / ust-consumer.c
1 /*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #define _LGPL_SOURCE
21 #include <assert.h>
22 #include <lttng/ust-ctl.h>
23 #include <poll.h>
24 #include <pthread.h>
25 #include <stdlib.h>
26 #include <string.h>
27 #include <sys/mman.h>
28 #include <sys/socket.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <inttypes.h>
32 #include <unistd.h>
33 #include <urcu/list.h>
34 #include <signal.h>
35
36 #include <bin/lttng-consumerd/health-consumerd.h>
37 #include <common/common.h>
38 #include <common/sessiond-comm/sessiond-comm.h>
39 #include <common/relayd/relayd.h>
40 #include <common/compat/fcntl.h>
41 #include <common/compat/endian.h>
42 #include <common/consumer-metadata-cache.h>
43 #include <common/consumer-stream.h>
44 #include <common/consumer-timer.h>
45 #include <common/utils.h>
46 #include <common/index/index.h>
47
48 #include "ust-consumer.h"
49
50 extern struct lttng_consumer_global_data consumer_data;
51 extern int consumer_poll_timeout;
52 extern volatile int consumer_quit;
53
54 /*
55 * Free channel object and all streams associated with it. This MUST be used
56 * only and only if the channel has _NEVER_ been added to the global channel
57 * hash table.
58 */
59 static void destroy_channel(struct lttng_consumer_channel *channel)
60 {
61 struct lttng_consumer_stream *stream, *stmp;
62
63 assert(channel);
64
65 DBG("UST consumer cleaning stream list");
66
67 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
68 send_node) {
69
70 health_code_update();
71
72 cds_list_del(&stream->send_node);
73 ustctl_destroy_stream(stream->ustream);
74 free(stream);
75 }
76
77 /*
78 * If a channel is available meaning that was created before the streams
79 * were, delete it.
80 */
81 if (channel->uchan) {
82 lttng_ustconsumer_del_channel(channel);
83 }
84 free(channel);
85 }
86
87 /*
88 * Add channel to internal consumer state.
89 *
90 * Returns 0 on success or else a negative value.
91 */
92 static int add_channel(struct lttng_consumer_channel *channel,
93 struct lttng_consumer_local_data *ctx)
94 {
95 int ret = 0;
96
97 assert(channel);
98 assert(ctx);
99
100 if (ctx->on_recv_channel != NULL) {
101 ret = ctx->on_recv_channel(channel);
102 if (ret == 0) {
103 ret = consumer_add_channel(channel, ctx);
104 } else if (ret < 0) {
105 /* Most likely an ENOMEM. */
106 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
107 goto error;
108 }
109 } else {
110 ret = consumer_add_channel(channel, ctx);
111 }
112
113 DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);
114
115 error:
116 return ret;
117 }
118
119 /*
120 * Allocate and return a consumer channel object.
121 */
122 static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
123 const char *pathname, const char *name, uid_t uid, gid_t gid,
124 uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
125 uint64_t tracefile_size, uint64_t tracefile_count,
126 uint64_t session_id_per_pid, unsigned int monitor,
127 unsigned int live_timer_interval)
128 {
129 assert(pathname);
130 assert(name);
131
132 return consumer_allocate_channel(key, session_id, pathname, name, uid,
133 gid, relayd_id, output, tracefile_size,
134 tracefile_count, session_id_per_pid, monitor, live_timer_interval);
135 }
136
137 /*
138 * Allocate and return a consumer stream object. If _alloc_ret is not NULL, the
139 * error value if applicable is set in it else it is kept untouched.
140 *
141 * Return NULL on error else the newly allocated stream object.
142 */
143 static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
144 struct lttng_consumer_channel *channel,
145 struct lttng_consumer_local_data *ctx, int *_alloc_ret)
146 {
147 int alloc_ret;
148 struct lttng_consumer_stream *stream = NULL;
149
150 assert(channel);
151 assert(ctx);
152
153 stream = consumer_allocate_stream(channel->key,
154 key,
155 LTTNG_CONSUMER_ACTIVE_STREAM,
156 channel->name,
157 channel->uid,
158 channel->gid,
159 channel->relayd_id,
160 channel->session_id,
161 cpu,
162 &alloc_ret,
163 channel->type,
164 channel->monitor);
165 if (stream == NULL) {
166 switch (alloc_ret) {
167 case -ENOENT:
168 /*
169 * We could not find the channel. Can happen if cpu hotplug
170 * happens while tearing down.
171 */
172 DBG3("Could not find channel");
173 break;
174 case -ENOMEM:
175 case -EINVAL:
176 default:
177 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
178 break;
179 }
180 goto error;
181 }
182
183 stream->chan = channel;
184
185 error:
186 if (_alloc_ret) {
187 *_alloc_ret = alloc_ret;
188 }
189 return stream;
190 }
191
192 /*
193 * Send the given stream pointer to the corresponding thread.
194 *
195 * Returns 0 on success else a negative value.
196 */
197 static int send_stream_to_thread(struct lttng_consumer_stream *stream,
198 struct lttng_consumer_local_data *ctx)
199 {
200 int ret;
201 struct lttng_pipe *stream_pipe;
202
203 /* Get the right pipe where the stream will be sent. */
204 if (stream->metadata_flag) {
205 ret = consumer_add_metadata_stream(stream);
206 if (ret) {
207 ERR("Consumer add metadata stream %" PRIu64 " failed.",
208 stream->key);
209 goto error;
210 }
211 stream_pipe = ctx->consumer_metadata_pipe;
212 } else {
213 ret = consumer_add_data_stream(stream);
214 if (ret) {
215 ERR("Consumer add stream %" PRIu64 " failed.",
216 stream->key);
217 goto error;
218 }
219 stream_pipe = ctx->consumer_data_pipe;
220 }
221
222 /*
223 * From this point on, the stream's ownership has been moved away from
224 * the channel and becomes globally visible.
225 */
226 stream->globally_visible = 1;
227
228 ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
229 if (ret < 0) {
230 ERR("Consumer write %s stream to pipe %d",
231 stream->metadata_flag ? "metadata" : "data",
232 lttng_pipe_get_writefd(stream_pipe));
233 if (stream->metadata_flag) {
234 consumer_del_stream_for_metadata(stream);
235 } else {
236 consumer_del_stream_for_data(stream);
237 }
238 }
239 error:
240 return ret;
241 }
242
243 /*
244 * Create streams for the given channel using liblttng-ust-ctl.
245 *
246 * Return 0 on success else a negative value.
247 */
248 static int create_ust_streams(struct lttng_consumer_channel *channel,
249 struct lttng_consumer_local_data *ctx)
250 {
251 int ret, cpu = 0;
252 struct ustctl_consumer_stream *ustream;
253 struct lttng_consumer_stream *stream;
254
255 assert(channel);
256 assert(ctx);
257
258 /*
259 * While a stream is available from ustctl. When NULL is returned, we've
260 * reached the end of the possible stream for the channel.
261 */
262 while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
263 int wait_fd;
264 int ust_metadata_pipe[2];
265
266 health_code_update();
267
268 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
269 ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
270 if (ret < 0) {
271 ERR("Create ust metadata poll pipe");
272 goto error;
273 }
274 wait_fd = ust_metadata_pipe[0];
275 } else {
276 wait_fd = ustctl_stream_get_wait_fd(ustream);
277 }
278
279 /* Allocate consumer stream object. */
280 stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
281 if (!stream) {
282 goto error_alloc;
283 }
284 stream->ustream = ustream;
285 /*
286 * Store it so we can save multiple function calls afterwards since
287 * this value is used heavily in the stream threads. This is UST
288 * specific so this is why it's done after allocation.
289 */
290 stream->wait_fd = wait_fd;
291
292 /*
293 * Increment channel refcount since the channel reference has now been
294 * assigned in the allocation process above.
295 */
296 if (stream->chan->monitor) {
297 uatomic_inc(&stream->chan->refcount);
298 }
299
300 /*
301 * Order is important this is why a list is used. On error, the caller
302 * should clean this list.
303 */
304 cds_list_add_tail(&stream->send_node, &channel->streams.head);
305
306 ret = ustctl_get_max_subbuf_size(stream->ustream,
307 &stream->max_sb_size);
308 if (ret < 0) {
309 ERR("ustctl_get_max_subbuf_size failed for stream %s",
310 stream->name);
311 goto error;
312 }
313
314 /* Do actions once stream has been received. */
315 if (ctx->on_recv_stream) {
316 ret = ctx->on_recv_stream(stream);
317 if (ret < 0) {
318 goto error;
319 }
320 }
321
322 DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
323 stream->name, stream->key, stream->relayd_stream_id);
324
325 /* Set next CPU stream. */
326 channel->streams.count = ++cpu;
327
328 /* Keep stream reference when creating metadata. */
329 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
330 channel->metadata_stream = stream;
331 stream->ust_metadata_poll_pipe[0] = ust_metadata_pipe[0];
332 stream->ust_metadata_poll_pipe[1] = ust_metadata_pipe[1];
333 }
334 }
335
336 return 0;
337
338 error:
339 error_alloc:
340 return ret;
341 }
342
343 /*
344 * Create an UST channel with the given attributes and send it to the session
345 * daemon using the ust ctl API.
346 *
347 * Return 0 on success or else a negative value.
348 */
349 static int create_ust_channel(struct ustctl_consumer_channel_attr *attr,
350 struct ustctl_consumer_channel **chanp)
351 {
352 int ret;
353 struct ustctl_consumer_channel *channel;
354
355 assert(attr);
356 assert(chanp);
357
358 DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
359 "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
360 "switch_timer_interval: %u, read_timer_interval: %u, "
361 "output: %d, type: %d", attr->overwrite, attr->subbuf_size,
362 attr->num_subbuf, attr->switch_timer_interval,
363 attr->read_timer_interval, attr->output, attr->type);
364
365 channel = ustctl_create_channel(attr);
366 if (!channel) {
367 ret = -1;
368 goto error_create;
369 }
370
371 *chanp = channel;
372
373 return 0;
374
375 error_create:
376 return ret;
377 }
378
379 /*
380 * Send a single given stream to the session daemon using the sock.
381 *
382 * Return 0 on success else a negative value.
383 */
384 static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
385 {
386 int ret;
387
388 assert(stream);
389 assert(sock >= 0);
390
391 DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
392
393 /* Send stream to session daemon. */
394 ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
395 if (ret < 0) {
396 goto error;
397 }
398
399 error:
400 return ret;
401 }
402
403 /*
404 * Send channel to sessiond.
405 *
406 * Return 0 on success or else a negative value.
407 */
408 static int send_sessiond_channel(int sock,
409 struct lttng_consumer_channel *channel,
410 struct lttng_consumer_local_data *ctx, int *relayd_error)
411 {
412 int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
413 struct lttng_consumer_stream *stream;
414 uint64_t net_seq_idx = -1ULL;
415
416 assert(channel);
417 assert(ctx);
418 assert(sock >= 0);
419
420 DBG("UST consumer sending channel %s to sessiond", channel->name);
421
422 if (channel->relayd_id != (uint64_t) -1ULL) {
423 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
424
425 health_code_update();
426
427 /* Try to send the stream to the relayd if one is available. */
428 ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
429 if (ret < 0) {
430 /*
431 * Flag that the relayd was the problem here probably due to a
432 * communicaton error on the socket.
433 */
434 if (relayd_error) {
435 *relayd_error = 1;
436 }
437 ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
438 }
439 if (net_seq_idx == -1ULL) {
440 net_seq_idx = stream->net_seq_idx;
441 }
442 }
443 }
444
445 /* Inform sessiond that we are about to send channel and streams. */
446 ret = consumer_send_status_msg(sock, ret_code);
447 if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
448 /*
449 * Either the session daemon is not responding or the relayd died so we
450 * stop now.
451 */
452 goto error;
453 }
454
455 /* Send channel to sessiond. */
456 ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
457 if (ret < 0) {
458 goto error;
459 }
460
461 ret = ustctl_channel_close_wakeup_fd(channel->uchan);
462 if (ret < 0) {
463 goto error;
464 }
465
466 /* The channel was sent successfully to the sessiond at this point. */
467 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
468
469 health_code_update();
470
471 /* Send stream to session daemon. */
472 ret = send_sessiond_stream(sock, stream);
473 if (ret < 0) {
474 goto error;
475 }
476 }
477
478 /* Tell sessiond there is no more stream. */
479 ret = ustctl_send_stream_to_sessiond(sock, NULL);
480 if (ret < 0) {
481 goto error;
482 }
483
484 DBG("UST consumer NULL stream sent to sessiond");
485
486 return 0;
487
488 error:
489 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
490 ret = -1;
491 }
492 return ret;
493 }
494
495 /*
496 * Creates a channel and streams and add the channel it to the channel internal
497 * state. The created stream must ONLY be sent once the GET_CHANNEL command is
498 * received.
499 *
500 * Return 0 on success or else, a negative value is returned and the channel
501 * MUST be destroyed by consumer_del_channel().
502 */
503 static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
504 struct lttng_consumer_channel *channel,
505 struct ustctl_consumer_channel_attr *attr)
506 {
507 int ret;
508
509 assert(ctx);
510 assert(channel);
511 assert(attr);
512
513 /*
514 * This value is still used by the kernel consumer since for the kernel,
515 * the stream ownership is not IN the consumer so we need to have the
516 * number of left stream that needs to be initialized so we can know when
517 * to delete the channel (see consumer.c).
518 *
519 * As for the user space tracer now, the consumer creates and sends the
520 * stream to the session daemon which only sends them to the application
521 * once every stream of a channel is received making this value useless
522 * because we they will be added to the poll thread before the application
523 * receives them. This ensures that a stream can not hang up during
524 * initilization of a channel.
525 */
526 channel->nb_init_stream_left = 0;
527
528 /* The reply msg status is handled in the following call. */
529 ret = create_ust_channel(attr, &channel->uchan);
530 if (ret < 0) {
531 goto end;
532 }
533
534 channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
535
536 /*
537 * For the snapshots (no monitor), we create the metadata streams
538 * on demand, not during the channel creation.
539 */
540 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
541 ret = 0;
542 goto end;
543 }
544
545 /* Open all streams for this channel. */
546 ret = create_ust_streams(channel, ctx);
547 if (ret < 0) {
548 goto end;
549 }
550
551 end:
552 return ret;
553 }
554
555 /*
556 * Send all stream of a channel to the right thread handling it.
557 *
558 * On error, return a negative value else 0 on success.
559 */
560 static int send_streams_to_thread(struct lttng_consumer_channel *channel,
561 struct lttng_consumer_local_data *ctx)
562 {
563 int ret = 0;
564 struct lttng_consumer_stream *stream, *stmp;
565
566 assert(channel);
567 assert(ctx);
568
569 /* Send streams to the corresponding thread. */
570 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
571 send_node) {
572
573 health_code_update();
574
575 /* Sending the stream to the thread. */
576 ret = send_stream_to_thread(stream, ctx);
577 if (ret < 0) {
578 /*
579 * If we are unable to send the stream to the thread, there is
580 * a big problem so just stop everything.
581 */
582 /* Remove node from the channel stream list. */
583 cds_list_del(&stream->send_node);
584 goto error;
585 }
586
587 /* Remove node from the channel stream list. */
588 cds_list_del(&stream->send_node);
589
590 }
591
592 error:
593 return ret;
594 }
595
596 /*
597 * Flush channel's streams using the given key to retrieve the channel.
598 *
599 * Return 0 on success else an LTTng error code.
600 */
601 static int flush_channel(uint64_t chan_key)
602 {
603 int ret = 0;
604 struct lttng_consumer_channel *channel;
605 struct lttng_consumer_stream *stream;
606 struct lttng_ht *ht;
607 struct lttng_ht_iter iter;
608
609 DBG("UST consumer flush channel key %" PRIu64, chan_key);
610
611 rcu_read_lock();
612 channel = consumer_find_channel(chan_key);
613 if (!channel) {
614 ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
615 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
616 goto error;
617 }
618
619 ht = consumer_data.stream_per_chan_id_ht;
620
621 /* For each stream of the channel id, flush it. */
622 cds_lfht_for_each_entry_duplicate(ht->ht,
623 ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
624 &channel->key, &iter.iter, stream, node_channel_id.node) {
625
626 health_code_update();
627
628 ustctl_flush_buffer(stream->ustream, 1);
629 }
630 error:
631 rcu_read_unlock();
632 return ret;
633 }
634
635 /*
636 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
637 * RCU read side lock MUST be acquired before calling this function.
638 *
639 * Return 0 on success else an LTTng error code.
640 */
641 static int close_metadata(uint64_t chan_key)
642 {
643 int ret = 0;
644 struct lttng_consumer_channel *channel;
645
646 DBG("UST consumer close metadata key %" PRIu64, chan_key);
647
648 channel = consumer_find_channel(chan_key);
649 if (!channel) {
650 /*
651 * This is possible if the metadata thread has issue a delete because
652 * the endpoint point of the stream hung up. There is no way the
653 * session daemon can know about it thus use a DBG instead of an actual
654 * error.
655 */
656 DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
657 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
658 goto error;
659 }
660
661 pthread_mutex_lock(&consumer_data.lock);
662 pthread_mutex_lock(&channel->lock);
663
664 if (cds_lfht_is_node_deleted(&channel->node.node)) {
665 goto error_unlock;
666 }
667
668 lttng_ustconsumer_close_metadata(channel);
669
670 error_unlock:
671 pthread_mutex_unlock(&channel->lock);
672 pthread_mutex_unlock(&consumer_data.lock);
673 error:
674 return ret;
675 }
676
677 /*
678 * RCU read side lock MUST be acquired before calling this function.
679 *
680 * Return 0 on success else an LTTng error code.
681 */
682 static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
683 {
684 int ret;
685 struct lttng_consumer_channel *metadata;
686
687 DBG("UST consumer setup metadata key %" PRIu64, key);
688
689 metadata = consumer_find_channel(key);
690 if (!metadata) {
691 ERR("UST consumer push metadata %" PRIu64 " not found", key);
692 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
693 goto end;
694 }
695
696 /*
697 * In no monitor mode, the metadata channel has no stream(s) so skip the
698 * ownership transfer to the metadata thread.
699 */
700 if (!metadata->monitor) {
701 DBG("Metadata channel in no monitor");
702 ret = 0;
703 goto end;
704 }
705
706 /*
707 * Send metadata stream to relayd if one available. Availability is
708 * known if the stream is still in the list of the channel.
709 */
710 if (cds_list_empty(&metadata->streams.head)) {
711 ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
712 ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
713 goto error_no_stream;
714 }
715
716 /* Send metadata stream to relayd if needed. */
717 if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
718 ret = consumer_send_relayd_stream(metadata->metadata_stream,
719 metadata->pathname);
720 if (ret < 0) {
721 ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
722 goto error;
723 }
724 ret = consumer_send_relayd_streams_sent(
725 metadata->metadata_stream->net_seq_idx);
726 if (ret < 0) {
727 ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
728 goto error;
729 }
730 }
731
732 ret = send_streams_to_thread(metadata, ctx);
733 if (ret < 0) {
734 /*
735 * If we are unable to send the stream to the thread, there is
736 * a big problem so just stop everything.
737 */
738 ret = LTTCOMM_CONSUMERD_FATAL;
739 goto error;
740 }
741 /* List MUST be empty after or else it could be reused. */
742 assert(cds_list_empty(&metadata->streams.head));
743
744 ret = 0;
745 goto end;
746
747 error:
748 /*
749 * Delete metadata channel on error. At this point, the metadata stream can
750 * NOT be monitored by the metadata thread thus having the guarantee that
751 * the stream is still in the local stream list of the channel. This call
752 * will make sure to clean that list.
753 */
754 consumer_stream_destroy(metadata->metadata_stream, NULL);
755 cds_list_del(&metadata->metadata_stream->send_node);
756 metadata->metadata_stream = NULL;
757 error_no_stream:
758 end:
759 return ret;
760 }
761
762 /*
763 * Snapshot the whole metadata.
764 *
765 * Returns 0 on success, < 0 on error
766 */
767 static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
768 struct lttng_consumer_local_data *ctx)
769 {
770 int ret = 0;
771 struct lttng_consumer_channel *metadata_channel;
772 struct lttng_consumer_stream *metadata_stream;
773
774 assert(path);
775 assert(ctx);
776
777 DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
778 key, path);
779
780 rcu_read_lock();
781
782 metadata_channel = consumer_find_channel(key);
783 if (!metadata_channel) {
784 ERR("UST snapshot metadata channel not found for key %" PRIu64,
785 key);
786 ret = -1;
787 goto error;
788 }
789 assert(!metadata_channel->monitor);
790
791 health_code_update();
792
793 /*
794 * Ask the sessiond if we have new metadata waiting and update the
795 * consumer metadata cache.
796 */
797 ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
798 if (ret < 0) {
799 goto error;
800 }
801
802 health_code_update();
803
804 /*
805 * The metadata stream is NOT created in no monitor mode when the channel
806 * is created on a sessiond ask channel command.
807 */
808 ret = create_ust_streams(metadata_channel, ctx);
809 if (ret < 0) {
810 goto error;
811 }
812
813 metadata_stream = metadata_channel->metadata_stream;
814 assert(metadata_stream);
815
816 if (relayd_id != (uint64_t) -1ULL) {
817 metadata_stream->net_seq_idx = relayd_id;
818 ret = consumer_send_relayd_stream(metadata_stream, path);
819 if (ret < 0) {
820 goto error_stream;
821 }
822 } else {
823 ret = utils_create_stream_file(path, metadata_stream->name,
824 metadata_stream->chan->tracefile_size,
825 metadata_stream->tracefile_count_current,
826 metadata_stream->uid, metadata_stream->gid, NULL);
827 if (ret < 0) {
828 goto error_stream;
829 }
830 metadata_stream->out_fd = ret;
831 metadata_stream->tracefile_size_current = 0;
832 }
833
834 do {
835 health_code_update();
836
837 ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
838 if (ret < 0) {
839 goto error_stream;
840 }
841 } while (ret > 0);
842
843 error_stream:
844 /*
845 * Clean up the stream completly because the next snapshot will use a new
846 * metadata stream.
847 */
848 consumer_stream_destroy(metadata_stream, NULL);
849 cds_list_del(&metadata_stream->send_node);
850 metadata_channel->metadata_stream = NULL;
851
852 error:
853 rcu_read_unlock();
854 return ret;
855 }
856
857 /*
858 * Take a snapshot of all the stream of a channel.
859 *
860 * Returns 0 on success, < 0 on error
861 */
862 static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
863 uint64_t max_stream_size, struct lttng_consumer_local_data *ctx)
864 {
865 int ret;
866 unsigned use_relayd = 0;
867 unsigned long consumed_pos, produced_pos;
868 struct lttng_consumer_channel *channel;
869 struct lttng_consumer_stream *stream;
870
871 assert(path);
872 assert(ctx);
873
874 rcu_read_lock();
875
876 if (relayd_id != (uint64_t) -1ULL) {
877 use_relayd = 1;
878 }
879
880 channel = consumer_find_channel(key);
881 if (!channel) {
882 ERR("UST snapshot channel not found for key %" PRIu64, key);
883 ret = -1;
884 goto error;
885 }
886 assert(!channel->monitor);
887 DBG("UST consumer snapshot channel %" PRIu64, key);
888
889 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
890
891 health_code_update();
892
893 /* Lock stream because we are about to change its state. */
894 pthread_mutex_lock(&stream->lock);
895 stream->net_seq_idx = relayd_id;
896
897 if (use_relayd) {
898 ret = consumer_send_relayd_stream(stream, path);
899 if (ret < 0) {
900 goto error_unlock;
901 }
902 } else {
903 ret = utils_create_stream_file(path, stream->name,
904 stream->chan->tracefile_size,
905 stream->tracefile_count_current,
906 stream->uid, stream->gid, NULL);
907 if (ret < 0) {
908 goto error_unlock;
909 }
910 stream->out_fd = ret;
911 stream->tracefile_size_current = 0;
912
913 DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
914 stream->name, stream->key);
915 }
916 if (relayd_id != -1ULL) {
917 ret = consumer_send_relayd_streams_sent(relayd_id);
918 if (ret < 0) {
919 goto error_unlock;
920 }
921 }
922
923 ustctl_flush_buffer(stream->ustream, 1);
924
925 ret = lttng_ustconsumer_take_snapshot(stream);
926 if (ret < 0) {
927 ERR("Taking UST snapshot");
928 goto error_unlock;
929 }
930
931 ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
932 if (ret < 0) {
933 ERR("Produced UST snapshot position");
934 goto error_unlock;
935 }
936
937 ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
938 if (ret < 0) {
939 ERR("Consumerd UST snapshot position");
940 goto error_unlock;
941 }
942
943 /*
944 * The original value is sent back if max stream size is larger than
945 * the possible size of the snapshot. Also, we asume that the session
946 * daemon should never send a maximum stream size that is lower than
947 * subbuffer size.
948 */
949 consumed_pos = consumer_get_consumed_maxsize(consumed_pos,
950 produced_pos, max_stream_size);
951
952 while (consumed_pos < produced_pos) {
953 ssize_t read_len;
954 unsigned long len, padded_len;
955
956 health_code_update();
957
958 DBG("UST consumer taking snapshot at pos %lu", consumed_pos);
959
960 ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
961 if (ret < 0) {
962 if (ret != -EAGAIN) {
963 PERROR("ustctl_get_subbuf snapshot");
964 goto error_close_stream;
965 }
966 DBG("UST consumer get subbuf failed. Skipping it.");
967 consumed_pos += stream->max_sb_size;
968 continue;
969 }
970
971 ret = ustctl_get_subbuf_size(stream->ustream, &len);
972 if (ret < 0) {
973 ERR("Snapshot ustctl_get_subbuf_size");
974 goto error_put_subbuf;
975 }
976
977 ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
978 if (ret < 0) {
979 ERR("Snapshot ustctl_get_padded_subbuf_size");
980 goto error_put_subbuf;
981 }
982
983 read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
984 padded_len - len, NULL);
985 if (use_relayd) {
986 if (read_len != len) {
987 ret = -EPERM;
988 goto error_put_subbuf;
989 }
990 } else {
991 if (read_len != padded_len) {
992 ret = -EPERM;
993 goto error_put_subbuf;
994 }
995 }
996
997 ret = ustctl_put_subbuf(stream->ustream);
998 if (ret < 0) {
999 ERR("Snapshot ustctl_put_subbuf");
1000 goto error_close_stream;
1001 }
1002 consumed_pos += stream->max_sb_size;
1003 }
1004
1005 /* Simply close the stream so we can use it on the next snapshot. */
1006 consumer_stream_close(stream);
1007 pthread_mutex_unlock(&stream->lock);
1008 }
1009
1010 rcu_read_unlock();
1011 return 0;
1012
1013 error_put_subbuf:
1014 if (ustctl_put_subbuf(stream->ustream) < 0) {
1015 ERR("Snapshot ustctl_put_subbuf");
1016 }
1017 error_close_stream:
1018 consumer_stream_close(stream);
1019 error_unlock:
1020 pthread_mutex_unlock(&stream->lock);
1021 error:
1022 rcu_read_unlock();
1023 return ret;
1024 }
1025
1026 /*
1027 * Receive the metadata updates from the sessiond.
1028 */
1029 int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
1030 uint64_t len, struct lttng_consumer_channel *channel,
1031 int timer, int wait)
1032 {
1033 int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1034 char *metadata_str;
1035
1036 DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);
1037
1038 metadata_str = zmalloc(len * sizeof(char));
1039 if (!metadata_str) {
1040 PERROR("zmalloc metadata string");
1041 ret_code = LTTCOMM_CONSUMERD_ENOMEM;
1042 goto end;
1043 }
1044
1045 health_code_update();
1046
1047 /* Receive metadata string. */
1048 ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
1049 if (ret < 0) {
1050 /* Session daemon is dead so return gracefully. */
1051 ret_code = ret;
1052 goto end_free;
1053 }
1054
1055 health_code_update();
1056
1057 pthread_mutex_lock(&channel->metadata_cache->lock);
1058 ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
1059 if (ret < 0) {
1060 /* Unable to handle metadata. Notify session daemon. */
1061 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
1062 /*
1063 * Skip metadata flush on write error since the offset and len might
1064 * not have been updated which could create an infinite loop below when
1065 * waiting for the metadata cache to be flushed.
1066 */
1067 pthread_mutex_unlock(&channel->metadata_cache->lock);
1068 goto end_free;
1069 }
1070 pthread_mutex_unlock(&channel->metadata_cache->lock);
1071
1072 if (!wait) {
1073 goto end_free;
1074 }
1075 while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
1076 DBG("Waiting for metadata to be flushed");
1077
1078 health_code_update();
1079
1080 usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
1081 }
1082
1083 end_free:
1084 free(metadata_str);
1085 end:
1086 return ret_code;
1087 }
1088
1089 /*
1090 * Receive command from session daemon and process it.
1091 *
1092 * Return 1 on success else a negative value or 0.
1093 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	ssize_t ret;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = NULL;

	health_code_update();

	/* Receive one full command message from the session daemon socket. */
	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		DBG("Consumer received unexpected message size %zd (expects %zu)",
				ret, sizeof(msg));
		/*
		 * The ret value might be 0, meaning an orderly shutdown, but this
		 * is ok since the caller handles this.
		 */
		if (ret > 0) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
			ret = -1;
		}
		return ret;
	}

	health_code_update();

	/* deprecated */
	assert(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side lock */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		/* Session daemon status message are handled in the following call. */
		ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
				&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		}

		/*
		 * Each relayd socket pair has a refcount of stream attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		if (relayd) {
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		/* Not implemented for the UST consumer; release RCU before bailing. */
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int ret, is_data_pending;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret < 0) {
			DBG("Error when sending the data pending ret code: %d", ret);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret;
		struct ustctl_consumer_channel_attr attr;

		/* Create a plain object and reserve a channel key. */
		channel = allocate_channel(msg.u.ask_channel.session_id,
				msg.u.ask_channel.pathname, msg.u.ask_channel.name,
				msg.u.ask_channel.uid, msg.u.ask_channel.gid,
				msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval);
		if (!channel) {
			goto end_channel_error;
		}

		/*
		 * Assign UST application UID to the channel. This value is ignored for
		 * per PID buffers. This is specific to UST thus setting this after the
		 * allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));

		/* Match channel buffer type to the UST abi. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
		default:
			attr.output = LTTNG_UST_MMAP;
			break;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_CHAN_METADATA;
			break;
		default:
			assert(0);
			goto error_fatal;
		};

		health_code_update();

		/* Create the UST channel/streams and send them to the sessiond. */
		ret = ask_channel(ctx, sock, channel, &attr);
		if (ret < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
			ret = consumer_metadata_cache_allocate(channel);
			if (ret < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			/* Metadata uses the consumer-side switch timer, not UST's. */
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to session daemon. This way, all streams must
		 * be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret = add_channel(channel, ctx);
		if (ret < 0) {
			/* Undo the timer/cache setup done just above before erroring out. */
			if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret = consumer_send_status_channel(sock, channel);
		if (ret < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *channel;

		channel = consumer_find_channel(key);
		if (!channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_msg_sessiond;
		}

		health_code_update();

		/* Send everything to sessiond. */
		ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send to the relayd the stream so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_nosignal;
			}
			/*
			 * The communication was broken hence there is a bad state between
			 * the consumer and sessiond so stop everything.
			 */
			goto error_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!channel->monitor) {
			goto end_msg_sessiond;
		}

		ret = send_streams_to_thread(channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		assert(cds_list_empty(&channel->streams.head));
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		struct lttng_consumer_channel *channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		channel = consumer_find_channel(key);
		if (!channel) {
			/*
			 * This is possible if the metadata creation on the consumer side
			 * is in flight vis-a-vis a concurrent push metadata from the
			 * session daemon. Simply return that the channel failed and the
			 * session daemon will handle that message correctly considering
			 * that this race is acceptable thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_msg_sessiond;
		}

		health_code_update();

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto error_fatal;
		}

		health_code_update();

		/* Receive the payload and append it to the metadata cache. */
		ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
				len, channel, 0, 1);
		if (ret < 0) {
			/* error receiving from sessiond */
			goto error_fatal;
		} else {
			ret_code = ret;
			goto end_msg_sessiond;
		}
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		/* Snapshot either the metadata or the data channel. */
		if (msg.u.snapshot_channel.metadata) {
			ret = snapshot_metadata(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					ctx);
			if (ret < 0) {
				ERR("Snapshot metadata failed");
				ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
			}
		} else {
			ret = snapshot_channel(msg.u.snapshot_channel.key,
					msg.u.snapshot_channel.pathname,
					msg.u.snapshot_channel.relayd_id,
					msg.u.snapshot_channel.max_stream_size,
					ctx);
			if (ret < 0) {
				ERR("Snapshot channel failed");
				ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			}
		}

		health_code_update();
		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		health_code_update();
		break;
	}
	default:
		break;
	}

end_nosignal:
	rcu_read_unlock();

	health_code_update();

	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	return 1;

end_msg_sessiond:
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		goto error_fatal;
	}
	rcu_read_unlock();

	health_code_update();

	return 1;
end_channel_error:
	if (channel) {
		/*
		 * Free channel here since no one has a reference to it. We don't
		 * free after that because a stream can store this pointer.
		 */
		destroy_channel(channel);
	}
	/* We have to send a status channel message indicating an error. */
	ret = consumer_send_status_channel(sock, NULL);
	if (ret < 0) {
		/* Stop everything if session daemon can not be notified. */
		goto error_fatal;
	}
	rcu_read_unlock();

	health_code_update();

	return 1;
error_fatal:
	rcu_read_unlock();
	/* This will issue a consumer stop. */
	return -1;
}
1564
1565 /*
1566 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
1567 * compiled out, we isolate it in this library.
1568 */
1569 int lttng_ustctl_get_mmap_read_offset(struct lttng_consumer_stream *stream,
1570 unsigned long *off)
1571 {
1572 assert(stream);
1573 assert(stream->ustream);
1574
1575 return ustctl_get_mmap_read_offset(stream->ustream, off);
1576 }
1577
1578 /*
1579 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
1580 * compiled out, we isolate it in this library.
1581 */
1582 void *lttng_ustctl_get_mmap_base(struct lttng_consumer_stream *stream)
1583 {
1584 assert(stream);
1585 assert(stream->ustream);
1586
1587 return ustctl_get_mmap_base(stream->ustream);
1588 }
1589
1590 /*
1591 * Take a snapshot for a specific fd
1592 *
1593 * Returns 0 on success, < 0 on error
1594 */
1595 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
1596 {
1597 assert(stream);
1598 assert(stream->ustream);
1599
1600 return ustctl_snapshot(stream->ustream);
1601 }
1602
1603 /*
1604 * Get the produced position
1605 *
1606 * Returns 0 on success, < 0 on error
1607 */
1608 int lttng_ustconsumer_get_produced_snapshot(
1609 struct lttng_consumer_stream *stream, unsigned long *pos)
1610 {
1611 assert(stream);
1612 assert(stream->ustream);
1613 assert(pos);
1614
1615 return ustctl_snapshot_get_produced(stream->ustream, pos);
1616 }
1617
1618 /*
1619 * Get the consumed position
1620 *
1621 * Returns 0 on success, < 0 on error
1622 */
1623 int lttng_ustconsumer_get_consumed_snapshot(
1624 struct lttng_consumer_stream *stream, unsigned long *pos)
1625 {
1626 assert(stream);
1627 assert(stream->ustream);
1628 assert(pos);
1629
1630 return ustctl_snapshot_get_consumed(stream->ustream, pos);
1631 }
1632
1633 void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
1634 int producer)
1635 {
1636 assert(stream);
1637 assert(stream->ustream);
1638
1639 ustctl_flush_buffer(stream->ustream, producer);
1640 }
1641
1642 int lttng_ustconsumer_get_current_timestamp(
1643 struct lttng_consumer_stream *stream, uint64_t *ts)
1644 {
1645 assert(stream);
1646 assert(stream->ustream);
1647 assert(ts);
1648
1649 return ustctl_get_current_timestamp(stream->ustream, ts);
1650 }
1651
1652 /*
1653 * Called when the stream signal the consumer that it has hang up.
1654 */
1655 void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
1656 {
1657 assert(stream);
1658 assert(stream->ustream);
1659
1660 ustctl_flush_buffer(stream->ustream, 0);
1661 stream->hangup_flush_done = 1;
1662 }
1663
1664 void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
1665 {
1666 assert(chan);
1667 assert(chan->uchan);
1668
1669 if (chan->switch_timer_enabled == 1) {
1670 consumer_timer_switch_stop(chan);
1671 }
1672 consumer_metadata_cache_destroy(chan);
1673 ustctl_destroy_channel(chan->uchan);
1674 }
1675
1676 void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
1677 {
1678 assert(stream);
1679 assert(stream->ustream);
1680
1681 if (stream->chan->switch_timer_enabled == 1) {
1682 consumer_timer_switch_stop(stream->chan);
1683 }
1684 ustctl_destroy_stream(stream->ustream);
1685 }
1686
1687 int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
1688 {
1689 assert(stream);
1690 assert(stream->ustream);
1691
1692 return ustctl_stream_get_wakeup_fd(stream->ustream);
1693 }
1694
1695 int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
1696 {
1697 assert(stream);
1698 assert(stream->ustream);
1699
1700 return ustctl_stream_close_wakeup_fd(stream->ustream);
1701 }
1702
1703 /*
1704 * Populate index values of a UST stream. Values are set in big endian order.
1705 *
1706 * Return 0 on success or else a negative value.
1707 */
static int get_index_values(struct ctf_packet_index *index,
		struct ustctl_consumer_stream *ustream)
{
	int ret;

	/*
	 * Each field is read from the current packet through the ust-ctl API,
	 * then byte-swapped to big endian, the CTF index on-disk byte order.
	 * On the first ustctl_* failure we bail out with its negative return;
	 * the index is then only partially filled and must not be written.
	 */
	ret = ustctl_get_timestamp_begin(ustream, &index->timestamp_begin);
	if (ret < 0) {
		PERROR("ustctl_get_timestamp_begin");
		goto error;
	}
	index->timestamp_begin = htobe64(index->timestamp_begin);

	ret = ustctl_get_timestamp_end(ustream, &index->timestamp_end);
	if (ret < 0) {
		PERROR("ustctl_get_timestamp_end");
		goto error;
	}
	index->timestamp_end = htobe64(index->timestamp_end);

	ret = ustctl_get_events_discarded(ustream, &index->events_discarded);
	if (ret < 0) {
		PERROR("ustctl_get_events_discarded");
		goto error;
	}
	index->events_discarded = htobe64(index->events_discarded);

	ret = ustctl_get_content_size(ustream, &index->content_size);
	if (ret < 0) {
		PERROR("ustctl_get_content_size");
		goto error;
	}
	index->content_size = htobe64(index->content_size);

	ret = ustctl_get_packet_size(ustream, &index->packet_size);
	if (ret < 0) {
		PERROR("ustctl_get_packet_size");
		goto error;
	}
	index->packet_size = htobe64(index->packet_size);

	ret = ustctl_get_stream_id(ustream, &index->stream_id);
	if (ret < 0) {
		PERROR("ustctl_get_stream_id");
		goto error;
	}
	index->stream_id = htobe64(index->stream_id);

	/* Success path falls through with ret == 0 from the last call. */
error:
	return ret;
}
1758
1759 /*
1760 * Write up to one packet from the metadata cache to the channel.
1761 *
1762 * Returns the number of bytes pushed in the cache, or a negative value
1763 * on error.
1764 */
static
int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
{
	ssize_t write_len;
	int ret;

	/* The cache lock protects both "contiguous" and "ust_metadata_pushed". */
	pthread_mutex_lock(&stream->chan->metadata_cache->lock);
	if (stream->chan->metadata_cache->contiguous
			== stream->ust_metadata_pushed) {
		/* Everything contiguously available has already been pushed. */
		ret = 0;
		goto end;
	}

	/* Push at most one packet worth of the not-yet-pushed cache region. */
	write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
			&stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
			stream->chan->metadata_cache->contiguous
				- stream->ust_metadata_pushed);
	/* A zero-length write is impossible here since the cache is non-empty. */
	assert(write_len != 0);
	if (write_len < 0) {
		ERR("Writing one metadata packet");
		ret = -1;
		goto end;
	}
	stream->ust_metadata_pushed += write_len;

	/* The pushed offset can never run ahead of the cached data. */
	assert(stream->chan->metadata_cache->contiguous >=
			stream->ust_metadata_pushed);
	ret = write_len;

end:
	pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
	return ret;
}
1798
1799
1800 /*
 * Sync metadata, meaning request it from the session daemon and take a
 * snapshot so the metadata thread can consume it.
 *
 * Metadata stream lock MUST be acquired.
 *
 * Return 0 if new metadata is available, EAGAIN if more extraction is needed,
 * ENODATA if the metadata stream is empty, or a negative value on error.
1808 */
1809 int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
1810 struct lttng_consumer_stream *metadata)
1811 {
1812 int ret;
1813 int retry = 0;
1814
1815 assert(ctx);
1816 assert(metadata);
1817
1818 /*
1819 * Request metadata from the sessiond, but don't wait for the flush
1820 * because we locked the metadata thread.
1821 */
1822 ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
1823 if (ret < 0) {
1824 goto end;
1825 }
1826
1827 ret = commit_one_metadata_packet(metadata);
1828 if (ret <= 0) {
1829 goto end;
1830 } else if (ret > 0) {
1831 retry = 1;
1832 }
1833
1834 ustctl_flush_buffer(metadata->ustream, 1);
1835 ret = ustctl_snapshot(metadata->ustream);
1836 if (ret < 0) {
1837 if (errno != EAGAIN) {
1838 ERR("Sync metadata, taking UST snapshot");
1839 goto end;
1840 }
1841 DBG("No new metadata when syncing them.");
1842 /* No new metadata, exit. */
1843 ret = ENODATA;
1844 goto end;
1845 }
1846
1847 /*
1848 * After this flush, we still need to extract metadata.
1849 */
1850 if (retry) {
1851 ret = EAGAIN;
1852 }
1853
1854 end:
1855 return ret;
1856 }
1857
1858 /*
1859 * Return 0 on success else a negative value.
1860 */
1861 static int notify_if_more_data(struct lttng_consumer_stream *stream,
1862 struct lttng_consumer_local_data *ctx)
1863 {
1864 int ret;
1865 struct ustctl_consumer_stream *ustream;
1866
1867 assert(stream);
1868 assert(ctx);
1869
1870 ustream = stream->ustream;
1871
1872 /*
1873 * First, we are going to check if there is a new subbuffer available
1874 * before reading the stream wait_fd.
1875 */
1876 /* Get the next subbuffer */
1877 ret = ustctl_get_next_subbuf(ustream);
1878 if (ret) {
1879 /* No more data found, flag the stream. */
1880 stream->has_data = 0;
1881 ret = 0;
1882 goto end;
1883 }
1884
1885 ret = ustctl_put_next_subbuf(ustream);
1886 assert(!ret);
1887
1888 /* This stream still has data. Flag it and wake up the data thread. */
1889 stream->has_data = 1;
1890
1891 if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
1892 ssize_t writelen;
1893
1894 writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
1895 if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
1896 ret = writelen;
1897 goto end;
1898 }
1899
1900 /* The wake up pipe has been notified. */
1901 ctx->has_wakeup = 1;
1902 }
1903 ret = 0;
1904
1905 end:
1906 return ret;
1907 }
1908
1909 /*
1910 * Read subbuffer from the given stream.
1911 *
1912 * Stream lock MUST be acquired.
1913 *
1914 * Return 0 on success else a negative value.
1915 */
int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	unsigned long len, subbuf_size, padding;
	int err, write_index = 1;
	long ret = 0;
	struct ustctl_consumer_stream *ustream;
	struct ctf_packet_index index;

	assert(stream);
	assert(stream->ustream);
	assert(ctx);

	DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
			stream->name);

	/* Ease our life for what's next. */
	ustream = stream->ustream;

	/*
	 * We can consume the 1 byte written into the wait_fd by UST. Don't trigger
	 * error if we cannot read this one byte (read returns 0), or if the error
	 * is EAGAIN or EWOULDBLOCK.
	 *
	 * This is only done when the stream is monitored by a thread, before the
	 * flush is done after a hangup and if the stream is not flagged with data
	 * since there might be nothing to consume in the wait fd but still have
	 * data available flagged by the consumer wake up pipe.
	 */
	if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
		char dummy;
		ssize_t readlen;

		readlen = lttng_read(stream->wait_fd, &dummy, 1);
		if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
			ret = readlen;
			goto end;
		}
	}

retry:
	/* Get the next subbuffer */
	err = ustctl_get_next_subbuf(ustream);
	if (err != 0) {
		/*
		 * Populate metadata info if the existing info has
		 * already been read.
		 */
		if (stream->metadata_flag) {
			/*
			 * For metadata, push a packet from the cache into the ring
			 * buffer, flush, and retry the subbuffer acquisition once.
			 */
			ret = commit_one_metadata_packet(stream);
			if (ret <= 0) {
				goto end;
			}
			ustctl_flush_buffer(stream->ustream, 1);
			goto retry;
		}

		ret = err;	/* ustctl_get_next_subbuf returns negative, caller expect positive. */
		/*
		 * This is a debug message even for single-threaded consumer,
		 * because poll() have more relaxed criterions than get subbuf,
		 * so get_subbuf may fail for short race windows where poll()
		 * would issue wakeups.
		 */
		DBG("Reserving sub buffer failed (everything is normal, "
				"it is due to concurrency) [ret: %d]", err);
		goto end;
	}
	assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);

	if (!stream->metadata_flag) {
		/* Data packets carry a CTF index entry; metadata packets do not. */
		index.offset = htobe64(stream->out_fd_offset);
		ret = get_index_values(&index, ustream);
		if (ret < 0) {
			goto end;
		}
	} else {
		write_index = 0;
	}

	/* Get the full padded subbuffer size */
	err = ustctl_get_padded_subbuf_size(ustream, &len);
	assert(err == 0);

	/* Get subbuffer data size (without padding) */
	err = ustctl_get_subbuf_size(ustream, &subbuf_size);
	assert(err == 0);

	/* Make sure we don't get a subbuffer size bigger than the padded */
	assert(len >= subbuf_size);

	padding = len - subbuf_size;
	/* write the subbuffer to the tracefile */
	ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, subbuf_size, padding, &index);
	/*
	 * The mmap operation should write subbuf_size amount of data when network
	 * streaming or the full padding (len) size when we are _not_ streaming.
	 */
	if ((ret != subbuf_size && stream->net_seq_idx != (uint64_t) -1ULL) ||
			(ret != len && stream->net_seq_idx == (uint64_t) -1ULL)) {
		/*
		 * Display the error but continue processing to try to release the
		 * subbuffer. This is a DBG statement since any unexpected kill or
		 * signal, the application gets unregistered, relayd gets closed or
		 * anything that affects the buffer lifetime will trigger this error.
		 * So, for the sake of the user, don't print this error since it can
		 * happen and it is OK with the code flow.
		 */
		DBG("Error writing to tracefile "
				"(ret: %ld != len: %lu != subbuf_size: %lu)",
				ret, len, subbuf_size);
		write_index = 0;
	}
	err = ustctl_put_next_subbuf(ustream);
	assert(err == 0);

	/*
	 * This will consume the byte on the wait_fd if and only if there is not
	 * next subbuffer to be acquired.
	 */
	if (!stream->metadata_flag) {
		ret = notify_if_more_data(stream, ctx);
		if (ret < 0) {
			goto end;
		}
	}

	/* Write index if needed. */
	if (!write_index) {
		goto end;
	}

	if (stream->chan->live_timer_interval && !stream->metadata_flag) {
		/*
		 * In live, block until all the metadata is sent.
		 */
		err = consumer_stream_sync_metadata(ctx, stream->session_id);
		if (err < 0) {
			goto end;
		}
	}

	/* write_index is only left set for data streams at this point. */
	assert(!stream->metadata_flag);
	err = consumer_stream_write_index(stream, &index);
	if (err < 0) {
		goto end;
	}

end:
	return ret;
}
2067
2068 /*
2069 * Called when a stream is created.
2070 *
2071 * Return 0 on success or else a negative value.
2072 */
2073 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
2074 {
2075 int ret;
2076
2077 assert(stream);
2078
2079 /* Don't create anything if this is set for streaming. */
2080 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
2081 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
2082 stream->chan->tracefile_size, stream->tracefile_count_current,
2083 stream->uid, stream->gid, NULL);
2084 if (ret < 0) {
2085 goto error;
2086 }
2087 stream->out_fd = ret;
2088 stream->tracefile_size_current = 0;
2089
2090 if (!stream->metadata_flag) {
2091 ret = index_create_file(stream->chan->pathname,
2092 stream->name, stream->uid, stream->gid,
2093 stream->chan->tracefile_size,
2094 stream->tracefile_count_current);
2095 if (ret < 0) {
2096 goto error;
2097 }
2098 stream->index_fd = ret;
2099 }
2100 }
2101 ret = 0;
2102
2103 error:
2104 return ret;
2105 }
2106
2107 /*
2108 * Check if data is still being extracted from the buffers for a specific
2109 * stream. Consumer data lock MUST be acquired before calling this function
2110 * and the stream lock.
2111 *
2112 * Return 1 if the traced data are still getting read else 0 meaning that the
2113 * data is available for trace viewer reading.
2114 */
int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);
	assert(stream->ustream);

	DBG("UST consumer checking data pending");

	/* A disconnected endpoint cannot produce more data. */
	if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
		ret = 0;
		goto end;
	}

	if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
		uint64_t contiguous, pushed;

		/* Ease our life a bit. */
		contiguous = stream->chan->metadata_cache->contiguous;
		pushed = stream->ust_metadata_pushed;

		/*
		 * We can simply check whether all contiguously available data
		 * has been pushed to the ring buffer, since the push operation
		 * is performed within get_next_subbuf(), and because both
		 * get_next_subbuf() and put_next_subbuf() are issued atomically
		 * thanks to the stream lock within
		 * lttng_ustconsumer_read_subbuffer(). This basically means that
		 * whenever ust_metadata_pushed is incremented, the associated
		 * metadata has been consumed from the metadata stream.
		 */
		DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
				contiguous, pushed);
		assert(((int64_t) (contiguous - pushed)) >= 0);
		/*
		 * NOTE(review): given the assert above, "(contiguous - pushed) > 0"
		 * is equivalent to "contiguous != pushed", so the second clause
		 * only adds the "contiguous == 0" (nothing cached yet) case —
		 * confirm this redundancy is intentional.
		 */
		if ((contiguous != pushed) ||
			(((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
			ret = 1;	/* Data is pending */
			goto end;
		}
	} else {
		/* Data stream: probe the ring buffer for an available subbuffer. */
		ret = ustctl_get_next_subbuf(stream->ustream);
		if (ret == 0) {
			/*
			 * There is still data so let's put back this
			 * subbuffer.
			 */
			ret = ustctl_put_subbuf(stream->ustream);
			assert(ret == 0);
			ret = 1;	/* Data is pending */
			goto end;
		}
	}

	/* Data is NOT pending so ready to be read. */
	ret = 0;

end:
	return ret;
}
2174
2175 /*
2176 * Stop a given metadata channel timer if enabled and close the wait fd which
2177 * is the poll pipe of the metadata stream.
2178 *
2179 * This MUST be called with the metadata channel acquired.
2180 */
void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
{
	int ret;

	assert(metadata);
	assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);

	DBG("Closing metadata channel key %" PRIu64, metadata->key);

	/* Stop the consumer-side switch timer if it is running. */
	if (metadata->switch_timer_enabled == 1) {
		consumer_timer_switch_stop(metadata);
	}

	/* Nothing else to do if no metadata stream was ever attached. */
	if (!metadata->metadata_stream) {
		goto end;
	}

	/*
	 * Closing write side so the thread monitoring the stream wakes up if any
	 * and clean the metadata stream.
	 */
	if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
		ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
		if (ret < 0) {
			PERROR("closing metadata pipe write side");
		}
		/* Mark the fd closed so a second call does not close it again. */
		metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
	}

end:
	return;
}
2213
2214 /*
2215 * Close every metadata stream wait fd of the metadata hash table. This
2216 * function MUST be used very carefully so not to run into a race between the
2217 * metadata thread handling streams and this function closing their wait fd.
2218 *
2219 * For UST, this is used when the session daemon hangs up. Its the metadata
2220 * producer so calling this is safe because we are assured that no state change
2221 * can occur in the metadata thread for the streams in the hash table.
2222 */
void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(metadata_ht);
	assert(metadata_ht->ht);

	DBG("UST consumer closing all metadata streams");

	/* RCU read-side lock protects the lockfree hash table traversal. */
	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
			node.node) {

		health_code_update();

		/* The channel lock is required by lttng_ustconsumer_close_metadata(). */
		pthread_mutex_lock(&stream->chan->lock);
		lttng_ustconsumer_close_metadata(stream->chan);
		pthread_mutex_unlock(&stream->chan->lock);

	}
	rcu_read_unlock();
}
2246
2247 void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
2248 {
2249 int ret;
2250
2251 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
2252 if (ret < 0) {
2253 ERR("Unable to close wakeup fd");
2254 }
2255 }
2256
2257 /*
2258 * Please refer to consumer-timer.c before adding any lock within this
2259 * function or any of its callees. Timers have a very strict locking
2260 * semantic with respect to teardown. Failure to respect this semantic
2261 * introduces deadlocks.
2262 */
2263 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
2264 struct lttng_consumer_channel *channel, int timer, int wait)
2265 {
2266 struct lttcomm_metadata_request_msg request;
2267 struct lttcomm_consumer_msg msg;
2268 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
2269 uint64_t len, key, offset;
2270 int ret;
2271
2272 assert(channel);
2273 assert(channel->metadata_cache);
2274
2275 memset(&request, 0, sizeof(request));
2276
2277 /* send the metadata request to sessiond */
2278 switch (consumer_data.type) {
2279 case LTTNG_CONSUMER64_UST:
2280 request.bits_per_long = 64;
2281 break;
2282 case LTTNG_CONSUMER32_UST:
2283 request.bits_per_long = 32;
2284 break;
2285 default:
2286 request.bits_per_long = 0;
2287 break;
2288 }
2289
2290 request.session_id = channel->session_id;
2291 request.session_id_per_pid = channel->session_id_per_pid;
2292 /*
2293 * Request the application UID here so the metadata of that application can
2294 * be sent back. The channel UID corresponds to the user UID of the session
2295 * used for the rights on the stream file(s).
2296 */
2297 request.uid = channel->ust_app_uid;
2298 request.key = channel->key;
2299
2300 DBG("Sending metadata request to sessiond, session id %" PRIu64
2301 ", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
2302 request.session_id, request.session_id_per_pid, request.uid,
2303 request.key);
2304
2305 pthread_mutex_lock(&ctx->metadata_socket_lock);
2306
2307 health_code_update();
2308
2309 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
2310 sizeof(request));
2311 if (ret < 0) {
2312 ERR("Asking metadata to sessiond");
2313 goto end;
2314 }
2315
2316 health_code_update();
2317
2318 /* Receive the metadata from sessiond */
2319 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
2320 sizeof(msg));
2321 if (ret != sizeof(msg)) {
2322 DBG("Consumer received unexpected message size %d (expects %zu)",
2323 ret, sizeof(msg));
2324 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
2325 /*
2326 * The ret value might 0 meaning an orderly shutdown but this is ok
2327 * since the caller handles this.
2328 */
2329 goto end;
2330 }
2331
2332 health_code_update();
2333
2334 if (msg.cmd_type == LTTNG_ERR_UND) {
2335 /* No registry found */
2336 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
2337 ret_code);
2338 ret = 0;
2339 goto end;
2340 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
2341 ERR("Unexpected cmd_type received %d", msg.cmd_type);
2342 ret = -1;
2343 goto end;
2344 }
2345
2346 len = msg.u.push_metadata.len;
2347 key = msg.u.push_metadata.key;
2348 offset = msg.u.push_metadata.target_offset;
2349
2350 assert(key == channel->key);
2351 if (len == 0) {
2352 DBG("No new metadata to receive for key %" PRIu64, key);
2353 }
2354
2355 health_code_update();
2356
2357 /* Tell session daemon we are ready to receive the metadata. */
2358 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
2359 LTTCOMM_CONSUMERD_SUCCESS);
2360 if (ret < 0 || len == 0) {
2361 /*
2362 * Somehow, the session daemon is not responding anymore or there is
2363 * nothing to receive.
2364 */
2365 goto end;
2366 }
2367
2368 health_code_update();
2369
2370 ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
2371 key, offset, len, channel, timer, wait);
2372 if (ret >= 0) {
2373 /*
2374 * Only send the status msg if the sessiond is alive meaning a positive
2375 * ret code.
2376 */
2377 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
2378 }
2379 ret = 0;
2380
2381 end:
2382 health_code_update();
2383
2384 pthread_mutex_unlock(&ctx->metadata_socket_lock);
2385 return ret;
2386 }
2387
2388 /*
2389 * Return the ustctl call for the get stream id.
2390 */
2391 int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
2392 uint64_t *stream_id)
2393 {
2394 assert(stream);
2395 assert(stream_id);
2396
2397 return ustctl_get_stream_id(stream->ustream, stream_id);
2398 }
This page took 0.132333 seconds and 4 git commands to generate.