lttng-tools.git: src/common/consumer/consumer-stream.c (commit 1e3bfe7853f905addf8cee2efbefb3ccd6f9446f)
1 /*
2 * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
5 *
6 * SPDX-License-Identifier: GPL-2.0-only
7 *
8 */
9
10 #define _LGPL_SOURCE
11 #include <assert.h>
12 #include <inttypes.h>
13 #include <sys/mman.h>
14 #include <unistd.h>
15
16 #include <common/common.h>
17 #include <common/index/index.h>
18 #include <common/kernel-consumer/kernel-consumer.h>
19 #include <common/relayd/relayd.h>
20 #include <common/ust-consumer/ust-consumer.h>
21 #include <common/utils.h>
22 #include <common/consumer/consumer.h>
23 #include <common/consumer/consumer-timer.h>
24 #include <common/consumer/metadata-bucket.h>
25 #include <common/kernel-ctl/kernel-ctl.h>
26
27 #include "consumer-stream.h"
28
29 /*
30 * RCU call to free stream. MUST only be used with call_rcu().
31 */
32 static void free_stream_rcu(struct rcu_head *head)
33 {
34 struct lttng_ht_node_u64 *node =
35 caa_container_of(head, struct lttng_ht_node_u64, head);
36 struct lttng_consumer_stream *stream =
37 caa_container_of(node, struct lttng_consumer_stream, node);
38
39 pthread_mutex_destroy(&stream->lock);
40 free(stream);
41 }
42
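/*
 * Locking helpers used as read_subbuffer_ops callbacks. The locking order
 * is: channel lock, then stream lock and, for metadata streams, the
 * metadata rendez-vous lock last. Unlocking is performed in the reverse
 * order.
 */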
43 static void consumer_stream_data_lock_all(struct lttng_consumer_stream *stream)
44 {
45 pthread_mutex_lock(&stream->chan->lock);
46 pthread_mutex_lock(&stream->lock);
47 }
48
49 static void consumer_stream_data_unlock_all(struct lttng_consumer_stream *stream)
50 {
51 pthread_mutex_unlock(&stream->lock);
52 pthread_mutex_unlock(&stream->chan->lock);
53 }
54
55 static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream)
56 {
57 consumer_stream_data_lock_all(stream);
58 pthread_mutex_lock(&stream->metadata_rdv_lock);
59 }
60
61 static void consumer_stream_metadata_unlock_all(struct lttng_consumer_stream *stream)
62 {
63 pthread_mutex_unlock(&stream->metadata_rdv_lock);
64 consumer_stream_data_unlock_all(stream);
65 }
66
67 /* Only used for data streams. */
68 static int consumer_stream_update_stats(struct lttng_consumer_stream *stream,
69 const struct stream_subbuffer *subbuf)
70 {
71 int ret = 0;
72 uint64_t sequence_number;
73 const uint64_t discarded_events = subbuf->info.data.events_discarded;
74
75 if (!subbuf->info.data.sequence_number.is_set) {
76 /* Command not supported by the tracer. */
77 sequence_number = -1ULL;
78 stream->sequence_number_unavailable = true;
79 } else {
80 sequence_number = subbuf->info.data.sequence_number.value;
81 }
82
83 /*
84 * Start the sequence when we extract the first packet in case we don't
85 * start at 0 (for example if a consumer is not connected to the
86 * session immediately after the beginning).
87 */
88 if (stream->last_sequence_number == -1ULL) {
89 stream->last_sequence_number = sequence_number;
90 } else if (sequence_number > stream->last_sequence_number) {
91 stream->chan->lost_packets += sequence_number -
92 stream->last_sequence_number - 1;
93 } else {
94 /* seq <= last_sequence_number */
95 ERR("Sequence number inconsistent: prev = %" PRIu64
96 ", current = %" PRIu64,
97 stream->last_sequence_number, sequence_number);
98 ret = -1;
99 goto end;
100 }
101 stream->last_sequence_number = sequence_number;
102
103 if (discarded_events < stream->last_discarded_events) {
104 /*
105 * Overflow has occurred. We assume only one wrap-around
106 * has occurred.
107 */
108 stream->chan->discarded_events +=
109 (1ULL << (CAA_BITS_PER_LONG - 1)) -
110 stream->last_discarded_events +
111 discarded_events;
112 } else {
113 stream->chan->discarded_events += discarded_events -
114 stream->last_discarded_events;
115 }
116 stream->last_discarded_events = discarded_events;
117 ret = 0;
118
119 end:
120 return ret;
121 }
122
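/*
 * Fill a CTF packet index entry from the information of a consumed
 * sub-buffer. All fields are converted to big-endian; the optional fields
 * (stream instance id, packet sequence number) default to -1ULL when the
 * tracer does not provide them.
 */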
123 static
124 void ctf_packet_index_populate(struct ctf_packet_index *index,
125 off_t offset, const struct stream_subbuffer *subbuffer)
126 {
127 *index = (typeof(*index)){
128 .offset = htobe64(offset),
129 .packet_size = htobe64(subbuffer->info.data.packet_size),
130 .content_size = htobe64(subbuffer->info.data.content_size),
131 .timestamp_begin = htobe64(
132 subbuffer->info.data.timestamp_begin),
133 .timestamp_end = htobe64(
134 subbuffer->info.data.timestamp_end),
135 .events_discarded = htobe64(
136 subbuffer->info.data.events_discarded),
137 .stream_id = htobe64(subbuffer->info.data.stream_id),
138 .stream_instance_id = htobe64(
139 subbuffer->info.data.stream_instance_id.is_set ?
140 subbuffer->info.data.stream_instance_id.value : -1ULL),
141 .packet_seq_num = htobe64(
142 subbuffer->info.data.sequence_number.is_set ?
143 subbuffer->info.data.sequence_number.value : -1ULL),
144 };
145 }
146
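/*
 * Consume a sub-buffer by writing its contents either to the local trace
 * files or to the relay daemon, depending on whether the stream has a
 * network sequence index. The value returned by
 * lttng_consumer_on_read_subbuffer_mmap() is passed back to the caller.
 */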
147 static ssize_t consumer_stream_consume_mmap(
148 struct lttng_consumer_local_data *ctx,
149 struct lttng_consumer_stream *stream,
150 const struct stream_subbuffer *subbuffer)
151 {
152 const unsigned long padding_size =
153 subbuffer->info.data.padded_subbuf_size -
154 subbuffer->info.data.subbuf_size;
155 const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_mmap(
156 stream, &subbuffer->buffer.buffer, padding_size);
157
158 if (stream->net_seq_idx == -1ULL) {
159 /*
160 * When writing on disk, check that the entire padded sub-buffer was
161 * written to disk.
162 */
163 if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
164 DBG("Failed to write the entire padded subbuffer on disk (written_bytes: %zd, padded subbuffer size %lu)",
165 written_bytes,
166 subbuffer->info.data.padded_subbuf_size);
167 }
168 } else {
169 /*
170 * When streaming over the network, check that only the sub-buffer
171 * (without padding) was written.
172 */
173 if (written_bytes != subbuffer->info.data.subbuf_size) {
174 DBG("Failed to write only the subbuffer over the network (written_bytes: %zd, subbuffer size %lu)",
175 written_bytes,
176 subbuffer->info.data.subbuf_size);
177 }
178 }
179
180 /*
181 * If `lttng_consumer_on_read_subbuffer_mmap()` returned an error, log
182 * it here; the value is passed along to the caller either way.
183 */
184 if (written_bytes < 0) {
185 ERR("Error reading mmap subbuffer: %zd", written_bytes);
186 }
187
188 return written_bytes;
189 }
190
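/*
 * Splice variant of the consumption callback: the padded sub-buffer is
 * consumed through lttng_consumer_on_read_subbuffer_splice() instead of
 * being copied from a memory map.
 */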
191 static ssize_t consumer_stream_consume_splice(
192 struct lttng_consumer_local_data *ctx,
193 struct lttng_consumer_stream *stream,
194 const struct stream_subbuffer *subbuffer)
195 {
196 const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_splice(
197 ctx, stream, subbuffer->info.data.padded_subbuf_size, 0);
198
199 if (written_bytes != subbuffer->info.data.padded_subbuf_size) {
200 DBG("Failed to write the entire padded subbuffer (written_bytes: %zd, padded subbuffer size %lu)",
201 written_bytes,
202 subbuffer->info.data.padded_subbuf_size);
203 }
204
205 /*
206 * If `lttng_consumer_on_read_subbuffer_splice()` returned an error, log
207 * it here; the value is passed along to the caller either way.
208 */
209 if (written_bytes < 0) {
210 ERR("Error reading splice subbuffer: %zd", written_bytes);
211 }
212
213 return written_bytes;
214 }
215
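/*
 * Build the CTF packet index of the sub-buffer that was just consumed and
 * write it, either to the relay daemon or to the local index file. For
 * local traces, the packet offset is derived from the stream's current
 * output offset.
 */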
216 static int consumer_stream_send_index(
217 struct lttng_consumer_stream *stream,
218 const struct stream_subbuffer *subbuffer,
219 struct lttng_consumer_local_data *ctx)
220 {
221 off_t packet_offset = 0;
222 struct ctf_packet_index index = {};
223
224 /*
225 * This is called after consuming the sub-buffer; subtract the
226 * effect of this sub-buffer from the offset.
227 */
228 if (stream->net_seq_idx == (uint64_t) -1ULL) {
229 packet_offset = stream->out_fd_offset -
230 subbuffer->info.data.padded_subbuf_size;
231 }
232
233 ctf_packet_index_populate(&index, packet_offset, subbuffer);
234 return consumer_stream_write_index(stream, &index);
235 }
236
237 /*
238 * Actually do the metadata sync using the given metadata stream.
239 *
240 * Return 0 on success or else a negative value. ENODATA may also be
241 * returned, indicating that there is no metadata available for that stream.
242 */
243 static int do_sync_metadata(struct lttng_consumer_stream *metadata,
244 struct lttng_consumer_local_data *ctx)
245 {
246 int ret;
247 enum sync_metadata_status status;
248
249 assert(metadata);
250 assert(metadata->metadata_flag);
251 assert(ctx);
252
253 /*
254 * In UST, since we have to write the metadata from the cache packet
255 * by packet, we might need to start this procedure multiple times
256 * until all the metadata from the cache has been extracted.
257 */
258 do {
259 /*
260 * Steps :
261 * - Lock the metadata stream
262 * - Check if metadata stream node was deleted before locking.
263 * - if yes, release and return success
264 * - Check if new metadata is ready (flush + snapshot pos)
265 * - If nothing : release and return.
266 * - Lock the metadata_rdv_lock
267 * - Unlock the metadata stream
268 * - cond_wait on metadata_rdv to wait for the wakeup from the
269 * metadata thread
270 * - Unlock the metadata_rdv_lock
271 */
272 pthread_mutex_lock(&metadata->lock);
273
274 /*
275 * There is a possibility that we were able to acquire a reference on the
276 * stream from the RCU hash table but between then and now, the node might
277 * have been deleted just before the lock is acquired. Thus, after locking,
278 * we make sure the metadata node has not been deleted, which would mean
279 * that the buffers are closed.
280 *
281 * In that case, there is no need to sync the metadata hence returning a
282 * success return code.
283 */
284 ret = cds_lfht_is_node_deleted(&metadata->node.node);
285 if (ret) {
286 ret = 0;
287 goto end_unlock_mutex;
288 }
289
290 switch (ctx->type) {
291 case LTTNG_CONSUMER_KERNEL:
292 /*
293 * Empty the metadata cache and flush the current stream.
294 */
295 status = lttng_kconsumer_sync_metadata(metadata);
296 break;
297 case LTTNG_CONSUMER32_UST:
298 case LTTNG_CONSUMER64_UST:
299 /*
300 * Ask the sessiond if we have new metadata waiting and update the
301 * consumer metadata cache.
302 */
303 status = lttng_ustconsumer_sync_metadata(ctx, metadata);
304 break;
305 default:
306 abort();
307 }
308
309 switch (status) {
310 case SYNC_METADATA_STATUS_NEW_DATA:
311 break;
312 case SYNC_METADATA_STATUS_NO_DATA:
313 ret = 0;
314 goto end_unlock_mutex;
315 case SYNC_METADATA_STATUS_ERROR:
316 ret = -1;
317 goto end_unlock_mutex;
318 default:
319 abort();
320 }
321
322 /*
323 * At this point, new metadata has been flushed, so we wait on the
324 * rendez-vous point for the metadata thread to wake us up once it has
325 * finished consuming the metadata, then continue execution.
326 */
327
328 pthread_mutex_lock(&metadata->metadata_rdv_lock);
329
330 /*
331 * Release metadata stream lock so the metadata thread can process it.
332 */
333 pthread_mutex_unlock(&metadata->lock);
334
335 /*
336 * Wait on the rendez-vous point. Once woken up, it means the metadata was
337 * consumed and thus synchronization is achieved.
338 */
339 pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock);
340 pthread_mutex_unlock(&metadata->metadata_rdv_lock);
341 } while (status == SYNC_METADATA_STATUS_NEW_DATA);
342
343 /* Success */
344 return 0;
345
346 end_unlock_mutex:
347 pthread_mutex_unlock(&metadata->lock);
348 return ret;
349 }
350
351 /*
352 * Synchronize the metadata using a given session ID. A successful acquisition
353 * of a metadata stream will trigger a request to the session daemon and a
354 * snapshot so the metadata thread can consume it.
355 *
356 * This function call is a rendez-vous point between the metadata thread and
357 * the data thread.
358 *
359 * Return 0 on success or else a negative value.
360 */
361 int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx,
362 uint64_t session_id)
363 {
364 int ret;
365 struct lttng_consumer_stream *stream = NULL;
366 struct lttng_ht_iter iter;
367 struct lttng_ht *ht;
368
369 assert(ctx);
370
371 /* Ease our life a bit. */
372 ht = consumer_data.stream_list_ht;
373
374 rcu_read_lock();
375
376 /* Search for the metadata streams associated with the given session id. */
377
378 cds_lfht_for_each_entry_duplicate(ht->ht,
379 ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct,
380 &session_id, &iter.iter, stream, node_session_id.node) {
381 if (!stream->metadata_flag) {
382 continue;
383 }
384
385 ret = do_sync_metadata(stream, ctx);
386 if (ret < 0) {
387 goto end;
388 }
389 }
390
391 /*
392 * Force the return code to 0 (success) since ret might be ENODATA, which
393 * is not an error but rather an indication that we should come back later.
394 */
395 ret = 0;
396
397 end:
398 rcu_read_unlock();
399 return ret;
400 }
401
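/*
 * Post-consumption callback used for live sessions: block until the
 * metadata for this session has been pushed and consumed, then send the
 * data packet index. If a metadata flush was missed while waiting, emit a
 * live beacon before sending the index.
 */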
402 static int consumer_stream_sync_metadata_index(
403 struct lttng_consumer_stream *stream,
404 const struct stream_subbuffer *subbuffer,
405 struct lttng_consumer_local_data *ctx)
406 {
407 int ret;
408
409 /* Block until all the metadata is sent. */
410 pthread_mutex_lock(&stream->metadata_timer_lock);
411 assert(!stream->missed_metadata_flush);
412 stream->waiting_on_metadata = true;
413 pthread_mutex_unlock(&stream->metadata_timer_lock);
414
415 ret = consumer_stream_sync_metadata(ctx, stream->session_id);
416
417 pthread_mutex_lock(&stream->metadata_timer_lock);
418 stream->waiting_on_metadata = false;
419 if (stream->missed_metadata_flush) {
420 stream->missed_metadata_flush = false;
421 pthread_mutex_unlock(&stream->metadata_timer_lock);
422 (void) stream->read_subbuffer_ops.send_live_beacon(stream);
423 } else {
424 pthread_mutex_unlock(&stream->metadata_timer_lock);
425 }
426 if (ret < 0) {
427 goto end;
428 }
429
430 ret = consumer_stream_send_index(stream, subbuffer, ctx);
431 end:
432 return ret;
433 }
434
435 /*
436 * Check if the local version of the metadata stream matches with the version
437 * of the metadata stream in the kernel. If it was updated, set the reset flag
438 * on the stream.
439 */
440 static
441 int metadata_stream_check_version(struct lttng_consumer_stream *stream,
442 const struct stream_subbuffer *subbuffer)
443 {
444 if (stream->metadata_version == subbuffer->info.metadata.version) {
445 goto end;
446 }
447
448 DBG("New metadata version detected");
449 consumer_stream_metadata_set_version(stream,
450 subbuffer->info.metadata.version);
451
452 if (stream->read_subbuffer_ops.reset_metadata) {
453 stream->read_subbuffer_ops.reset_metadata(stream);
454 }
455
456 end:
457 return 0;
458 }
459
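/*
 * Determine whether an ongoing rotation will leave the stream without an
 * active trace chunk (i.e. the stream is rotating to a "null" trace
 * chunk).
 */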
460 static
461 bool stream_is_rotating_to_null_chunk(
462 const struct lttng_consumer_stream *stream)
463 {
464 bool rotating_to_null_chunk = false;
465
466 if (stream->rotate_position == -1ULL) {
467 /* No rotation ongoing. */
468 goto end;
469 }
470
471 if (stream->trace_chunk == stream->chan->trace_chunk ||
472 !stream->chan->trace_chunk) {
473 rotating_to_null_chunk = true;
474 }
475 end:
476 return rotating_to_null_chunk;
477 }
478
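/*
 * Open a packet in the stream's ring buffer by flushing an empty packet.
 * The produced position is sampled before and after the flush; if it did
 * not move, there was no space left in the ring buffer and no packet was
 * opened.
 */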
479 enum consumer_stream_open_packet_status consumer_stream_open_packet(
480 struct lttng_consumer_stream *stream)
481 {
482 int ret;
483 enum consumer_stream_open_packet_status status;
484 unsigned long produced_pos_before, produced_pos_after;
485
486 ret = lttng_consumer_sample_snapshot_positions(stream);
487 if (ret < 0) {
488 ERR("Failed to snapshot positions before post-rotation empty packet flush: stream id = %" PRIu64
489 ", channel name = %s, session id = %" PRIu64,
490 stream->key, stream->chan->name,
491 stream->chan->session_id);
492 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
493 goto end;
494 }
495
496 ret = lttng_consumer_get_produced_snapshot(
497 stream, &produced_pos_before);
498 if (ret < 0) {
499 ERR("Failed to read produced position before post-rotation empty packet flush: stream id = %" PRIu64
500 ", channel name = %s, session id = %" PRIu64,
501 stream->key, stream->chan->name,
502 stream->chan->session_id);
503 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
504 goto end;
505 }
506
507 ret = consumer_stream_flush_buffer(stream, 0);
508 if (ret) {
509 ERR("Failed to flush an empty packet at rotation point: stream id = %" PRIu64
510 ", channel name = %s, session id = %" PRIu64,
511 stream->key, stream->chan->name,
512 stream->chan->session_id);
513 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
514 goto end;
515 }
516
517 ret = lttng_consumer_sample_snapshot_positions(stream);
518 if (ret < 0) {
519 ERR("Failed to snapshot positions after post-rotation empty packet flush: stream id = %" PRIu64
520 ", channel name = %s, session id = %" PRIu64,
521 stream->key, stream->chan->name,
522 stream->chan->session_id);
523 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
524 goto end;
525 }
526
527 ret = lttng_consumer_get_produced_snapshot(stream, &produced_pos_after);
528 if (ret < 0) {
529 ERR("Failed to read produced position after post-rotation empty packet flush: stream id = %" PRIu64
530 ", channel name = %s, session id = %" PRIu64,
531 stream->key, stream->chan->name,
532 stream->chan->session_id);
533 status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
534 goto end;
535 }
536
537 /*
538 * Determine if the flush had an effect by comparing the produced
539 * positions before and after the flush.
540 */
541 status = produced_pos_before != produced_pos_after ?
542 CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED :
543 CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE;
544 if (status == CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED) {
545 stream->opened_packet_in_current_trace_chunk = true;
546 }
547
548 end:
549 return status;
550 }
551
552 /*
553 * An attempt to open a new packet is performed after a rotation completes to
554 * get a begin timestamp as close as possible to the rotation point.
555 *
556 * However, that initial attempt at opening a packet can fail due to a full
557 * ring-buffer. In that case, a second attempt is performed after consuming
558 * a packet since that will have freed enough space in the ring-buffer.
559 */
560 static
561 int post_consume_open_new_packet(struct lttng_consumer_stream *stream,
562 const struct stream_subbuffer *subbuffer,
563 struct lttng_consumer_local_data *ctx)
564 {
565 int ret = 0;
566
567 if (!stream->opened_packet_in_current_trace_chunk &&
568 stream->trace_chunk &&
569 !stream_is_rotating_to_null_chunk(stream)) {
570 const enum consumer_stream_open_packet_status status =
571 consumer_stream_open_packet(stream);
572
573 switch (status) {
574 case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
575 DBG("Opened a packet after consuming a packet rotation: stream id = %" PRIu64
576 ", channel name = %s, session id = %" PRIu64,
577 stream->key, stream->chan->name,
578 stream->chan->session_id);
579 stream->opened_packet_in_current_trace_chunk = true;
580 break;
581 case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
582 /*
583 * Can't open a packet as there is no space left.
584 * This means that new events were produced, resulting
585 * in a packet being opened, which is what we want
586 * anyhow.
587 */
588 DBG("No space left to open a packet after consuming a packet: stream id = %" PRIu64
589 ", channel name = %s, session id = %" PRIu64,
590 stream->key, stream->chan->name,
591 stream->chan->session_id);
592 stream->opened_packet_in_current_trace_chunk = true;
593 break;
594 case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
595 /* Logged by callee. */
596 ret = -1;
597 goto end;
598 default:
599 abort();
600 }
601
602 stream->opened_packet_in_current_trace_chunk = true;
603 }
604
605 end:
606 return ret;
607 }
608
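/*
 * Allocate and initialize a consumer stream for the given channel.
 *
 * The stream takes a reference on the trace chunk (if any), has its hash
 * table node keys initialized (but is not added to any hash table here)
 * and gets its read_subbuffer_ops wired according to the channel type:
 * metadata streams check the metadata version before consuming, while data
 * streams update their statistics and, after consuming, emit a packet
 * index (preceded by a metadata synchronization for live sessions).
 *
 * Illustrative call, using the function's own parameter names (actual call
 * sites live in the kernel and UST consumers):
 *
 *	stream = consumer_stream_create(channel, channel_key, stream_key,
 *			channel_name, relayd_id, session_id, trace_chunk,
 *			cpu, &alloc_ret, CONSUMER_CHANNEL_TYPE_METADATA,
 *			monitor);
 *
 * Returns the newly allocated stream, or NULL on error with *alloc_ret set
 * when provided.
 */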
609 struct lttng_consumer_stream *consumer_stream_create(
610 struct lttng_consumer_channel *channel,
611 uint64_t channel_key,
612 uint64_t stream_key,
613 const char *channel_name,
614 uint64_t relayd_id,
615 uint64_t session_id,
616 struct lttng_trace_chunk *trace_chunk,
617 int cpu,
618 int *alloc_ret,
619 enum consumer_channel_type type,
620 unsigned int monitor)
621 {
622 int ret;
623 struct lttng_consumer_stream *stream;
624
625 stream = zmalloc(sizeof(*stream));
626 if (stream == NULL) {
627 PERROR("malloc struct lttng_consumer_stream");
628 ret = -ENOMEM;
629 goto end;
630 }
631
632 if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
633 ERR("Failed to acquire trace chunk reference during the creation of a stream");
634 ret = -1;
635 goto error;
636 }
637
638 rcu_read_lock();
639 stream->chan = channel;
640 stream->key = stream_key;
641 stream->trace_chunk = trace_chunk;
642 stream->out_fd = -1;
643 stream->out_fd_offset = 0;
644 stream->output_written = 0;
645 stream->net_seq_idx = relayd_id;
646 stream->session_id = session_id;
647 stream->monitor = monitor;
648 stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
649 stream->index_file = NULL;
650 stream->last_sequence_number = -1ULL;
651 stream->rotate_position = -1ULL;
652 /* Buffer is created with an open packet. */
653 stream->opened_packet_in_current_trace_chunk = true;
654 pthread_mutex_init(&stream->lock, NULL);
655 pthread_mutex_init(&stream->metadata_timer_lock, NULL);
656
657 /* If channel is the metadata, flag this stream as metadata. */
658 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
659 stream->metadata_flag = 1;
660 /* Metadata is flat out. */
661 strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
662 /* Live rendez-vous point. */
663 pthread_cond_init(&stream->metadata_rdv, NULL);
664 pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
665 } else {
666 /* Format stream name to <channel_name>_<cpu_number> */
667 ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
668 channel_name, cpu);
669 if (ret < 0) {
670 PERROR("snprintf stream name");
671 goto error;
672 }
673 }
674
675 switch (channel->output) {
676 case CONSUMER_CHANNEL_SPLICE:
677 stream->output = LTTNG_EVENT_SPLICE;
678 ret = utils_create_pipe(stream->splice_pipe);
679 if (ret < 0) {
680 goto error;
681 }
682 break;
683 case CONSUMER_CHANNEL_MMAP:
684 stream->output = LTTNG_EVENT_MMAP;
685 break;
686 default:
687 abort();
688 }
689
690 /* Key is always the wait_fd for streams. */
691 lttng_ht_node_init_u64(&stream->node, stream->key);
692
693 /* Init node per channel id key */
694 lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);
695
696 /* Init session id node with the stream session id */
697 lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);
698
699 DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
700 " relayd_id %" PRIu64 ", session_id %" PRIu64,
701 stream->name, stream->key, channel_key,
702 stream->net_seq_idx, stream->session_id);
703
704 rcu_read_unlock();
705
706 lttng_dynamic_array_init(&stream->read_subbuffer_ops.post_consume_cbs,
707 sizeof(post_consume_cb), NULL);
708
709 if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
710 stream->read_subbuffer_ops.lock =
711 consumer_stream_metadata_lock_all;
712 stream->read_subbuffer_ops.unlock =
713 consumer_stream_metadata_unlock_all;
714 stream->read_subbuffer_ops.pre_consume_subbuffer =
715 metadata_stream_check_version;
716 } else {
717 const post_consume_cb post_consume_index_op = channel->is_live ?
718 consumer_stream_sync_metadata_index :
719 consumer_stream_send_index;
720
721 ret = lttng_dynamic_array_add_element(
722 &stream->read_subbuffer_ops.post_consume_cbs,
723 &post_consume_index_op);
724 if (ret) {
725 PERROR("Failed to add `send index` callback to stream's post consumption callbacks");
726 goto error;
727 }
728
729 ret = lttng_dynamic_array_add_element(
730 &stream->read_subbuffer_ops.post_consume_cbs,
731 &(post_consume_cb) { post_consume_open_new_packet });
732 if (ret) {
733 PERROR("Failed to add `open new packet` callback to stream's post consumption callbacks");
734 goto error;
735 }
736
737 stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all;
738 stream->read_subbuffer_ops.unlock =
739 consumer_stream_data_unlock_all;
740 stream->read_subbuffer_ops.pre_consume_subbuffer =
741 consumer_stream_update_stats;
742 }
743
744 if (channel->output == CONSUMER_CHANNEL_MMAP) {
745 stream->read_subbuffer_ops.consume_subbuffer =
746 consumer_stream_consume_mmap;
747 } else {
748 stream->read_subbuffer_ops.consume_subbuffer =
749 consumer_stream_consume_splice;
750 }
751
752 return stream;
753
754 error:
755 rcu_read_unlock();
756 lttng_trace_chunk_put(stream->trace_chunk);
757 lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs);
758 free(stream);
759 end:
760 if (alloc_ret) {
761 *alloc_ret = ret;
762 }
763 return NULL;
764 }
765
766 /*
767 * Close stream on the relayd side. This call can destroy a relayd if the
768 * conditions are met.
769 *
770 * A RCU read side lock MUST be acquired if the relayd object was looked up in
771 * a hash table before calling this.
772 */
773 void consumer_stream_relayd_close(struct lttng_consumer_stream *stream,
774 struct consumer_relayd_sock_pair *relayd)
775 {
776 int ret;
777
778 assert(stream);
779 assert(relayd);
780
781 if (stream->sent_to_relayd) {
782 uatomic_dec(&relayd->refcount);
783 assert(uatomic_read(&relayd->refcount) >= 0);
784 }
785
786 /* Closing streams requires locking the control socket. */
787 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
788 ret = relayd_send_close_stream(&relayd->control_sock,
789 stream->relayd_stream_id,
790 stream->next_net_seq_num - 1);
791 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
792 if (ret < 0) {
793 ERR("Relayd send close stream failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
794 lttng_consumer_cleanup_relayd(relayd);
795 }
796
797 /* Both conditions are met, we destroy the relayd. */
798 if (uatomic_read(&relayd->refcount) == 0 &&
799 uatomic_read(&relayd->destroy_flag)) {
800 consumer_destroy_relayd(relayd);
801 }
802 stream->net_seq_idx = (uint64_t) -1ULL;
803 stream->sent_to_relayd = 0;
804 }
805
806 /*
807 * Close stream's file descriptors and, if needed, close stream also on the
808 * relayd side.
809 *
810 * The consumer data lock MUST be acquired.
811 * The stream lock MUST be acquired.
812 */
813 void consumer_stream_close(struct lttng_consumer_stream *stream)
814 {
815 int ret;
816 struct consumer_relayd_sock_pair *relayd;
817
818 assert(stream);
819
820 switch (consumer_data.type) {
821 case LTTNG_CONSUMER_KERNEL:
822 if (stream->mmap_base != NULL) {
823 ret = munmap(stream->mmap_base, stream->mmap_len);
824 if (ret != 0) {
825 PERROR("munmap");
826 }
827 }
828
829 if (stream->wait_fd >= 0) {
830 ret = close(stream->wait_fd);
831 if (ret) {
832 PERROR("close");
833 }
834 stream->wait_fd = -1;
835 }
836 if (stream->chan->output == CONSUMER_CHANNEL_SPLICE) {
837 utils_close_pipe(stream->splice_pipe);
838 }
839 break;
840 case LTTNG_CONSUMER32_UST:
841 case LTTNG_CONSUMER64_UST:
842 {
843 /*
844 * Special case for the metadata since the wait fd is an internal pipe
845 * polled in the metadata thread.
846 */
847 if (stream->metadata_flag && stream->chan->monitor) {
848 int rpipe = stream->ust_metadata_poll_pipe[0];
849
850 /*
851 * This will stop the channel timer, if there is one, and close the
852 * write side of the metadata poll pipe.
853 */
854 lttng_ustconsumer_close_metadata(stream->chan);
855 if (rpipe >= 0) {
856 ret = close(rpipe);
857 if (ret < 0) {
858 PERROR("closing metadata pipe read side");
859 }
860 stream->ust_metadata_poll_pipe[0] = -1;
861 }
862 }
863 break;
864 }
865 default:
866 ERR("Unknown consumer_data type");
867 assert(0);
868 }
869
870 /* Close output fd. Could be a socket or local file at this point. */
871 if (stream->out_fd >= 0) {
872 ret = close(stream->out_fd);
873 if (ret) {
874 PERROR("close");
875 }
876 stream->out_fd = -1;
877 }
878
879 if (stream->index_file) {
880 lttng_index_file_put(stream->index_file);
881 stream->index_file = NULL;
882 }
883
884 lttng_trace_chunk_put(stream->trace_chunk);
885 stream->trace_chunk = NULL;
886
887 /* Check and cleanup relayd if needed. */
888 rcu_read_lock();
889 relayd = consumer_find_relayd(stream->net_seq_idx);
890 if (relayd != NULL) {
891 consumer_stream_relayd_close(stream, relayd);
892 }
893 rcu_read_unlock();
894 }
895
896 /*
897 * Delete the stream from all possible hash tables.
898 *
899 * The consumer data lock MUST be acquired.
900 * The stream lock MUST be acquired.
901 */
902 void consumer_stream_delete(struct lttng_consumer_stream *stream,
903 struct lttng_ht *ht)
904 {
905 int ret;
906 struct lttng_ht_iter iter;
907
908 assert(stream);
909 /* Should NEVER be called on a stream that is not in monitor mode. */
910 assert(stream->chan->monitor);
911
912 rcu_read_lock();
913
914 if (ht) {
915 iter.iter.node = &stream->node.node;
916 ret = lttng_ht_del(ht, &iter);
917 assert(!ret);
918 }
919
920 /* Delete from stream per channel ID hash table. */
921 iter.iter.node = &stream->node_channel_id.node;
922 /*
923 * The returned value is of no importance. Even if the node is NOT in the
924 * hash table, we continue since we may have been called by a code path
925 * that did not add the stream to all hash tables. The same goes for the
926 * next ht del call.
927 */
928 (void) lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
929
930 /* Delete from the global stream list. */
931 iter.iter.node = &stream->node_session_id.node;
932 /* See the previous ht del on why we ignore the returned value. */
933 (void) lttng_ht_del(consumer_data.stream_list_ht, &iter);
934
935 rcu_read_unlock();
936
937 if (!stream->metadata_flag) {
938 /* Decrement the stream count of the global consumer data. */
939 assert(consumer_data.stream_count > 0);
940 consumer_data.stream_count--;
941 }
942 }
943
944 /*
945 * Free the given stream within a RCU call.
946 */
947 void consumer_stream_free(struct lttng_consumer_stream *stream)
948 {
949 assert(stream);
950
951 metadata_bucket_destroy(stream->metadata_bucket);
952 call_rcu(&stream->node.head, free_stream_rcu);
953 }
954
955 /*
956 * Destroy the stream's buffers of the tracer.
957 */
958 void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream)
959 {
960 assert(stream);
961
962 switch (consumer_data.type) {
963 case LTTNG_CONSUMER_KERNEL:
964 break;
965 case LTTNG_CONSUMER32_UST:
966 case LTTNG_CONSUMER64_UST:
967 lttng_ustconsumer_del_stream(stream);
968 break;
969 default:
970 ERR("Unknown consumer_data type");
971 assert(0);
972 }
973 }
974
975 /*
976 * Destroy and close an already created stream.
977 */
978 static void destroy_close_stream(struct lttng_consumer_stream *stream)
979 {
980 assert(stream);
981
982 DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key);
983
984 /* Destroy tracer buffers of the stream. */
985 consumer_stream_destroy_buffers(stream);
986 /* Close down everything including the relayd if one. */
987 consumer_stream_close(stream);
988 }
989
990 /*
991 * Decrement the stream's channel refcount. If it reaches 0, return the
992 * channel pointer so the caller can destroy it; otherwise, return NULL.
993 */
994 static struct lttng_consumer_channel *unref_channel(
995 struct lttng_consumer_stream *stream)
996 {
997 struct lttng_consumer_channel *free_chan = NULL;
998
999 assert(stream);
1000 assert(stream->chan);
1001
1002 /* Update refcount of channel and see if we need to destroy it. */
1003 if (!uatomic_sub_return(&stream->chan->refcount, 1)
1004 && !uatomic_read(&stream->chan->nb_init_stream_left)) {
1005 free_chan = stream->chan;
1006 }
1007
1008 return free_chan;
1009 }
1010
1011 /*
1012 * Destroy a stream completely. This will delete, close and free the stream.
1013 * Once this returns, the stream is NO longer usable. Its channel may get destroyed
1014 * if conditions are met for a monitored stream.
1015 *
1016 * This MUST be called WITHOUT the consumer data and stream locks acquired if
1017 * the stream is in _monitor_ mode; otherwise, it does not matter.
1018 */
1019 void consumer_stream_destroy(struct lttng_consumer_stream *stream,
1020 struct lttng_ht *ht)
1021 {
1022 assert(stream);
1023
1024 /* Stream is in monitor mode. */
1025 if (stream->monitor) {
1026 struct lttng_consumer_channel *free_chan = NULL;
1027
1028 /*
1029 * This means that the stream was successfully removed from the streams
1030 * list of the channel and sent to the right thread managing this
1031 * stream thus being globally visible.
1032 */
1033 if (stream->globally_visible) {
1034 pthread_mutex_lock(&consumer_data.lock);
1035 pthread_mutex_lock(&stream->chan->lock);
1036 pthread_mutex_lock(&stream->lock);
1037 /* Remove every reference of the stream in the consumer. */
1038 consumer_stream_delete(stream, ht);
1039
1040 destroy_close_stream(stream);
1041
1042 /* Update channel's refcount of the stream. */
1043 free_chan = unref_channel(stream);
1044
1045 /* Indicates that the consumer data state MUST be updated after this. */
1046 consumer_data.need_update = 1;
1047
1048 pthread_mutex_unlock(&stream->lock);
1049 pthread_mutex_unlock(&stream->chan->lock);
1050 pthread_mutex_unlock(&consumer_data.lock);
1051 } else {
1052 /*
1053 * If the stream is not visible globally, this needs to be done
1054 * outside of the consumer data lock section.
1055 */
1056 free_chan = unref_channel(stream);
1057 }
1058
1059 if (free_chan) {
1060 consumer_del_channel(free_chan);
1061 }
1062 } else {
1063 destroy_close_stream(stream);
1064 }
1065
1066 /* Free stream within a RCU call. */
1067 lttng_trace_chunk_put(stream->trace_chunk);
1068 stream->trace_chunk = NULL;
1069 lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs);
1070 consumer_stream_free(stream);
1071 }
1072
1073 /*
1074 * Write index of a specific stream either on the relayd or local disk.
1075 *
1076 * Return 0 on success or else a negative value.
1077 */
1078 int consumer_stream_write_index(struct lttng_consumer_stream *stream,
1079 struct ctf_packet_index *element)
1080 {
1081 int ret;
1082
1083 assert(stream);
1084 assert(element);
1085
1086 rcu_read_lock();
1087 if (stream->net_seq_idx != (uint64_t) -1ULL) {
1088 struct consumer_relayd_sock_pair *relayd;
1089 relayd = consumer_find_relayd(stream->net_seq_idx);
1090 if (relayd) {
1091 pthread_mutex_lock(&relayd->ctrl_sock_mutex);
1092 ret = relayd_send_index(&relayd->control_sock, element,
1093 stream->relayd_stream_id, stream->next_net_seq_num - 1);
1094 if (ret < 0) {
1095 /*
1096 * Communication error with lttng-relayd,
1097 * perform cleanup now
1098 */
1099 ERR("Relayd send index failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx);
1100 lttng_consumer_cleanup_relayd(relayd);
1101 ret = -1;
1102 }
1103 pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
1104 } else {
1105 ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't write index.",
1106 stream->key, stream->net_seq_idx);
1107 ret = -1;
1108 }
1109 } else {
1110 if (lttng_index_file_write(stream->index_file, element)) {
1111 ret = -1;
1112 } else {
1113 ret = 0;
1114 }
1115 }
1116 if (ret < 0) {
1117 goto error;
1118 }
1119
1120 error:
1121 rcu_read_unlock();
1122 return ret;
1123 }
1124
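/*
 * (Re)create the stream's output file, and its index file when requested,
 * within the stream's current trace chunk. The previously opened output
 * file descriptor, if any, is closed first, and the current tracefile size
 * and output offset are reset.
 */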
1125 int consumer_stream_create_output_files(struct lttng_consumer_stream *stream,
1126 bool create_index)
1127 {
1128 int ret;
1129 enum lttng_trace_chunk_status chunk_status;
1130 const int flags = O_WRONLY | O_CREAT | O_TRUNC;
1131 const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
1132 char stream_path[LTTNG_PATH_MAX];
1133
1134 ASSERT_LOCKED(stream->lock);
1135 assert(stream->trace_chunk);
1136
1137 ret = utils_stream_file_path(stream->chan->pathname, stream->name,
1138 stream->chan->tracefile_size,
1139 stream->tracefile_count_current, NULL,
1140 stream_path, sizeof(stream_path));
1141 if (ret < 0) {
1142 goto end;
1143 }
1144
1145 if (stream->out_fd >= 0) {
1146 ret = close(stream->out_fd);
1147 if (ret < 0) {
1148 PERROR("Failed to close stream file \"%s\"",
1149 stream->name);
1150 goto end;
1151 }
1152 stream->out_fd = -1;
1153 }
1154
1155 DBG("Opening stream output file \"%s\"", stream_path);
1156 chunk_status = lttng_trace_chunk_open_file(stream->trace_chunk, stream_path,
1157 flags, mode, &stream->out_fd, false);
1158 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
1159 ERR("Failed to open stream file \"%s\"", stream->name);
1160 ret = -1;
1161 goto end;
1162 }
1163
1164 if (!stream->metadata_flag && (create_index || stream->index_file)) {
1165 if (stream->index_file) {
1166 lttng_index_file_put(stream->index_file);
1167 }
1168 chunk_status = lttng_index_file_create_from_trace_chunk(
1169 stream->trace_chunk,
1170 stream->chan->pathname,
1171 stream->name,
1172 stream->chan->tracefile_size,
1173 stream->tracefile_count_current,
1174 CTF_INDEX_MAJOR, CTF_INDEX_MINOR,
1175 false, &stream->index_file);
1176 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
1177 ret = -1;
1178 goto end;
1179 }
1180 }
1181
1182 /* Reset the current size because we just performed a rotation. */
1183 stream->tracefile_size_current = 0;
1184 stream->out_fd_offset = 0;
1185 end:
1186 return ret;
1187 }
1188
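/*
 * Move the stream to its next tracefile (wrapping around when a tracefile
 * count is configured on the channel) and recreate the output and index
 * files accordingly.
 */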
1189 int consumer_stream_rotate_output_files(struct lttng_consumer_stream *stream)
1190 {
1191 int ret;
1192
1193 stream->tracefile_count_current++;
1194 if (stream->chan->tracefile_count > 0) {
1195 stream->tracefile_count_current %=
1196 stream->chan->tracefile_count;
1197 }
1198
1199 DBG("Rotating output files of stream \"%s\"", stream->name);
1200 ret = consumer_stream_create_output_files(stream, true);
1201 if (ret) {
1202 goto end;
1203 }
1204
1205 end:
1206 return ret;
1207 }
1208
1209 bool consumer_stream_is_deleted(struct lttng_consumer_stream *stream)
1210 {
1211 /*
1212 * This function does not take a const stream since
1213 * cds_lfht_is_node_deleted was not const before liburcu 0.12.
1214 */
1215 assert(stream);
1216 return cds_lfht_is_node_deleted(&stream->node.node);
1217 }
1218
1219 static ssize_t metadata_bucket_flush(
1220 const struct stream_subbuffer *buffer, void *data)
1221 {
1222 ssize_t ret;
1223 struct lttng_consumer_stream *stream = data;
1224
1225 ret = consumer_stream_consume_mmap(NULL, stream, buffer);
1226 if (ret < 0) {
1227 goto end;
1228 }
1229 end:
1230 return ret;
1231 }
1232
1233 static ssize_t metadata_bucket_consume(
1234 struct lttng_consumer_local_data *unused,
1235 struct lttng_consumer_stream *stream,
1236 const struct stream_subbuffer *subbuffer)
1237 {
1238 ssize_t ret;
1239 enum metadata_bucket_status status;
1240
1241 status = metadata_bucket_fill(stream->metadata_bucket, subbuffer);
1242 switch (status) {
1243 case METADATA_BUCKET_STATUS_OK:
1244 /* Return consumed size. */
1245 ret = subbuffer->buffer.buffer.size;
1246 break;
1247 default:
1248 ret = -1;
1249 }
1250
1251 return ret;
1252 }
1253
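/*
 * Redirect the metadata stream's consumption into a metadata bucket: the
 * consume_subbuffer operation is replaced by metadata_bucket_consume(),
 * which accumulates the consumed metadata, and the bucket flushes through
 * consumer_stream_consume_mmap() via metadata_bucket_flush(). Only
 * supported for metadata channels using mmap output.
 */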
1254 int consumer_stream_enable_metadata_bucketization(
1255 struct lttng_consumer_stream *stream)
1256 {
1257 int ret = 0;
1258
1259 assert(stream->metadata_flag);
1260 assert(!stream->metadata_bucket);
1261 assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);
1262
1263 stream->metadata_bucket = metadata_bucket_create(
1264 metadata_bucket_flush, stream);
1265 if (!stream->metadata_bucket) {
1266 ret = -1;
1267 goto end;
1268 }
1269
1270 stream->read_subbuffer_ops.consume_subbuffer = metadata_bucket_consume;
1271 end:
1272 return ret;
1273 }
1274
1275 void consumer_stream_metadata_set_version(
1276 struct lttng_consumer_stream *stream, uint64_t new_version)
1277 {
1278 assert(new_version > stream->metadata_version);
1279 stream->metadata_version = new_version;
1280 stream->reset_metadata_flag = 1;
1281
1282 if (stream->metadata_bucket) {
1283 metadata_bucket_reset(stream->metadata_bucket);
1284 }
1285 }
1286
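/*
 * Flush the stream's ring buffer. For the kernel domain, the kind of flush
 * depends on whether the producer is active; when it is not, the
 * "flush_empty" operation is attempted first and a regular flush is used
 * as a fall-back for lttng-modules versions that do not implement it. For
 * the UST domain, the flush is delegated to
 * lttng_ustconsumer_flush_buffer().
 *
 * Returns 0 on success or a negative value on error.
 */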
1287 int consumer_stream_flush_buffer(struct lttng_consumer_stream *stream,
1288 bool producer_active)
1289 {
1290 int ret = 0;
1291
1292 switch (consumer_data.type) {
1293 case LTTNG_CONSUMER_KERNEL:
1294 if (producer_active) {
1295 ret = kernctl_buffer_flush(stream->wait_fd);
1296 if (ret < 0) {
1297 ERR("Failed to flush kernel stream");
1298 goto end;
1299 }
1300 } else {
1301 ret = kernctl_buffer_flush_empty(stream->wait_fd);
1302 if (ret < 0) {
1303 /*
1304 * Doing a buffer flush which does not take into
1305 * account empty packets. This is not perfect,
1306 * but required as a fall-back when
1307 * "flush_empty" is not implemented by
1308 * lttng-modules.
1309 */
1310 ret = kernctl_buffer_flush(stream->wait_fd);
1311 if (ret < 0) {
1312 ERR("Failed to flush kernel stream");
1313 goto end;
1314 }
1315 }
1316 }
1317 break;
1318 case LTTNG_CONSUMER32_UST:
1319 case LTTNG_CONSUMER64_UST:
1320 lttng_ustconsumer_flush_buffer(stream, (int) producer_active);
1321 break;
1322 default:
1323 ERR("Unknown consumer_data type");
1324 abort();
1325 }
1326
1327 end:
1328 return ret;
1329 }