src/common/consumer/consumer-timer.cpp (lttng-tools.git)
/*
 * Copyright (C) 2012 Julien Desfossez <julien.desfossez@efficios.com>
 * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <common/common.hpp>
#include <common/compat/endian.hpp>
#include <common/consumer/consumer-stream.hpp>
#include <common/consumer/consumer-testpoint.hpp>
#include <common/consumer/consumer-timer.hpp>
#include <common/kernel-consumer/kernel-consumer.hpp>
#include <common/kernel-ctl/kernel-ctl.hpp>
#include <common/urcu.hpp>
#include <common/ust-consumer/ust-consumer.hpp>

#include <bin/lttng-consumerd/health-consumerd.hpp>
#include <inttypes.h>
#include <signal.h>

using sample_positions_cb = int (*)(struct lttng_consumer_stream *);
using get_consumed_cb = int (*)(struct lttng_consumer_stream *, unsigned long *);
using get_produced_cb = int (*)(struct lttng_consumer_stream *, unsigned long *);
using flush_index_cb = int (*)(struct lttng_consumer_stream *);

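/*
 * Shared state used to synchronize threads stopping a channel timer with the
 * timer management thread (see consumer_timer_signal_thread_qs()).
 */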
static struct timer_signal_data timer_signal = {
	.tid = 0,
	.setup_done = 0,
	.qs_done = 0,
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/*
 * Build the custom signal mask used by the consumer timer thread; it is also
 * used by consumer_signal_init() to block those signals process-wide.
 */
static void setmask(sigset_t *mask)
{
	int ret;

	ret = sigemptyset(mask);
	if (ret) {
		PERROR("sigemptyset");
	}
	ret = sigaddset(mask, LTTNG_CONSUMER_SIG_SWITCH);
	if (ret) {
		PERROR("sigaddset switch");
	}
	ret = sigaddset(mask, LTTNG_CONSUMER_SIG_TEARDOWN);
	if (ret) {
		PERROR("sigaddset teardown");
	}
	ret = sigaddset(mask, LTTNG_CONSUMER_SIG_LIVE);
	if (ret) {
		PERROR("sigaddset live");
	}
	ret = sigaddset(mask, LTTNG_CONSUMER_SIG_MONITOR);
	if (ret) {
		PERROR("sigaddset monitor");
	}
	ret = sigaddset(mask, LTTNG_CONSUMER_SIG_EXIT);
	if (ret) {
		PERROR("sigaddset exit");
	}
}

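/*
 * File descriptor used to send channel monitoring samples to the session
 * daemon; read and set atomically through the accessor functions near the
 * end of this file.
 */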
static int the_channel_monitor_pipe = -1;

/*
 * Execute action on a timer switch.
 *
 * Beware: metadata_switch_timer() should *never* take a mutex also held
 * while consumer_timer_switch_stop() is called. It would result in
 * deadlocks.
 */
static void metadata_switch_timer(struct lttng_consumer_local_data *ctx, siginfo_t *si)
{
	int ret;
	struct lttng_consumer_channel *channel;

	channel = (lttng_consumer_channel *) si->si_value.sival_ptr;
	LTTNG_ASSERT(channel);

	if (channel->switch_timer_error) {
		return;
	}

	DBG("Switch timer for channel %" PRIu64, channel->key);
	switch (ctx->type) {
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Locks taken by lttng_ustconsumer_request_metadata():
		 * - metadata_socket_lock
		 * - Calling lttng_ustconsumer_recv_metadata():
		 * - channel->metadata_cache->lock
		 * - Calling consumer_metadata_cache_flushed():
		 * - channel->timer_lock
		 * - channel->metadata_cache->lock
		 *
		 * Ensure that neither consumer_data.lock nor
		 * channel->lock are taken within this function, since
		 * they are held while consumer_timer_switch_stop() is
		 * called.
		 */
		ret = lttng_ustconsumer_request_metadata(ctx, channel, 1, 1);
		if (ret < 0) {
			channel->switch_timer_error = 1;
		}
		break;
	case LTTNG_CONSUMER_KERNEL:
	case LTTNG_CONSUMER_UNKNOWN:
		abort();
		break;
	}
}

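/*
 * Write an "empty" index entry (a beacon) carrying only the stream id and
 * the end timestamp, so that a live viewer knows the stream has produced no
 * new data up to that point.
 */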
static int send_empty_index(struct lttng_consumer_stream *stream, uint64_t ts, uint64_t stream_id)
{
	int ret;
	struct ctf_packet_index index;

	memset(&index, 0, sizeof(index));
	index.stream_id = htobe64(stream_id);
	index.timestamp_end = htobe64(ts);
	ret = consumer_stream_write_index(stream, &index);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

int consumer_flush_kernel_index(struct lttng_consumer_stream *stream)
{
	uint64_t ts, stream_id;
	int ret;

	ret = kernctl_get_current_timestamp(stream->wait_fd, &ts);
	if (ret < 0) {
		ERR("Failed to get the current timestamp");
		goto end;
	}
	ret = kernctl_buffer_flush(stream->wait_fd);
	if (ret < 0) {
		ERR("Failed to flush kernel stream");
		goto end;
	}
	ret = kernctl_snapshot(stream->wait_fd);
	if (ret < 0) {
		if (ret != -EAGAIN && ret != -ENODATA) {
			PERROR("live timer kernel snapshot");
			ret = -1;
			goto end;
		}
		ret = kernctl_get_stream_id(stream->wait_fd, &stream_id);
		if (ret < 0) {
			PERROR("kernctl_get_stream_id");
			goto end;
		}
		DBG("Stream %" PRIu64 " empty, sending beacon", stream->key);
		ret = send_empty_index(stream, ts, stream_id);
		if (ret < 0) {
			goto end;
		}
	}
	ret = 0;
end:
	return ret;
}

static int check_stream(struct lttng_consumer_stream *stream, flush_index_cb flush_index)
{
	int ret;

	/*
	 * While holding the stream mutex, try to take a snapshot. If it
	 * succeeds, data is ready to be sent and the data thread will handle
	 * it. Otherwise, if the snapshot returns EAGAIN, there is no data to
	 * read after the flush, so the empty index can safely be sent.
	 *
	 * Do a trylock and, if it fails, check whether the stream is waiting
	 * on metadata; bail out if the stream is indeed waiting for metadata
	 * to be pushed. Otherwise, busy-wait on the trylock.
	 */
	for (;;) {
		ret = pthread_mutex_trylock(&stream->lock);
		switch (ret) {
		case 0:
			break; /* We have the lock. */
		case EBUSY:
			pthread_mutex_lock(&stream->metadata_timer_lock);
			if (stream->waiting_on_metadata) {
				ret = 0;
				stream->missed_metadata_flush = true;
				pthread_mutex_unlock(&stream->metadata_timer_lock);
				goto end; /* Bail out. */
			}
			pthread_mutex_unlock(&stream->metadata_timer_lock);
			/* Try again. */
			caa_cpu_relax();
			continue;
		default:
			ERR("Unexpected pthread_mutex_trylock error %d", ret);
			ret = -1;
			goto end;
		}
		break;
	}
	ret = flush_index(stream);
	pthread_mutex_unlock(&stream->lock);
end:
	return ret;
}

int consumer_flush_ust_index(struct lttng_consumer_stream *stream)
{
	uint64_t ts, stream_id;
	int ret;

	ret = cds_lfht_is_node_deleted(&stream->node.node);
	if (ret) {
		goto end;
	}

	ret = lttng_ustconsumer_get_current_timestamp(stream, &ts);
	if (ret < 0) {
		ERR("Failed to get the current timestamp");
		goto end;
	}
	ret = lttng_ustconsumer_flush_buffer(stream, 1);
	if (ret < 0) {
		ERR("Failed to flush buffer while flushing index");
		goto end;
	}
	ret = lttng_ustconsumer_take_snapshot(stream);
	if (ret < 0) {
		if (ret != -EAGAIN) {
			ERR("Taking UST snapshot");
			ret = -1;
			goto end;
		}
		ret = lttng_ustconsumer_get_stream_id(stream, &stream_id);
		if (ret < 0) {
			PERROR("lttng_ust_ctl_get_stream_id");
			goto end;
		}
		DBG("Stream %" PRIu64 " empty, sending beacon", stream->key);
		ret = send_empty_index(stream, ts, stream_id);
		if (ret < 0) {
			goto end;
		}
	}
	ret = 0;
end:
	return ret;
}

/*
 * Execute action on a live timer: flush every stream of the channel and send
 * an empty beacon index for streams that have no new data.
 */
static void live_timer(struct lttng_consumer_local_data *ctx, siginfo_t *si)
{
	int ret;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	const struct lttng_ht *ht = the_consumer_data.stream_per_chan_id_ht;
	const flush_index_cb flush_index = ctx->type == LTTNG_CONSUMER_KERNEL ?
		consumer_flush_kernel_index :
		consumer_flush_ust_index;

	channel = (lttng_consumer_channel *) si->si_value.sival_ptr;
	LTTNG_ASSERT(channel);

	if (channel->switch_timer_error) {
		goto error;
	}

	DBG("Live timer for channel %" PRIu64, channel->key);

	{
		lttng::urcu::read_lock_guard read_lock;
		cds_lfht_for_each_entry_duplicate(ht->ht,
						  ht->hash_fct(&channel->key, lttng_ht_seed),
						  ht->match_fct,
						  &channel->key,
						  &iter.iter,
						  stream,
						  node_channel_id.node)
		{
			ret = check_stream(stream, flush_index);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}
error_unlock:

error:
	return;
}

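/*
 * Wait for a quiescent state of the given timer signal: drain any pending
 * instance of the signal, then wait for the timer management thread to
 * acknowledge so that no handler still references the torn-down channel.
 */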
static void consumer_timer_signal_thread_qs(unsigned int signr)
{
	sigset_t pending_set;
	int ret;

	/*
	 * We need to be the only thread interacting with the thread
	 * that manages signals for teardown synchronization.
	 */
	pthread_mutex_lock(&timer_signal.lock);

	/* Ensure we don't have any signal queued for this channel. */
	for (;;) {
		ret = sigemptyset(&pending_set);
		if (ret == -1) {
			PERROR("sigemptyset");
		}
		ret = sigpending(&pending_set);
		if (ret == -1) {
			PERROR("sigpending");
		}
		if (!sigismember(&pending_set, signr)) {
			break;
		}
		caa_cpu_relax();
	}

	/*
	 * From this point, no new signal handler will be fired that would try to
	 * access "chan". However, we still need to wait for any currently
	 * executing handler to complete.
	 */
	cmm_smp_mb();
	CMM_STORE_SHARED(timer_signal.qs_done, 0);
	cmm_smp_mb();

	/*
	 * Kill with LTTNG_CONSUMER_SIG_TEARDOWN, so signal management thread wakes
	 * up.
	 */
	kill(getpid(), LTTNG_CONSUMER_SIG_TEARDOWN);

	while (!CMM_LOAD_SHARED(timer_signal.qs_done)) {
		caa_cpu_relax();
	}
	cmm_smp_mb();

	pthread_mutex_unlock(&timer_signal.lock);
}

/*
 * Start a channel timer which fires at the given interval
 * (timer_interval_us) and sends the given signal (signal).
 *
 * Returns a negative value on error, 0 if a timer was created, and
 * a positive value if no timer was created (not an error).
 */
static int consumer_channel_timer_start(timer_t *timer_id,
					struct lttng_consumer_channel *channel,
					unsigned int timer_interval_us,
					int signal)
{
	int ret = 0, delete_ret;
	struct sigevent sev = {};
	struct itimerspec its;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(channel->key);

	if (timer_interval_us == 0) {
		/* No creation needed; not an error. */
		ret = 1;
		goto end;
	}

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = signal;
	sev.sigev_value.sival_ptr = channel;
	ret = timer_create(CLOCKID, &sev, timer_id);
	if (ret == -1) {
		PERROR("timer_create");
		goto end;
	}

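	/*
	 * Convert the period from microseconds to a timespec; the timer first
	 * expires after one period and then keeps firing at that same period.
	 */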
	its.it_value.tv_sec = timer_interval_us / 1000000;
	its.it_value.tv_nsec = (timer_interval_us % 1000000) * 1000;
	its.it_interval.tv_sec = its.it_value.tv_sec;
	its.it_interval.tv_nsec = its.it_value.tv_nsec;

	ret = timer_settime(*timer_id, 0, &its, nullptr);
	if (ret == -1) {
		PERROR("timer_settime");
		goto error_destroy_timer;
	}
end:
	return ret;
error_destroy_timer:
	delete_ret = timer_delete(*timer_id);
	if (delete_ret == -1) {
		PERROR("timer_delete");
	}
	goto end;
}

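/*
 * Delete a channel timer and wait for the timer signal to reach a quiescent
 * state so that no handler still references the channel.
 */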
static int consumer_channel_timer_stop(timer_t *timer_id, int signal)
{
	int ret = 0;

	ret = timer_delete(*timer_id);
	if (ret == -1) {
		PERROR("timer_delete");
		goto end;
	}

	consumer_timer_signal_thread_qs(signal);
	*timer_id = nullptr;
end:
	return ret;
}

/*
 * Set the channel's switch timer.
 */
void consumer_timer_switch_start(struct lttng_consumer_channel *channel,
				 unsigned int switch_timer_interval_us)
{
	int ret;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(channel->key);

	ret = consumer_channel_timer_start(&channel->switch_timer,
					   channel,
					   switch_timer_interval_us,
					   LTTNG_CONSUMER_SIG_SWITCH);

	channel->switch_timer_enabled = !!(ret == 0);
}

/*
 * Stop and delete the channel's switch timer.
 */
void consumer_timer_switch_stop(struct lttng_consumer_channel *channel)
{
	int ret;

	LTTNG_ASSERT(channel);

	ret = consumer_channel_timer_stop(&channel->switch_timer, LTTNG_CONSUMER_SIG_SWITCH);
	if (ret == -1) {
		ERR("Failed to stop switch timer");
	}

	channel->switch_timer_enabled = 0;
}

/*
 * Set the channel's live timer.
 */
void consumer_timer_live_start(struct lttng_consumer_channel *channel,
			       unsigned int live_timer_interval_us)
{
	int ret;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(channel->key);

	ret = consumer_channel_timer_start(
		&channel->live_timer, channel, live_timer_interval_us, LTTNG_CONSUMER_SIG_LIVE);

	channel->live_timer_enabled = !!(ret == 0);
}

/*
 * Stop and delete the channel's live timer.
 */
void consumer_timer_live_stop(struct lttng_consumer_channel *channel)
{
	int ret;

	LTTNG_ASSERT(channel);

	ret = consumer_channel_timer_stop(&channel->live_timer, LTTNG_CONSUMER_SIG_LIVE);
	if (ret == -1) {
		ERR("Failed to stop live timer");
	}

	channel->live_timer_enabled = 0;
}

/*
 * Set the channel's monitoring timer.
 *
 * Returns a negative value on error, 0 if a timer was created, and
 * a positive value if no timer was created (not an error).
 */
int consumer_timer_monitor_start(struct lttng_consumer_channel *channel,
				 unsigned int monitor_timer_interval_us)
{
	int ret;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(channel->key);
	LTTNG_ASSERT(!channel->monitor_timer_enabled);

	ret = consumer_channel_timer_start(&channel->monitor_timer,
					   channel,
					   monitor_timer_interval_us,
					   LTTNG_CONSUMER_SIG_MONITOR);
	channel->monitor_timer_enabled = !!(ret == 0);
	return ret;
}

/*
 * Stop and delete the channel's monitoring timer.
 */
int consumer_timer_monitor_stop(struct lttng_consumer_channel *channel)
{
	int ret;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(channel->monitor_timer_enabled);

	ret = consumer_channel_timer_stop(&channel->monitor_timer, LTTNG_CONSUMER_SIG_MONITOR);
	if (ret == -1) {
		ERR("Failed to stop monitor timer");
		goto end;
	}

	channel->monitor_timer_enabled = 0;
end:
	return ret;
}

/*
 * Block the RT signals for the entire process. This must be called from the
 * consumer main thread before creating the other threads.
 */
int consumer_signal_init()
{
	int ret;
	sigset_t mask;

	/* Block the signals for the entire process, so only our thread processes them. */
	setmask(&mask);
	ret = pthread_sigmask(SIG_BLOCK, &mask, nullptr);
	if (ret) {
		errno = ret;
		PERROR("pthread_sigmask");
		return -1;
	}
	return 0;
}

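/*
 * Sample the buffer positions of every stream of a channel. Reports the
 * highest and lowest buffer usage across streams as well as the total number
 * of bytes written to the output. Returns a negative value if the channel has
 * no stream or if a position could not be sampled.
 */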
static int sample_channel_positions(struct lttng_consumer_channel *channel,
				    uint64_t *_highest_use,
				    uint64_t *_lowest_use,
				    uint64_t *_total_consumed,
				    sample_positions_cb sample,
				    get_consumed_cb get_consumed,
				    get_produced_cb get_produced)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;
	bool empty_channel = true;
	uint64_t high = 0, low = UINT64_MAX;
	struct lttng_ht *ht = the_consumer_data.stream_per_chan_id_ht;

	*_total_consumed = 0;

	lttng::urcu::read_lock_guard read_lock;

	cds_lfht_for_each_entry_duplicate(ht->ht,
					  ht->hash_fct(&channel->key, lttng_ht_seed),
					  ht->match_fct,
					  &channel->key,
					  &iter.iter,
					  stream,
					  node_channel_id.node)
	{
		unsigned long produced, consumed, usage;

		empty_channel = false;

		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		ret = sample(stream);
		if (ret) {
			ERR("Failed to take buffer position snapshot in monitor timer (ret = %d)",
			    ret);
			pthread_mutex_unlock(&stream->lock);
			goto end;
		}
		ret = get_consumed(stream, &consumed);
		if (ret) {
			ERR("Failed to get buffer consumed position in monitor timer");
			pthread_mutex_unlock(&stream->lock);
			goto end;
		}
		ret = get_produced(stream, &produced);
		if (ret) {
			ERR("Failed to get buffer produced position in monitor timer");
			pthread_mutex_unlock(&stream->lock);
			goto end;
		}

		usage = produced - consumed;
		high = (usage > high) ? usage : high;
		low = (usage < low) ? usage : low;

		/*
		 * We don't use consumed here for 2 reasons:
		 * - output_written takes into account the padding written in the
		 *   tracefiles when we stop the session;
		 * - the consumed position is not the accurate representation of what
		 *   was extracted from a buffer in overwrite mode.
		 */
		*_total_consumed += stream->output_written;
	next:
		pthread_mutex_unlock(&stream->lock);
	}

	*_highest_use = high;
	*_lowest_use = low;
end:
	if (empty_channel) {
		ret = -1;
	}
	return ret;
}

/* Sample and send channel buffering statistics to the session daemon. */
void sample_and_send_channel_buffer_stats(struct lttng_consumer_channel *channel)
{
	int ret;
	int channel_monitor_pipe = consumer_timer_thread_get_channel_monitor_pipe();
	struct lttcomm_consumer_channel_monitor_msg msg = {
		.key = channel->key,
		.session_id = channel->session_id,
		.lowest = 0,
		.highest = 0,
		.consumed_since_last_sample = 0,
	};
	sample_positions_cb sample;
	get_consumed_cb get_consumed;
	get_produced_cb get_produced;
	uint64_t lowest = 0, highest = 0, total_consumed = 0;

	LTTNG_ASSERT(channel);

	if (channel_monitor_pipe < 0) {
		return;
	}

	switch (the_consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		sample = lttng_kconsumer_sample_snapshot_positions;
		get_consumed = lttng_kconsumer_get_consumed_snapshot;
		get_produced = lttng_kconsumer_get_produced_snapshot;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		sample = lttng_ustconsumer_sample_snapshot_positions;
		get_consumed = lttng_ustconsumer_get_consumed_snapshot;
		get_produced = lttng_ustconsumer_get_produced_snapshot;
		break;
	default:
		abort();
	}

	ret = sample_channel_positions(
		channel, &highest, &lowest, &total_consumed, sample, get_consumed, get_produced);
	if (ret) {
		return;
	}

	msg.highest = highest;
	msg.lowest = lowest;
	msg.consumed_since_last_sample = total_consumed - channel->last_consumed_size_sample_sent;

	/*
	 * Writes performed here are assumed to be atomic, which is only
	 * guaranteed for sizes smaller than or equal to PIPE_BUF.
	 */
	LTTNG_ASSERT(sizeof(msg) <= PIPE_BUF);

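	/* write() may be interrupted by a signal; retry until it completes or fails. */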
	do {
		ret = write(channel_monitor_pipe, &msg, sizeof(msg));
	} while (ret == -1 && errno == EINTR);
	if (ret == -1) {
		if (errno == EAGAIN) {
			/* Not an error, the sample is merely dropped. */
			DBG("Channel monitor pipe is full; dropping sample for channel key = %" PRIu64,
			    channel->key);
		} else {
			PERROR("write to the channel monitor pipe");
		}
	} else {
		DBG("Sent channel monitoring sample for channel key %" PRIu64
		    ", (highest = %" PRIu64 ", lowest = %" PRIu64 ")",
		    channel->key,
		    msg.highest,
		    msg.lowest);
		channel->last_consumed_size_sample_sent = msg.consumed_since_last_sample;
	}
}

int consumer_timer_thread_get_channel_monitor_pipe()
{
	return uatomic_read(&the_channel_monitor_pipe);
}

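/*
 * Set the channel monitor pipe file descriptor. Returns 0 on success, or -1
 * if a pipe was already set.
 */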
int consumer_timer_thread_set_channel_monitor_pipe(int fd)
{
	int ret;

	ret = uatomic_cmpxchg(&the_channel_monitor_pipe, -1, fd);
	if (ret != -1) {
		ret = -1;
		goto end;
	}
	ret = 0;
end:
	return ret;
}

/*
 * This thread is the sighandler for the LTTNG_CONSUMER_SIG_SWITCH,
 * LTTNG_CONSUMER_SIG_TEARDOWN, LTTNG_CONSUMER_SIG_LIVE,
 * LTTNG_CONSUMER_SIG_MONITOR and LTTNG_CONSUMER_SIG_EXIT signals.
 */
void *consumer_timer_thread(void *data)
{
	int signr;
	sigset_t mask;
	siginfo_t info;
	struct lttng_consumer_local_data *ctx = (lttng_consumer_local_data *) data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA_TIMER);

	if (testpoint(consumerd_thread_metadata_timer)) {
		goto error_testpoint;
	}

	health_code_update();

	/* Only this thread waits on the signals in this mask. */
	setmask(&mask);
	CMM_STORE_SHARED(timer_signal.tid, pthread_self());

	while (true) {
		health_code_update();

		health_poll_entry();
		signr = sigwaitinfo(&mask, &info);
		health_poll_exit();

		/*
		 * NOTE: cascading conditions are used instead of a switch case
		 * since the use of SIGRTMIN in the definition of the signals'
		 * values prevents the reduction to an integer constant.
		 */
		if (signr == -1) {
			if (errno != EINTR) {
				PERROR("sigwaitinfo");
			}
			continue;
		} else if (signr == LTTNG_CONSUMER_SIG_SWITCH) {
			metadata_switch_timer(ctx, &info);
		} else if (signr == LTTNG_CONSUMER_SIG_TEARDOWN) {
			cmm_smp_mb();
			CMM_STORE_SHARED(timer_signal.qs_done, 1);
			cmm_smp_mb();
			DBG("Signal timer metadata thread teardown");
		} else if (signr == LTTNG_CONSUMER_SIG_LIVE) {
			live_timer(ctx, &info);
		} else if (signr == LTTNG_CONSUMER_SIG_MONITOR) {
			struct lttng_consumer_channel *channel;

			channel = (lttng_consumer_channel *) info.si_value.sival_ptr;
			sample_and_send_channel_buffer_stats(channel);
		} else if (signr == LTTNG_CONSUMER_SIG_EXIT) {
			LTTNG_ASSERT(CMM_LOAD_SHARED(consumer_quit));
			goto end;
		} else {
			ERR("Unexpected signal %d\n", info.si_signo);
		}
	}

error_testpoint:
	/* Only reached in testpoint error */
	health_error();
end:
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return nullptr;
}