Tracepoints and markers: cleanup init, add missing mutex lock/unlock
[ust.git] / libust / tracectl.c
1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #define _LGPL_SOURCE
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <stdint.h>
28 #include <pthread.h>
29 #include <signal.h>
30 #include <sys/epoll.h>
31 #include <sys/time.h>
32 #include <sys/types.h>
33 #include <sys/socket.h>
34 #include <fcntl.h>
35 #include <poll.h>
36 #include <regex.h>
37 #include <urcu/uatomic_arch.h>
38 #include <urcu/list.h>
39
40 #include <ust/marker.h>
41 #include <ust/tracepoint.h>
42 #include <ust/tracepoint-internal.h>
43 #include <ust/tracectl.h>
44 #include <ust/clock.h>
45 #include "tracer.h"
46 #include "usterr_signal_safe.h"
47 #include "ustcomm.h"
48 #include "buffers.h"
49 #include "marker-control.h"
50
/* This should only be accessed by the constructor, before the creation
 * of the listener, and then only by the listener.
 */
s64 pidunique = -1LL;

/* The process pid is used to detect a non-traceable fork
 * and allow the non-traceable fork to be ignored
 * by destructor sequences in libust
 */
static pid_t processpid = 0;

/* Scratch header and buffers reused for every request handled by the
 * single listener thread (never used concurrently).
 */
static struct ustcomm_header _receive_header;
static struct ustcomm_header *receive_header = &_receive_header;
static char receive_buffer[USTCOMM_BUFFER_SIZE];
static char send_buffer[USTCOMM_BUFFER_SIZE];

/* epoll instance the listener waits on. */
static int epoll_fd;

/*
 * Listener thread data vs fork() protection mechanism. Ensures that no listener
 * thread mutexes and data structures are being concurrently modified or held by
 * other threads when fork() is executed.
 */
static pthread_mutex_t listener_thread_data_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mutex protecting listen_sock. Nests inside listener_thread_data_mutex. */
static pthread_mutex_t listen_sock_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct ustcomm_sock *listen_sock;

extern struct chan_info_struct chan_infos[];

/* All sockets the listener polls (the named accept socket plus one socket
 * per connected client).
 */
static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);

/* Shared between the listener and the main thread; accessed through
 * CMM_LOAD_SHARED()/CMM_STORE_SHARED() (the variable itself is not
 * volatile-qualified).
 */
int buffers_to_export = 0;

/* Clock id used for trace timestamps. */
int ust_clock_source;
88
/* Build a value that distinguishes processes sharing the same pid (e.g.
 * across an exec): seconds since the epoch in the high 32 bits,
 * microseconds in the low 32 bits.
 */
static long long make_pidunique(void)
{
	struct timeval now;
	long long uniq;

	gettimeofday(&now, NULL);

	uniq = (long long) now.tv_sec << 32;
	uniq |= now.tv_usec;

	return uniq;
}
102
103 static void print_ust_marker(FILE *fp)
104 {
105 struct ust_marker_iter iter;
106
107 ust_marker_iter_reset(&iter);
108 ust_marker_iter_start(&iter);
109
110 while (iter.ust_marker) {
111 fprintf(fp, "ust_marker: %s/%s %d \"%s\" %p\n",
112 (*iter.ust_marker)->channel,
113 (*iter.ust_marker)->name,
114 (int)(*iter.ust_marker)->state,
115 (*iter.ust_marker)->format,
116 NULL); /*
117 * location is null for now, will be added
118 * to a different table.
119 */
120 ust_marker_iter_next(&iter);
121 }
122 ust_marker_iter_stop(&iter);
123 }
124
125 static void print_trace_events(FILE *fp)
126 {
127 struct trace_event_iter iter;
128
129 lock_trace_events();
130 trace_event_iter_reset(&iter);
131 trace_event_iter_start(&iter);
132
133 while (iter.trace_event) {
134 fprintf(fp, "trace_event: %s\n", (*iter.trace_event)->name);
135 trace_event_iter_next(&iter);
136 }
137 unlock_trace_events();
138 }
139
140 static int connect_ustconsumer(void)
141 {
142 int result, fd;
143 char default_daemon_path[] = SOCK_DIR "/ustconsumer";
144 char *explicit_daemon_path, *daemon_path;
145
146 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
147 if (explicit_daemon_path) {
148 daemon_path = explicit_daemon_path;
149 } else {
150 daemon_path = default_daemon_path;
151 }
152
153 DBG("Connecting to daemon_path %s", daemon_path);
154
155 result = ustcomm_connect_path(daemon_path, &fd);
156 if (result < 0) {
157 WARN("connect_ustconsumer failed, daemon_path: %s",
158 daemon_path);
159 return result;
160 }
161
162 return fd;
163 }
164
165
166 static void request_buffer_consumer(int sock,
167 const char *trace,
168 const char *channel,
169 int cpu)
170 {
171 struct ustcomm_header send_header, recv_header;
172 struct ustcomm_buffer_info buf_inf;
173 int result = 0;
174
175 result = ustcomm_pack_buffer_info(&send_header,
176 &buf_inf,
177 trace,
178 channel,
179 cpu);
180
181 if (result < 0) {
182 ERR("failed to pack buffer info message %s_%d",
183 channel, cpu);
184 return;
185 }
186
187 buf_inf.pid = getpid();
188 send_header.command = CONSUME_BUFFER;
189
190 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
191 &recv_header, NULL);
192 if (result <= 0) {
193 PERROR("request for buffer consumer failed, is the daemon online?");
194 }
195
196 return;
197 }
198
/* Ask the daemon to collect a trace called trace_name and being
 * produced by this pid.
 *
 * The trace must be at least allocated. (It can also be started.)
 * This is because _ltt_trace_find is used.
 */

static void inform_consumer_daemon(const char *trace_name)
{
	int sock, i,j;
	struct ust_trace *trace;
	const char *ch_name;

	sock = connect_ustconsumer();
	if (sock < 0) {
		/* No consumer daemon reachable; nothing to inform. */
		return;
	}

	DBG("Connected to ustconsumer");

	/* Hold the traces lock while walking the channel/cpu matrix. */
	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
		goto unlock_traces;
	}

	for (i=0; i < trace->nr_channels; i++) {
		if (trace->channels[i].request_collection) {
			/* iterate on all cpus */
			for (j=0; j<trace->channels[i].n_cpus; j++) {
				ch_name = trace->channels[i].channel_name;
				request_buffer_consumer(sock, trace_name,
							ch_name, j);
				/* One more buffer the consumer must map before
				 * tracing can shut down cleanly; decremented in
				 * notify_buffer_mapped(). */
				CMM_STORE_SHARED(buffers_to_export,
						 CMM_LOAD_SHARED(buffers_to_export)+1);
			}
		}
	}

unlock_traces:
	ltt_unlock_traces();

	close(sock);
}
245
246 static struct ust_channel *find_channel(const char *ch_name,
247 struct ust_trace *trace)
248 {
249 int i;
250
251 for (i=0; i<trace->nr_channels; i++) {
252 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
253 return &trace->channels[i];
254 }
255 }
256
257 return NULL;
258 }
259
/* Look up the shared-memory ids and the data-ready pipe fd of one buffer
 * (trace/channel/cpu), so they can be handed to the consumer daemon.
 *
 * Returns 0 on success and fills the three out-parameters; -ENODATA when
 * the trace or channel cannot be found.
 */
static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
				    int ch_cpu,
				    int *buf_shmid,
				    int *buf_struct_shmid,
				    int *buf_pipe_fd)
{
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_buffer_shmid_pipe_fd");

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	ltt_unlock_traces();

	/* NOTE(review): trace and channel are dereferenced below after the
	 * traces lock is released; this assumes the trace cannot be destroyed
	 * concurrently — confirm against the callers' locking. */
	if (trace == NULL) {
		ERR("cannot find trace!");
		return -ENODATA;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("cannot find channel %s!", ch_name);
		return -ENODATA;
	}

	buf = channel->buf[ch_cpu];

	*buf_shmid = buf->shmid;
	*buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
	*buf_pipe_fd = buf->data_ready_fd_read;

	return 0;
}
295
/* Report the sub-buffer count and size of a channel.
 *
 * Returns 0 on success and fills *num / *size; -ENODATA when the trace or
 * channel cannot be found.
 */
static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
			       int *num, int *size)
{
	struct ust_trace *trace;
	struct ust_channel *channel;

	DBG("get_subbuf_size");

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	ltt_unlock_traces();

	/* NOTE(review): trace/channel are used after the traces lock is
	 * dropped, same pattern as get_buffer_shmid_pipe_fd() — confirm the
	 * trace cannot be destroyed concurrently. */
	if (!trace) {
		ERR("cannot find trace!");
		return -ENODATA;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("unable to find channel");
		return -ENODATA;
	}

	*num = channel->subbuf_cnt;
	*size = channel->subbuf_size;

	return 0;
}
324
/* Return the smallest power of two that is greater than or equal to v.
 *
 * v == 0 and v == 1 both yield 1. (The previous fls()-based version
 * invoked undefined behavior for v == 0: fls(0) == 0 led to 1 << -1.)
 * For v above the largest representable power of two the result is 0,
 * matching what the old version's final overflowing shift produced.
 */
static unsigned int pow2_higher_or_eq(unsigned int v)
{
	unsigned int pow = 1;

	/* Unsigned doubling is well-defined; the "pow" guard stops the loop
	 * once the shift has wrapped to 0 (v > 2^31 on 32-bit int). */
	while (pow && pow < v)
		pow <<= 1;

	return pow;
}
337
/* Set the sub-buffer size of a channel belonging to a trace that is still
 * in the "setup" state (not yet allocated, hence _ltt_trace_find_setup).
 * The size is rounded up to a power of two, with a minimum of 2.
 *
 * Returns 0 on success, -ENODATA when the trace or channel is not found.
 */
static int set_subbuf_size(const char *trace_name, const char *ch_name,
			   unsigned int size)
{
	unsigned int power;
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;

	DBG("set_subbuf_size");

	power = pow2_higher_or_eq(size);
	power = max_t(unsigned int, 2u, power);
	if (power != size) {
		WARN("using the next power of two for buffer size = %u\n", power);
	}

	ltt_lock_traces();
	trace = _ltt_trace_find_setup(trace_name);
	if (trace == NULL) {
		ERR("cannot find trace!");
		retval = -ENODATA;
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("unable to find channel");
		retval = -ENODATA;
		goto unlock_traces;
	}

	channel->subbuf_size = power;
	/* NOTE(review): %zu assumes subbuf_size is a size_t — confirm its
	 * declared type in the channel struct. */
	DBG("the set_subbuf_size for the requested channel is %zu", channel->subbuf_size);

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
377
378 static int set_subbuf_num(const char *trace_name, const char *ch_name,
379 unsigned int num)
380 {
381 struct ust_trace *trace;
382 struct ust_channel *channel;
383 int retval = 0;
384
385 DBG("set_subbuf_num");
386
387 if (num < 2) {
388 ERR("subbuffer count should be greater than 2");
389 return -EINVAL;
390 }
391
392 ltt_lock_traces();
393 trace = _ltt_trace_find_setup(trace_name);
394 if (trace == NULL) {
395 ERR("cannot find trace!");
396 retval = -ENODATA;
397 goto unlock_traces;
398 }
399
400 channel = find_channel(ch_name, trace);
401 if (!channel) {
402 ERR("unable to find channel");
403 retval = -ENODATA;
404 goto unlock_traces;
405 }
406
407 channel->subbuf_cnt = num;
408 DBG("the set_subbuf_cnt for the requested channel is %u", channel->subbuf_cnt);
409
410 unlock_traces:
411 ltt_unlock_traces();
412 return retval;
413 }
414
/* Reserve the next readable sub-buffer of one buffer (trace/channel/cpu)
 * for the consumer, storing the consumed position in *consumed_old.
 *
 * Returns the (non-negative) result of ust_buffers_get_subbuf() on
 * success, or -ENODATA when the trace or channel cannot be found.
 */
static int get_subbuffer(const char *trace_name, const char *ch_name,
			 int ch_cpu, long *consumed_old)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_subbuf");

	*consumed_old = 0;

	/* Unlike the lookup helpers above, the traces lock is held across
	 * the whole operation here. */
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		DBG("Cannot find trace. It was likely destroyed by the user.");
		retval = -ENODATA;
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("unable to find channel");
		retval = -ENODATA;
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	retval = ust_buffers_get_subbuf(buf, consumed_old);
	if (retval < 0) {
		WARN("missed buffer?");
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
455
456
/* Handle the consumer daemon's notification that it has mapped one buffer
 * (trace/channel/cpu) into its memory.
 *
 * Returns 0 on success, -ENODATA when the trace or channel cannot be found.
 */
static int notify_buffer_mapped(const char *trace_name,
				const char *ch_name,
				int ch_cpu)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_buffer_fd");

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		retval = -ENODATA;
		DBG("Cannot find trace. It was likely destroyed by the user.");
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		retval = -ENODATA;
		ERR("unable to find channel");
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	/* Being here is the proof the daemon has mapped the buffer in its
	 * memory. We may now decrement buffers_to_export.
	 */
	/* NOTE(review): the consumed==0 test presumably distinguishes the
	 * first mapping (so repeated notifications don't double-decrement) —
	 * confirm against the consumer protocol. */
	if (uatomic_read(&buf->consumed) == 0) {
		DBG("decrementing buffers_to_export");
		CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
499
/* Release a sub-buffer previously reserved with get_subbuffer(), making it
 * writable again. consumed_old must be the value returned by the matching
 * get_subbuffer() call.
 *
 * Returns the result of ust_buffers_put_subbuf(), or -ENODATA when the
 * trace or channel cannot be found.
 */
static int put_subbuffer(const char *trace_name, const char *ch_name,
			 int ch_cpu, long consumed_old)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("put_subbuf");

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		retval = -ENODATA;
		DBG("Cannot find trace. It was likely destroyed by the user.");
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		retval = -ENODATA;
		ERR("unable to find channel");
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	retval = ust_buffers_put_subbuf(buf, consumed_old);
	if (retval < 0) {
		WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
		     ch_name, ch_cpu);
	} else {
		DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
		    ch_name, ch_cpu);
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
542
/* pthread cleanup handler: releases the listener data mutex if the
 * listener thread is cancelled while holding it (see listener_main()).
 * @ptr: unused (pthread_cleanup_push handler signature).
 */
static void release_listener_mutex(void *ptr)
{
	pthread_mutex_unlock(&listener_thread_data_mutex);
}
547
/* pthread cleanup handler run when the listener thread exits or is
 * cancelled: tears down the named listening socket (removing its file)
 * and clears listen_sock so nobody uses it afterwards.
 * @ptr: unused (pthread_cleanup_push handler signature).
 */
static void listener_cleanup(void *ptr)
{
	pthread_mutex_lock(&listen_sock_mutex);
	if (listen_sock) {
		ustcomm_del_named_sock(listen_sock, 0);
		listen_sock = NULL;
	}
	pthread_mutex_unlock(&listen_sock_mutex);
}
557
/* Force a sub-buffer switch (flush) on every buffer of every channel of
 * the named trace, pushing partially-filled sub-buffers to the consumer.
 *
 * Returns 0 on success, -ENODATA when the trace cannot be found.
 */
static int force_subbuf_switch(const char *trace_name)
{
	struct ust_trace *trace;
	int i, j, retval = 0;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	if (!trace) {
		retval = -ENODATA;
		DBG("Cannot find trace. It was likely destroyed by the user.");
		goto unlock_traces;
	}

	for (i = 0; i < trace->nr_channels; i++) {
		for (j = 0; j < trace->channels[i].n_cpus; j++) {
			ltt_force_switch(trace->channels[i].buf[j],
					 FORCE_FLUSH);
		}
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
583
/* Execute one trace lifecycle command on the named trace.
 *
 * Returns 0 on success; on failure, the negative error of the first
 * ltt_* step that failed. Unknown commands fall through and return 0.
 */
static int process_trace_cmd(int command, char *trace_name)
{
	int result;
	char trace_type[] = "ustrelay";

	switch(command) {
	case START:
		/* start is an operation that setups the trace, allocates it and starts it */
		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}

		inform_consumer_daemon(trace_name);

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case SETUP_TRACE:
		/* Setup + set type only; same steps as CREATE_TRACE below. */
		DBG("trace setup");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case ALLOC_TRACE:
		DBG("trace alloc");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		inform_consumer_daemon(trace_name);

		return 0;

	case CREATE_TRACE:
		DBG("trace create");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case START_TRACE:
		DBG("trace start");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		/* Only inform the consumer when the buffers were freshly
		 * allocated (presumably alloc returns > 0 when the trace was
		 * already allocated — TODO confirm ltt_trace_alloc's
		 * contract). */
		if (!result) {
			inform_consumer_daemon(trace_name);
		}

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case STOP_TRACE:
		DBG("trace stop");

		result = ltt_trace_stop(trace_name);
		if (result < 0) {
			ERR("ltt_trace_stop failed");
			return result;
		}

		return 0;
	case DESTROY_TRACE:
		DBG("trace destroy");

		result = ltt_trace_destroy(trace_name, 0);
		if (result < 0) {
			ERR("ltt_trace_destroy failed");
			return result;
		}
		return 0;
	case FORCE_SUBBUF_SWITCH:
		DBG("force switch");

		result = force_subbuf_switch(trace_name);
		if (result < 0) {
			ERR("force_subbuf_switch failed");
			return result;
		}
		return 0;
	}

	return 0;
}
714
715
/* Handle a channel-level request (sub-buffer count/size get and set) and
 * send the reply on sock. The reply payload lives in the shared
 * send_buffer; its channel pointer is poisoned because pointers are
 * meaningless across the process boundary.
 */
static void process_channel_cmd(int sock, int command,
				struct ustcomm_channel_info *ch_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_channel_info *reply_msg =
		(struct ustcomm_channel_info *)send_buffer;
	int result, offset = 0, num, size;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_SUBBUF_NUM_SIZE:
		result = get_subbuf_num_size(ch_inf->trace,
					     ch_inf->channel,
					     &num, &size);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->subbuf_num = num;
		reply_msg->subbuf_size = size;


		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case SET_SUBBUF_NUM:
		reply_header->result = set_subbuf_num(ch_inf->trace,
						      ch_inf->channel,
						      ch_inf->subbuf_num);

		break;
	case SET_SUBBUF_SIZE:
		reply_header->result = set_subbuf_size(ch_inf->trace,
						       ch_inf->channel,
						       ch_inf->subbuf_size);


		break;
	}
	/* A reply (possibly with only a zeroed header) is always sent. */
	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}
}
763
/* Handle a buffer-level request (shmid/pipe lookup, map notification,
 * sub-buffer get/put) and send the reply on sock.
 *
 * GET_BUF_SHMID_PIPE_FD returns early because its reply must travel via
 * ustcomm_send_fd() to pass the pipe file descriptor; every other command
 * falls through to the common ustcomm_send() at the end.
 */
static void process_buffer_cmd(int sock, int command,
			       struct ustcomm_buffer_info *buf_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_buffer_info *reply_msg =
		(struct ustcomm_buffer_info *)send_buffer;
	int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
	long consumed_old;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_BUF_SHMID_PIPE_FD:
		result = get_buffer_shmid_pipe_fd(buf_inf->trace,
						  buf_inf->channel,
						  buf_inf->ch_cpu,
						  &buf_shmid,
						  &buf_struct_shmid,
						  &buf_pipe_fd);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		/* Pointers are meaningless across processes: poison. */
		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->buf_shmid = buf_shmid;
		reply_msg->buf_struct_shmid = buf_struct_shmid;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
		reply_header->fd_included = 1;

		if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
				    &buf_pipe_fd) < 0) {
			ERR("ustcomm_send failed");
		}
		return;

	case NOTIFY_BUF_MAPPED:
		reply_header->result =
			notify_buffer_mapped(buf_inf->trace,
					     buf_inf->channel,
					     buf_inf->ch_cpu);
		break;
	case GET_SUBBUFFER:
		result = get_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu, &consumed_old);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->consumed_old = consumed_old;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case PUT_SUBBUFFER:
		result = put_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu,
				       buf_inf->consumed_old);
		reply_header->result = result;

		break;
	}

	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}

}
836
/* Handle an ENABLE_MARKER / DISABLE_MARKER request by connecting or
 * disconnecting the "default" probe on the named marker, then send a
 * header-only reply carrying the result (failures are logged as warnings
 * but still reported in the reply).
 */
static void process_ust_marker_cmd(int sock, int command,
				   struct ustcomm_ust_marker_info *ust_marker_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	int result = 0;

	memset(reply_header, 0, sizeof(*reply_header));

	switch(command) {
	case ENABLE_MARKER:

		result = ltt_ust_marker_connect(ust_marker_inf->channel,
						ust_marker_inf->ust_marker,
						"default");
		if (result < 0) {
			WARN("could not enable ust_marker; channel=%s,"
			     " name=%s",
			     ust_marker_inf->channel,
			     ust_marker_inf->ust_marker);

		}
		break;
	case DISABLE_MARKER:
		result = ltt_ust_marker_disconnect(ust_marker_inf->channel,
						   ust_marker_inf->ust_marker,
						   "default");
		if (result < 0) {
			WARN("could not disable ust_marker; channel=%s,"
			     " name=%s",
			     ust_marker_inf->channel,
			     ust_marker_inf->ust_marker);
		}
		break;
	}

	reply_header->result = result;

	if (ustcomm_send(sock, reply_header, NULL) < 0) {
		ERR("ustcomm_send failed");
	}

}
880 static void process_client_cmd(struct ustcomm_header *recv_header,
881 char *recv_buf, int sock)
882 {
883 int result;
884 struct ustcomm_header _reply_header;
885 struct ustcomm_header *reply_header = &_reply_header;
886 char *send_buf = send_buffer;
887
888 memset(reply_header, 0, sizeof(*reply_header));
889 memset(send_buf, 0, sizeof(send_buffer));
890
891 switch(recv_header->command) {
892 case GET_SUBBUF_NUM_SIZE:
893 case SET_SUBBUF_NUM:
894 case SET_SUBBUF_SIZE:
895 {
896 struct ustcomm_channel_info *ch_inf;
897 ch_inf = (struct ustcomm_channel_info *)recv_buf;
898 result = ustcomm_unpack_channel_info(ch_inf);
899 if (result < 0) {
900 ERR("couldn't unpack channel info");
901 reply_header->result = -EINVAL;
902 goto send_response;
903 }
904 process_channel_cmd(sock, recv_header->command, ch_inf);
905 return;
906 }
907 case GET_BUF_SHMID_PIPE_FD:
908 case NOTIFY_BUF_MAPPED:
909 case GET_SUBBUFFER:
910 case PUT_SUBBUFFER:
911 {
912 struct ustcomm_buffer_info *buf_inf;
913 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
914 result = ustcomm_unpack_buffer_info(buf_inf);
915 if (result < 0) {
916 ERR("couldn't unpack buffer info");
917 reply_header->result = -EINVAL;
918 goto send_response;
919 }
920 process_buffer_cmd(sock, recv_header->command, buf_inf);
921 return;
922 }
923 case ENABLE_MARKER:
924 case DISABLE_MARKER:
925 {
926 struct ustcomm_ust_marker_info *ust_marker_inf;
927 ust_marker_inf = (struct ustcomm_ust_marker_info *)recv_buf;
928 result = ustcomm_unpack_ust_marker_info(ust_marker_inf);
929 if (result < 0) {
930 ERR("couldn't unpack ust_marker info");
931 reply_header->result = -EINVAL;
932 goto send_response;
933 }
934 process_ust_marker_cmd(sock, recv_header->command, ust_marker_inf);
935 return;
936 }
937 case LIST_MARKERS:
938 {
939 char *ptr;
940 size_t size;
941 FILE *fp;
942
943 fp = open_memstream(&ptr, &size);
944 if (fp == NULL) {
945 ERR("opening memstream failed");
946 return;
947 }
948 print_ust_marker(fp);
949 fclose(fp);
950
951 reply_header->size = size + 1; /* Include final \0 */
952
953 result = ustcomm_send(sock, reply_header, ptr);
954
955 free(ptr);
956
957 if (result < 0) {
958 PERROR("failed to send ust_marker list");
959 }
960
961 break;
962 }
963 case LIST_TRACE_EVENTS:
964 {
965 char *ptr;
966 size_t size;
967 FILE *fp;
968
969 fp = open_memstream(&ptr, &size);
970 if (fp == NULL) {
971 ERR("opening memstream failed");
972 return;
973 }
974 print_trace_events(fp);
975 fclose(fp);
976
977 reply_header->size = size + 1; /* Include final \0 */
978
979 result = ustcomm_send(sock, reply_header, ptr);
980
981 free(ptr);
982
983 if (result < 0) {
984 ERR("list_trace_events failed");
985 return;
986 }
987
988 break;
989 }
990 case LOAD_PROBE_LIB:
991 {
992 char *libfile;
993
994 /* FIXME: No functionality at all... */
995 libfile = recv_buf;
996
997 DBG("load_probe_lib loading %s", libfile);
998
999 break;
1000 }
1001 case GET_PIDUNIQUE:
1002 {
1003 struct ustcomm_pidunique *pid_msg;
1004 pid_msg = (struct ustcomm_pidunique *)send_buf;
1005
1006 pid_msg->pidunique = pidunique;
1007 reply_header->size = sizeof(pid_msg);
1008
1009 goto send_response;
1010
1011 }
1012 case GET_SOCK_PATH:
1013 {
1014 struct ustcomm_single_field *sock_msg;
1015 char *sock_path_env;
1016
1017 sock_msg = (struct ustcomm_single_field *)send_buf;
1018
1019 sock_path_env = getenv("UST_DAEMON_SOCKET");
1020
1021 if (!sock_path_env) {
1022 result = ustcomm_pack_single_field(reply_header,
1023 sock_msg,
1024 SOCK_DIR "/ustconsumer");
1025
1026 } else {
1027 result = ustcomm_pack_single_field(reply_header,
1028 sock_msg,
1029 sock_path_env);
1030 }
1031 reply_header->result = result;
1032
1033 goto send_response;
1034 }
1035 case SET_SOCK_PATH:
1036 {
1037 struct ustcomm_single_field *sock_msg;
1038 sock_msg = (struct ustcomm_single_field *)recv_buf;
1039 result = ustcomm_unpack_single_field(sock_msg);
1040 if (result < 0) {
1041 reply_header->result = -EINVAL;
1042 goto send_response;
1043 }
1044
1045 reply_header->result = setenv("UST_DAEMON_SOCKET",
1046 sock_msg->field, 1);
1047
1048 goto send_response;
1049 }
1050 case START:
1051 case SETUP_TRACE:
1052 case ALLOC_TRACE:
1053 case CREATE_TRACE:
1054 case START_TRACE:
1055 case STOP_TRACE:
1056 case DESTROY_TRACE:
1057 case FORCE_SUBBUF_SWITCH:
1058 {
1059 struct ustcomm_single_field *trace_inf =
1060 (struct ustcomm_single_field *)recv_buf;
1061
1062 result = ustcomm_unpack_single_field(trace_inf);
1063 if (result < 0) {
1064 ERR("couldn't unpack trace info");
1065 reply_header->result = -EINVAL;
1066 goto send_response;
1067 }
1068
1069 reply_header->result =
1070 process_trace_cmd(recv_header->command,
1071 trace_inf->field);
1072 goto send_response;
1073
1074 }
1075 default:
1076 reply_header->result = -EINVAL;
1077
1078 goto send_response;
1079 }
1080
1081 return;
1082
1083 send_response:
1084 ustcomm_send(sock, reply_header, send_buf);
1085 }
1086
1087 #define MAX_EVENTS 10
1088
1089 void *listener_main(void *p)
1090 {
1091 struct ustcomm_sock *epoll_sock;
1092 struct epoll_event events[MAX_EVENTS];
1093 struct sockaddr addr;
1094 int accept_fd, nfds, result, i, addr_size;
1095
1096 DBG("LISTENER");
1097
1098 pthread_cleanup_push(listener_cleanup, NULL);
1099
1100 for(;;) {
1101 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1102 if (nfds == -1) {
1103 PERROR("listener_main: epoll_wait failed");
1104 continue;
1105 }
1106
1107 for (i = 0; i < nfds; i++) {
1108 pthread_mutex_lock(&listener_thread_data_mutex);
1109 pthread_cleanup_push(release_listener_mutex, NULL);
1110 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1111 if (epoll_sock == listen_sock) {
1112 addr_size = sizeof(struct sockaddr);
1113 accept_fd = accept(epoll_sock->fd,
1114 &addr,
1115 (socklen_t *)&addr_size);
1116 if (accept_fd == -1) {
1117 PERROR("listener_main: accept failed");
1118 continue;
1119 }
1120 ustcomm_init_sock(accept_fd, epoll_fd,
1121 &ust_socks);
1122 } else {
1123 memset(receive_header, 0,
1124 sizeof(*receive_header));
1125 memset(receive_buffer, 0,
1126 sizeof(receive_buffer));
1127 result = ustcomm_recv(epoll_sock->fd,
1128 receive_header,
1129 receive_buffer);
1130 if (result == 0) {
1131 ustcomm_del_sock(epoll_sock, 0);
1132 } else {
1133 process_client_cmd(receive_header,
1134 receive_buffer,
1135 epoll_sock->fd);
1136 }
1137 }
1138 pthread_cleanup_pop(1); /* release listener mutex */
1139 }
1140 }
1141
1142 pthread_cleanup_pop(1);
1143 }
1144
/* These should only be accessed in the parent thread,
 * not the listener.
 */
/* Non-zero once the listener thread has been created successfully. */
static volatile sig_atomic_t have_listener = 0;
static pthread_t listener_thread;
1150
1151 void create_listener(void)
1152 {
1153 int result;
1154 sigset_t sig_all_blocked;
1155 sigset_t orig_parent_mask;
1156
1157 if (have_listener) {
1158 WARN("not creating listener because we already had one");
1159 return;
1160 }
1161
1162 /* A new thread created by pthread_create inherits the signal mask
1163 * from the parent. To avoid any signal being received by the
1164 * listener thread, we block all signals temporarily in the parent,
1165 * while we create the listener thread.
1166 */
1167
1168 sigfillset(&sig_all_blocked);
1169
1170 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1171 if (result) {
1172 PERROR("pthread_sigmask: %s", strerror(result));
1173 }
1174
1175 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1176 if (result == -1) {
1177 PERROR("pthread_create");
1178 }
1179
1180 /* Restore original signal mask in parent */
1181 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1182 if (result) {
1183 PERROR("pthread_sigmask: %s", strerror(result));
1184 } else {
1185 have_listener = 1;
1186 }
1187 }
1188
/* Autoprobe: automatically connect the default probe to markers, either to
 * all of them or only to those whose "channel/name" matches a regex
 * (configured through the UST_AUTOPROBE environment variable).
 */
#define AUTOPROBE_DISABLED 0
#define AUTOPROBE_ENABLE_ALL 1
#define AUTOPROBE_ENABLE_REGEX 2
static int autoprobe_method = AUTOPROBE_DISABLED;
static regex_t autoprobe_regex;
1194
/* Callback invoked for each marker (newly registered or already present):
 * connects the "default" probe to it, subject to the autoprobe mode —
 * no-op when disabled, regex-filtered on "channel/name" in regex mode.
 */
static void auto_probe_connect(struct ust_marker *m)
{
	int result;

	char* concat_name = NULL;
	const char *probe_name = "default";

	if (autoprobe_method == AUTOPROBE_DISABLED) {
		return;
	} else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
		result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
		if (result == -1) {
			ERR("auto_probe_connect: asprintf failed (ust_marker %s/%s)",
			    m->channel, m->name);
			return;
		}
		/* Non-zero means no match: skip this marker. */
		if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
			free(concat_name);
			return;
		}
		free(concat_name);
	}

	/* -EEXIST (probe already connected) is not an error here. */
	result = ltt_ust_marker_connect(m->channel, m->name, probe_name);
	if (result && result != -EEXIST)
		ERR("ltt_ust_marker_connect (ust_marker = %s/%s, errno = %d)", m->channel, m->name, -result);

	DBG("auto connected ust_marker %s (addr: %p) %s to probe default", m->channel, m, m->name);

}
1225
/* Create the per-application named listening socket
 * ("<user sock dir>/<pid>.<st_mtime of /proc/pid>") and register it with
 * the given epoll instance.
 *
 * Returns the socket on success, NULL on any failure. The two labels at
 * the end are plain cleanup fallthroughs: both name strings are freed on
 * every path, success included.
 */
static struct ustcomm_sock * init_app_socket(int epoll_fd)
{
	char *dir_name, *sock_name;
	int result;
	struct ustcomm_sock *sock = NULL;
	time_t mtime;

	dir_name = ustcomm_user_sock_dir();
	if (!dir_name)
		return NULL;

	/* The mtime suffix distinguishes reused pids across reboots/execs. */
	mtime = ustcomm_pid_st_mtime(getpid());
	if (!mtime) {
		goto free_dir_name;
	}

	result = asprintf(&sock_name, "%s/%d.%ld", dir_name,
			  (int) getpid(), (long) mtime);
	if (result < 0) {
		ERR("string overflow allocating socket name, "
		    "UST thread bailing");
		goto free_dir_name;
	}

	result = ensure_dir_exists(dir_name, S_IRWXU);
	if (result == -1) {
		ERR("Unable to create socket directory %s, UST thread bailing",
		    dir_name);
		goto free_sock_name;
	}

	sock = ustcomm_init_named_socket(sock_name, epoll_fd);
	if (!sock) {
		ERR("Error initializing named socket (%s). Check that directory"
		    "exists and that it is writable. UST thread bailing", sock_name);
		goto free_sock_name;
	}

free_sock_name:
	free(sock_name);
free_dir_name:
	free(dir_name);

	return sock;
}
1271
1272 static void __attribute__((constructor)) init()
1273 {
1274 struct timespec ts;
1275 int result;
1276 char* autoprobe_val = NULL;
1277 char* subbuffer_size_val = NULL;
1278 char* subbuffer_count_val = NULL;
1279 unsigned int subbuffer_size;
1280 unsigned int subbuffer_count;
1281 unsigned int power;
1282
1283 /* Assign the pidunique, to be able to differentiate the processes with same
1284 * pid, (before and after an exec).
1285 */
1286 pidunique = make_pidunique();
1287 processpid = getpid();
1288
1289 DBG("Tracectl constructor");
1290
1291 /* Set up epoll */
1292 epoll_fd = epoll_create(MAX_EVENTS);
1293 if (epoll_fd == -1) {
1294 ERR("epoll_create failed, tracing shutting down");
1295 return;
1296 }
1297
1298 /* Create the socket */
1299 listen_sock = init_app_socket(epoll_fd);
1300 if (!listen_sock) {
1301 ERR("failed to create application socket,"
1302 " tracing shutting down");
1303 return;
1304 }
1305
1306 create_listener();
1307
1308 /* Get clock the clock source type */
1309
1310 /* Default clock source */
1311 ust_clock_source = CLOCK_TRACE;
1312 if (clock_gettime(ust_clock_source, &ts) != 0) {
1313 ust_clock_source = CLOCK_MONOTONIC;
1314 DBG("UST traces will not be synchronized with LTTng traces");
1315 }
1316
1317 if (getenv("UST_TRACE") || getenv("UST_AUTOPROBE")) {
1318 /* Ensure ust_marker control is initialized */
1319 init_ust_marker_control();
1320 }
1321
1322 autoprobe_val = getenv("UST_AUTOPROBE");
1323 if (autoprobe_val) {
1324 struct ust_marker_iter iter;
1325
1326 DBG("Autoprobe enabled.");
1327
1328 /* first, set the callback that will connect the
1329 * probe on new ust_marker
1330 */
1331 if (autoprobe_val[0] == '/') {
1332 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1333 if (result) {
1334 char regexerr[150];
1335
1336 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1337 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1338 /* don't crash the application just for this */
1339 } else {
1340 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1341 }
1342 } else {
1343 /* just enable all instrumentation */
1344 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1345 }
1346
1347 ust_marker_set_new_ust_marker_cb(auto_probe_connect);
1348
1349 /* Now, connect the probes that were already registered. */
1350 ust_marker_iter_reset(&iter);
1351 ust_marker_iter_start(&iter);
1352
1353 DBG("now iterating on ust_marker already registered");
1354 while (iter.ust_marker) {
1355 DBG("now iterating on ust_marker %s", (*iter.ust_marker)->name);
1356 auto_probe_connect(*iter.ust_marker);
1357 ust_marker_iter_next(&iter);
1358 }
1359 ust_marker_iter_stop(&iter);
1360 }
1361
1362 if (getenv("UST_OVERWRITE")) {
1363 int val = atoi(getenv("UST_OVERWRITE"));
1364 if (val == 0 || val == 1) {
1365 CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
1366 } else {
1367 WARN("invalid value for UST_OVERWRITE");
1368 }
1369 }
1370
1371 if (getenv("UST_AUTOCOLLECT")) {
1372 int val = atoi(getenv("UST_AUTOCOLLECT"));
1373 if (val == 0 || val == 1) {
1374 CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
1375 } else {
1376 WARN("invalid value for UST_AUTOCOLLECT");
1377 }
1378 }
1379
1380 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1381 if (subbuffer_size_val) {
1382 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1383 power = pow2_higher_or_eq(subbuffer_size);
1384 if (power != subbuffer_size)
1385 WARN("using the next power of two for buffer size = %u\n", power);
1386 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1387 }
1388
1389 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1390 if (subbuffer_count_val) {
1391 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1392 if (subbuffer_count < 2)
1393 subbuffer_count = 2;
1394 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1395 }
1396
1397 if (getenv("UST_TRACE")) {
1398 char trace_name[] = "auto";
1399 char trace_type[] = "ustrelay";
1400
1401 DBG("starting early tracing");
1402
1403 /* Ensure buffers are initialized, for the transport to be available.
1404 * We are about to set a trace type and it will fail without this.
1405 */
1406 init_ustrelay_transport();
1407
1408 /* FIXME: When starting early tracing (here), depending on the
1409 * order of constructors, it is very well possible some ust_marker
1410 * sections are not yet registered. Because of this, some
1411 * channels may not be registered. Yet, we are about to ask the
1412 * daemon to collect the channels. Channels which are not yet
1413 * registered will not be collected.
1414 *
1415 * Currently, in LTTng, there is no way to add a channel after
1416 * trace start. The reason for this is that it induces complex
1417 * concurrency issues on the trace structures, which can only
1418 * be resolved using RCU. This has not been done yet. As a
1419 * workaround, we are forcing the registration of the "ust"
1420 * channel here. This is the only channel (apart from metadata)
1421 * that can be reliably used in early tracing.
1422 *
1423 * Non-early tracing does not have this problem and can use
1424 * arbitrary channel names.
1425 */
1426 ltt_channels_register("ust");
1427
1428 result = ltt_trace_setup(trace_name);
1429 if (result < 0) {
1430 ERR("ltt_trace_setup failed");
1431 return;
1432 }
1433
1434 result = ltt_trace_set_type(trace_name, trace_type);
1435 if (result < 0) {
1436 ERR("ltt_trace_set_type failed");
1437 return;
1438 }
1439
1440 result = ltt_trace_alloc(trace_name);
1441 if (result < 0) {
1442 ERR("ltt_trace_alloc failed");
1443 return;
1444 }
1445
1446 result = ltt_trace_start(trace_name);
1447 if (result < 0) {
1448 ERR("ltt_trace_start failed");
1449 return;
1450 }
1451
1452 /* Do this after the trace is started in order to avoid creating confusion
1453 * if the trace fails to start. */
1454 inform_consumer_daemon(trace_name);
1455 }
1456
1457 return;
1458
1459 /* should decrementally destroy stuff if error */
1460
1461 }
1462
1463 /* This is only called if we terminate normally, not with an unhandled signal,
1464 * so we cannot rely on it. However, for now, LTTV requires that the header of
1465 * the last sub-buffer contain a valid end time for the trace. This is done
1466 * automatically only when the trace is properly stopped.
1467 *
1468 * If the traced program crashed, it is always possible to manually add the
1469 * right value in the header, or to open the trace in text mode.
1470 *
1471 * FIXME: Fix LTTV so it doesn't need this.
1472 */
1473
/* Stop and tear down the "auto" trace so the last sub-buffer header
 * gets a valid end time (required by LTTV, see comment above). */
static void destroy_traces(void)
{
	DBG("destructor stopping traces");

	/* Finish the trace if it is still running. */
	if (ltt_trace_stop("auto") == -1)
		ERR("ltt_trace_stop error");

	if (ltt_trace_destroy("auto", 0) == -1)
		ERR("ltt_trace_destroy error");
}
1492
1493 static int trace_recording(void)
1494 {
1495 int retval = 0;
1496 struct ust_trace *trace;
1497
1498 ltt_lock_traces();
1499
1500 cds_list_for_each_entry(trace, &ltt_traces.head, list) {
1501 if (trace->active) {
1502 retval = 1;
1503 break;
1504 }
1505 }
1506
1507 ltt_unlock_traces();
1508
1509 return retval;
1510 }
1511
/* Sleep for usecs microseconds, transparently restarting the sleep if
 * it is interrupted by a signal (the remaining time is carried over).
 *
 * Returns 0 on success, or -1 with errno set on a non-EINTR failure.
 */
int restarting_usleep(useconds_t usecs)
{
	struct timespec tv;
	int result;

	/* nanosleep() requires 0 <= tv_nsec < 1e9: split the delay into
	 * whole seconds and a nanosecond remainder, otherwise any request
	 * of one second or more fails with EINVAL. */
	tv.tv_sec = usecs / 1000000;
	tv.tv_nsec = (usecs % 1000000) * 1000;

	do {
		/* Pass tv as the remainder too, so an interrupted sleep
		 * resumes with only the time left. */
		result = nanosleep(&tv, &tv);
	} while (result == -1 && errno == EINTR);

	return result;
}
1526
1527 static void stop_listener(void)
1528 {
1529 int result;
1530
1531 if (!have_listener)
1532 return;
1533
1534 result = pthread_cancel(listener_thread);
1535 if (result != 0) {
1536 ERR("pthread_cancel: %s", strerror(result));
1537 }
1538 result = pthread_join(listener_thread, NULL);
1539 if (result != 0) {
1540 ERR("pthread_join: %s", strerror(result));
1541 }
1542 }
1543
1544 /* This destructor keeps the process alive for a few seconds in order
1545 * to leave time for ustconsumer to connect to its buffers. This is necessary
1546 * for programs whose execution is very short. It is also useful in all
1547 * programs when tracing is started close to the end of the program
1548 * execution.
1549 *
1550 * FIXME: For now, this only works for the first trace created in a
1551 * process.
1552 */
1553
1554 static void __attribute__((destructor)) keepalive()
1555 {
1556 if (processpid != getpid()) {
1557 return;
1558 }
1559
1560 if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
1561 int total = 0;
1562 DBG("Keeping process alive for consumer daemon...");
1563 while (CMM_LOAD_SHARED(buffers_to_export)) {
1564 const int interv = 200000;
1565 restarting_usleep(interv);
1566 total += interv;
1567
1568 if (total >= 3000000) {
1569 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1570 break;
1571 }
1572 }
1573 DBG("Finally dying...");
1574 }
1575
1576 destroy_traces();
1577
1578 /* Ask the listener to stop and clean up. */
1579 stop_listener();
1580 }
1581
1582 void ust_potential_exec(void)
1583 {
1584 ust_marker(potential_exec, UST_MARKER_NOARGS);
1585
1586 DBG("test");
1587
1588 keepalive();
1589 }
1590
1591 /* Notify ust that there was a fork. This needs to be called inside
1592 * the new process, anytime a process whose memory is not shared with
1593 * the parent is created. If this function is not called, the events
1594 * of the new process will not be collected.
1595 *
1596 * Signals should be disabled before the fork and reenabled only after
1597 * this call in order to guarantee tracing is not started before ust_fork()
1598 * sanitizes the new process.
1599 */
1600
1601 static void ust_fork(void)
1602 {
1603 struct ustcomm_sock *sock, *sock_tmp;
1604 struct ust_trace *trace, *trace_tmp;
1605 int result;
1606
1607 /* FIXME: technically, the locks could have been taken before the fork */
1608 DBG("ust: forking");
1609
1610 /* Get the pid of the new process */
1611 processpid = getpid();
1612
1613 /*
1614 * FIXME: This could be prettier, we loop over the list twice and
1615 * following good locking practice should lock around the loop
1616 */
1617 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1618 ltt_trace_stop(trace->trace_name);
1619 }
1620
1621 /* Delete all active connections, but leave them in the epoll set */
1622 cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
1623 ustcomm_del_sock(sock, 1);
1624 }
1625
1626 /*
1627 * FIXME: This could be prettier, we loop over the list twice and
1628 * following good locking practice should lock around the loop
1629 */
1630 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1631 ltt_trace_destroy(trace->trace_name, 1);
1632 }
1633
1634 /* Clean up the listener socket and epoll, keeping the socket file */
1635 if (listen_sock) {
1636 ustcomm_del_named_sock(listen_sock, 1);
1637 listen_sock = NULL;
1638 }
1639 close(epoll_fd);
1640
1641 /* Re-start the launch sequence */
1642 CMM_STORE_SHARED(buffers_to_export, 0);
1643 have_listener = 0;
1644
1645 /* Set up epoll */
1646 epoll_fd = epoll_create(MAX_EVENTS);
1647 if (epoll_fd == -1) {
1648 ERR("epoll_create failed, tracing shutting down");
1649 return;
1650 }
1651
1652 /* Create the socket */
1653 listen_sock = init_app_socket(epoll_fd);
1654 if (!listen_sock) {
1655 ERR("failed to create application socket,"
1656 " tracing shutting down");
1657 return;
1658 }
1659 create_listener();
1660 ltt_trace_setup("auto");
1661 result = ltt_trace_set_type("auto", "ustrelay");
1662 if (result < 0) {
1663 ERR("ltt_trace_set_type failed");
1664 return;
1665 }
1666
1667 ltt_trace_alloc("auto");
1668 ltt_trace_start("auto");
1669 inform_consumer_daemon("auto");
1670 }
1671
/* Prepare the process for a fork: block all signals and acquire the
 * listener-side mutexes, so that neither a signal handler nor the
 * listener thread can hold a lock across fork() (which would leave it
 * permanently locked in the child). Must be paired with
 * ust_after_fork_parent() / ust_after_fork_child(). fork_info receives
 * the original signal mask so it can be restored afterwards. */
void ust_before_fork(ust_fork_info_t *fork_info)
{
	/* Disable signals. This is to avoid that the child
	 * intervenes before it is properly setup for tracing. It is
	 * safer to disable all signals, because then we know we are not
	 * breaking anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int result;

	/* FIXME:
		- only do this if tracing is active
	*/

	/* Disable signals */
	sigfillset(&all_sigs);
	result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
	if (result == -1) {
		PERROR("sigprocmask");
		/* NOTE(review): returning here means the mutexes below are
		 * never taken, yet ust_after_fork_common() will still unlock
		 * them after the fork — verify this path cannot be reached
		 * in practice, or track the failure in fork_info. */
		return;
	}

	/*
	 * Take the fork lock to make sure we are not in the middle of
	 * something in the listener thread.
	 */
	pthread_mutex_lock(&listener_thread_data_mutex);
	/*
	 * Hold listen_sock_mutex to protect from listen_sock teardown.
	 * Lock order (data mutex first, then sock mutex) is the exact
	 * reverse of the unlock order in ust_after_fork_common().
	 */
	pthread_mutex_lock(&listen_sock_mutex);
	rcu_bp_before_fork();
}
1705
/* Don't call this function directly in a traced program */
/* Shared tail of the after-fork handlers: release the mutexes taken in
 * ust_before_fork() (in reverse acquisition order) and restore the
 * signal mask saved in fork_info. */
static void ust_after_fork_common(ust_fork_info_t *fork_info)
{
	int result;

	/* Unlock in the reverse of the order taken in ust_before_fork(). */
	pthread_mutex_unlock(&listen_sock_mutex);
	pthread_mutex_unlock(&listener_thread_data_mutex);

	/* Restore signals */
	result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
	if (result == -1) {
		PERROR("sigprocmask");
		return;
	}
}
1721
/* To be called in the parent after a fork: let liburcu-bp clean up its
 * fork state, then release the mutexes taken in ust_before_fork() and
 * restore the signal mask. */
void ust_after_fork_parent(ust_fork_info_t *fork_info)
{
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
}
1728
/* To be called in the child after a fork (see the block comment above
 * ust_fork()): clean up liburcu-bp, re-initialize tracing for the new
 * process, then release the mutexes and restore signals — in that
 * order, so no event can be recorded before the child is sanitized. */
void ust_after_fork_child(ust_fork_info_t *fork_info)
{
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();

	/* Sanitize the child */
	ust_fork();

	/* Then release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
}
1740
This page took 0.065737 seconds and 5 git commands to generate.