1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #define _LGPL_SOURCE
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <stdint.h>
28 #include <pthread.h>
29 #include <signal.h>
30 #include <sys/epoll.h>
31 #include <sys/time.h>
32 #include <sys/types.h>
33 #include <sys/socket.h>
34 #include <fcntl.h>
35 #include <poll.h>
36 #include <regex.h>
37 #include <urcu/uatomic_arch.h>
38 #include <urcu/list.h>
39
40 #include <ust/marker.h>
41 #include <ust/tracepoint.h>
42 #include <ust/tracectl.h>
43 #include <ust/clock.h>
44 #include "tracer.h"
45 #include "usterr_signal_safe.h"
46 #include "ustcomm.h"
47 #include "buffers.h"
48 #include "marker-control.h"
49
50 /* This should only be accessed by the constructor, before the creation
51 * of the listener, and then only by the listener.
52 */
53 s64 pidunique = -1LL;
54
55 /* The process pid is used to detect a non-traceable fork,
56 * so that such a fork can be ignored
57 * by the destructor sequences in libust.
58 */
59 static pid_t processpid = 0;
60
61 static struct ustcomm_header _receive_header;
62 static struct ustcomm_header *receive_header = &_receive_header;
63 static char receive_buffer[USTCOMM_BUFFER_SIZE];
64 static char send_buffer[USTCOMM_BUFFER_SIZE];
65
66 static int epoll_fd;
67
68 /*
69 * Listener thread data vs fork() protection mechanism. Ensures that no listener
70 * thread mutexes and data structures are being concurrently modified or held by
71 * other threads when fork() is executed.
72 */
73 static pthread_mutex_t listener_thread_data_mutex = PTHREAD_MUTEX_INITIALIZER;
74
75 /* Mutex protecting listen_sock. Nests inside listener_thread_data_mutex. */
76 static pthread_mutex_t listen_sock_mutex = PTHREAD_MUTEX_INITIALIZER;
77 static struct ustcomm_sock *listen_sock;
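/* Illustrative lock ordering around fork(), as used by ust_before_fork()
 * and ust_after_fork_common() near the end of this file:
 *
 *   pthread_mutex_lock(&listener_thread_data_mutex);
 *   pthread_mutex_lock(&listen_sock_mutex);
 *   ... fork() happens here ...
 *   pthread_mutex_unlock(&listen_sock_mutex);
 *   pthread_mutex_unlock(&listener_thread_data_mutex);
 */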
78
79 extern struct chan_info_struct chan_infos[];
80
81 static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);
82
83 /* Shared between the listener and the main thread; accessed via CMM_LOAD_SHARED/CMM_STORE_SHARED */
84 int buffers_to_export = 0;
85
86 int ust_clock_source;
87
88 static long long make_pidunique(void)
89 {
90 s64 retval;
91 struct timeval tv;
92
93 gettimeofday(&tv, NULL);
94
95 retval = tv.tv_sec;
96 retval <<= 32;
97 retval |= tv.tv_usec;
98
99 return retval;
100 }
101
102 static void print_ust_marker(FILE *fp)
103 {
104 struct ust_marker_iter iter;
105
106 lock_ust_marker();
107 ust_marker_iter_reset(&iter);
108 ust_marker_iter_start(&iter);
109
110 while (iter.ust_marker) {
111 fprintf(fp, "ust_marker: %s/%s %d \"%s\" %p\n",
112 (*iter.ust_marker)->channel,
113 (*iter.ust_marker)->name,
114 (int)(*iter.ust_marker)->state,
115 (*iter.ust_marker)->format,
116 (*iter.ust_marker)->location);
117 ust_marker_iter_next(&iter);
118 }
119 unlock_ust_marker();
120 }
121
122 static void print_trace_events(FILE *fp)
123 {
124 struct trace_event_iter iter;
125
126 lock_trace_events();
127 trace_event_iter_reset(&iter);
128 trace_event_iter_start(&iter);
129
130 while (iter.trace_event) {
131 fprintf(fp, "trace_event: %s\n", (*iter.trace_event)->name);
132 trace_event_iter_next(&iter);
133 }
134 unlock_trace_events();
135 }
136
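/* Connect to the consumer daemon socket. The default path can be overridden
 * with the UST_DAEMON_SOCKET environment variable (see also the GET_SOCK_PATH
 * and SET_SOCK_PATH commands handled below).
 */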
137 static int connect_ustconsumer(void)
138 {
139 int result, fd;
140 char default_daemon_path[] = SOCK_DIR "/ustconsumer";
141 char *explicit_daemon_path, *daemon_path;
142
143 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
144 if (explicit_daemon_path) {
145 daemon_path = explicit_daemon_path;
146 } else {
147 daemon_path = default_daemon_path;
148 }
149
150 DBG("Connecting to daemon_path %s", daemon_path);
151
152 result = ustcomm_connect_path(daemon_path, &fd);
153 if (result < 0) {
154 WARN("connect_ustconsumer failed, daemon_path: %s",
155 daemon_path);
156 return result;
157 }
158
159 return fd;
160 }
161
162
163 static void request_buffer_consumer(int sock,
164 const char *trace,
165 const char *channel,
166 int cpu)
167 {
168 struct ustcomm_header send_header, recv_header;
169 struct ustcomm_buffer_info buf_inf;
170 int result = 0;
171
172 result = ustcomm_pack_buffer_info(&send_header,
173 &buf_inf,
174 trace,
175 channel,
176 cpu);
177
178 if (result < 0) {
179 ERR("failed to pack buffer info message %s_%d",
180 channel, cpu);
181 return;
182 }
183
184 buf_inf.pid = getpid();
185 send_header.command = CONSUME_BUFFER;
186
187 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
188 &recv_header, NULL);
189 if (result <= 0) {
190 PERROR("request for buffer consumer failed, is the daemon online?");
191 }
192
193 return;
194 }
195
196 /* Ask the daemon to collect a trace called trace_name, which is being
197 * produced by this pid.
198 *
199 * The trace must be at least allocated. (It can also be started.)
200 * This is because _ltt_trace_find is used.
201 */
202
203 static void inform_consumer_daemon(const char *trace_name)
204 {
205 int sock, i,j;
206 struct ust_trace *trace;
207 const char *ch_name;
208
209 sock = connect_ustconsumer();
210 if (sock < 0) {
211 return;
212 }
213
214 DBG("Connected to ustconsumer");
215
216 ltt_lock_traces();
217
218 trace = _ltt_trace_find(trace_name);
219 if (trace == NULL) {
220 WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
221 goto unlock_traces;
222 }
223
224 for (i=0; i < trace->nr_channels; i++) {
225 if (trace->channels[i].request_collection) {
226 /* iterate on all cpus */
227 for (j=0; j<trace->channels[i].n_cpus; j++) {
228 ch_name = trace->channels[i].channel_name;
229 request_buffer_consumer(sock, trace_name,
230 ch_name, j);
231 CMM_STORE_SHARED(buffers_to_export,
232 CMM_LOAD_SHARED(buffers_to_export)+1);
233 }
234 }
235 }
236
237 unlock_traces:
238 ltt_unlock_traces();
239
240 close(sock);
241 }
242
243 static struct ust_channel *find_channel(const char *ch_name,
244 struct ust_trace *trace)
245 {
246 int i;
247
248 for (i=0; i<trace->nr_channels; i++) {
249 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
250 return &trace->channels[i];
251 }
252 }
253
254 return NULL;
255 }
256
257 static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
258 int ch_cpu,
259 int *buf_shmid,
260 int *buf_struct_shmid,
261 int *buf_pipe_fd)
262 {
263 struct ust_trace *trace;
264 struct ust_channel *channel;
265 struct ust_buffer *buf;
266
267 DBG("get_buffer_shmid_pipe_fd");
268
269 ltt_lock_traces();
270 trace = _ltt_trace_find(trace_name);
271 ltt_unlock_traces();
272
273 if (trace == NULL) {
274 ERR("cannot find trace!");
275 return -ENODATA;
276 }
277
278 channel = find_channel(ch_name, trace);
279 if (!channel) {
280 ERR("cannot find channel %s!", ch_name);
281 return -ENODATA;
282 }
283
284 buf = channel->buf[ch_cpu];
285
286 *buf_shmid = buf->shmid;
287 *buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
288 *buf_pipe_fd = buf->data_ready_fd_read;
289
290 return 0;
291 }
292
293 static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
294 int *num, int *size)
295 {
296 struct ust_trace *trace;
297 struct ust_channel *channel;
298
299 DBG("get_subbuf_num_size");
300
301 ltt_lock_traces();
302 trace = _ltt_trace_find(trace_name);
303 ltt_unlock_traces();
304
305 if (!trace) {
306 ERR("cannot find trace!");
307 return -ENODATA;
308 }
309
310 channel = find_channel(ch_name, trace);
311 if (!channel) {
312 ERR("unable to find channel");
313 return -ENODATA;
314 }
315
316 *num = channel->subbuf_cnt;
317 *size = channel->subbuf_size;
318
319 return 0;
320 }
321
322 /* Return the smallest power of two that is greater than or equal to v */
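/* Illustrative values: pow2_higher_or_eq(3) == 4, pow2_higher_or_eq(4) == 4,
 * pow2_higher_or_eq(5) == 8. Note that v is assumed to be non-zero, since
 * fls(0) would lead to an invalid shift below.
 */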
323
324 static unsigned int pow2_higher_or_eq(unsigned int v)
325 {
326 int hb = fls(v);
327 int retval = 1<<(hb-1);
328
329 if (v-retval == 0)
330 return retval;
331 else
332 return retval<<1;
333 }
334
335 static int set_subbuf_size(const char *trace_name, const char *ch_name,
336 unsigned int size)
337 {
338 unsigned int power;
339 int retval = 0;
340 struct ust_trace *trace;
341 struct ust_channel *channel;
342
343 DBG("set_subbuf_size");
344
345 power = pow2_higher_or_eq(size);
346 power = max_t(unsigned int, 2u, power);
347 if (power != size) {
348 WARN("using the next power of two for buffer size = %u\n", power);
349 }
350
351 ltt_lock_traces();
352 trace = _ltt_trace_find_setup(trace_name);
353 if (trace == NULL) {
354 ERR("cannot find trace!");
355 retval = -ENODATA;
356 goto unlock_traces;
357 }
358
359 channel = find_channel(ch_name, trace);
360 if (!channel) {
361 ERR("unable to find channel");
362 retval = -ENODATA;
363 goto unlock_traces;
364 }
365
366 channel->subbuf_size = power;
367 DBG("the set_subbuf_size for the requested channel is %zu", channel->subbuf_size);
368
369 unlock_traces:
370 ltt_unlock_traces();
371
372 return retval;
373 }
374
375 static int set_subbuf_num(const char *trace_name, const char *ch_name,
376 unsigned int num)
377 {
378 struct ust_trace *trace;
379 struct ust_channel *channel;
380 int retval = 0;
381
382 DBG("set_subbuf_num");
383
384 if (num < 2) {
385 ERR("subbuffer count should be at least 2");
386 return -EINVAL;
387 }
388
389 ltt_lock_traces();
390 trace = _ltt_trace_find_setup(trace_name);
391 if (trace == NULL) {
392 ERR("cannot find trace!");
393 retval = -ENODATA;
394 goto unlock_traces;
395 }
396
397 channel = find_channel(ch_name, trace);
398 if (!channel) {
399 ERR("unable to find channel");
400 retval = -ENODATA;
401 goto unlock_traces;
402 }
403
404 channel->subbuf_cnt = num;
405 DBG("the set_subbuf_cnt for the requested channel is %u", channel->subbuf_cnt);
406
407 unlock_traces:
408 ltt_unlock_traces();
409 return retval;
410 }
411
412 static int get_subbuffer(const char *trace_name, const char *ch_name,
413 int ch_cpu, long *consumed_old)
414 {
415 int retval = 0;
416 struct ust_trace *trace;
417 struct ust_channel *channel;
418 struct ust_buffer *buf;
419
420 DBG("get_subbuf");
421
422 *consumed_old = 0;
423
424 ltt_lock_traces();
425 trace = _ltt_trace_find(trace_name);
426
427 if (!trace) {
428 DBG("Cannot find trace. It was likely destroyed by the user.");
429 retval = -ENODATA;
430 goto unlock_traces;
431 }
432
433 channel = find_channel(ch_name, trace);
434 if (!channel) {
435 ERR("unable to find channel");
436 retval = -ENODATA;
437 goto unlock_traces;
438 }
439
440 buf = channel->buf[ch_cpu];
441
442 retval = ust_buffers_get_subbuf(buf, consumed_old);
443 if (retval < 0) {
444 WARN("missed buffer?");
445 }
446
447 unlock_traces:
448 ltt_unlock_traces();
449
450 return retval;
451 }
452
453
454 static int notify_buffer_mapped(const char *trace_name,
455 const char *ch_name,
456 int ch_cpu)
457 {
458 int retval = 0;
459 struct ust_trace *trace;
460 struct ust_channel *channel;
461 struct ust_buffer *buf;
462
463 DBG("notify_buffer_mapped");
464
465 ltt_lock_traces();
466 trace = _ltt_trace_find(trace_name);
467
468 if (!trace) {
469 retval = -ENODATA;
470 DBG("Cannot find trace. It was likely destroyed by the user.");
471 goto unlock_traces;
472 }
473
474 channel = find_channel(ch_name, trace);
475 if (!channel) {
476 retval = -ENODATA;
477 ERR("unable to find channel");
478 goto unlock_traces;
479 }
480
481 buf = channel->buf[ch_cpu];
482
483 /* Being here is proof that the daemon has mapped the buffer in its
484 * memory. We may now decrement buffers_to_export.
485 */
486 if (uatomic_read(&buf->consumed) == 0) {
487 DBG("decrementing buffers_to_export");
488 CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
489 }
490
491 unlock_traces:
492 ltt_unlock_traces();
493
494 return retval;
495 }
496
497 static int put_subbuffer(const char *trace_name, const char *ch_name,
498 int ch_cpu, long consumed_old)
499 {
500 int retval = 0;
501 struct ust_trace *trace;
502 struct ust_channel *channel;
503 struct ust_buffer *buf;
504
505 DBG("put_subbuf");
506
507 ltt_lock_traces();
508 trace = _ltt_trace_find(trace_name);
509
510 if (!trace) {
511 retval = -ENODATA;
512 DBG("Cannot find trace. It was likely destroyed by the user.");
513 goto unlock_traces;
514 }
515
516 channel = find_channel(ch_name, trace);
517 if (!channel) {
518 retval = -ENODATA;
519 ERR("unable to find channel");
520 goto unlock_traces;
521 }
522
523 buf = channel->buf[ch_cpu];
524
525 retval = ust_buffers_put_subbuf(buf, consumed_old);
526 if (retval < 0) {
527 WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
528 ch_name, ch_cpu);
529 } else {
530 DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
531 ch_name, ch_cpu);
532 }
533
534 unlock_traces:
535 ltt_unlock_traces();
536
537 return retval;
538 }
539
540 static void release_listener_mutex(void *ptr)
541 {
542 pthread_mutex_unlock(&listener_thread_data_mutex);
543 }
544
545 static void listener_cleanup(void *ptr)
546 {
547 pthread_mutex_lock(&listen_sock_mutex);
548 if (listen_sock) {
549 ustcomm_del_named_sock(listen_sock, 0);
550 listen_sock = NULL;
551 }
552 pthread_mutex_unlock(&listen_sock_mutex);
553 }
554
555 static int force_subbuf_switch(const char *trace_name)
556 {
557 struct ust_trace *trace;
558 int i, j, retval = 0;
559
560 ltt_lock_traces();
561 trace = _ltt_trace_find(trace_name);
562 if (!trace) {
563 retval = -ENODATA;
564 DBG("Cannot find trace. It was likely destroyed by the user.");
565 goto unlock_traces;
566 }
567
568 for (i = 0; i < trace->nr_channels; i++) {
569 for (j = 0; j < trace->channels[i].n_cpus; j++) {
570 ltt_force_switch(trace->channels[i].buf[j],
571 FORCE_FLUSH);
572 }
573 }
574
575 unlock_traces:
576 ltt_unlock_traces();
577
578 return retval;
579 }
580
581 static int process_trace_cmd(int command, char *trace_name)
582 {
583 int result;
584 char trace_type[] = "ustrelay";
585
586 switch(command) {
587 case START:
588 /* START is an operation that sets up the trace, allocates it and starts it */
589 result = ltt_trace_setup(trace_name);
590 if (result < 0) {
591 ERR("ltt_trace_setup failed");
592 return result;
593 }
594
595 result = ltt_trace_set_type(trace_name, trace_type);
596 if (result < 0) {
597 ERR("ltt_trace_set_type failed");
598 return result;
599 }
600
601 result = ltt_trace_alloc(trace_name);
602 if (result < 0) {
603 ERR("ltt_trace_alloc failed");
604 return result;
605 }
606
607 inform_consumer_daemon(trace_name);
608
609 result = ltt_trace_start(trace_name);
610 if (result < 0) {
611 ERR("ltt_trace_start failed");
612 return result;
613 }
614
615 return 0;
616 case SETUP_TRACE:
617 DBG("trace setup");
618
619 result = ltt_trace_setup(trace_name);
620 if (result < 0) {
621 ERR("ltt_trace_setup failed");
622 return result;
623 }
624
625 result = ltt_trace_set_type(trace_name, trace_type);
626 if (result < 0) {
627 ERR("ltt_trace_set_type failed");
628 return result;
629 }
630
631 return 0;
632 case ALLOC_TRACE:
633 DBG("trace alloc");
634
635 result = ltt_trace_alloc(trace_name);
636 if (result < 0) {
637 ERR("ltt_trace_alloc failed");
638 return result;
639 }
640 inform_consumer_daemon(trace_name);
641
642 return 0;
643
644 case CREATE_TRACE:
645 DBG("trace create");
646
647 result = ltt_trace_setup(trace_name);
648 if (result < 0) {
649 ERR("ltt_trace_setup failed");
650 return result;
651 }
652
653 result = ltt_trace_set_type(trace_name, trace_type);
654 if (result < 0) {
655 ERR("ltt_trace_set_type failed");
656 return result;
657 }
658
659 return 0;
660 case START_TRACE:
661 DBG("trace start");
662
663 result = ltt_trace_alloc(trace_name);
664 if (result < 0) {
665 ERR("ltt_trace_alloc failed");
666 return result;
667 }
668 if (!result) {
669 inform_consumer_daemon(trace_name);
670 }
671
672 result = ltt_trace_start(trace_name);
673 if (result < 0) {
674 ERR("ltt_trace_start failed");
675 return result;
676 }
677
678 return 0;
679 case STOP_TRACE:
680 DBG("trace stop");
681
682 result = ltt_trace_stop(trace_name);
683 if (result < 0) {
684 ERR("ltt_trace_stop failed");
685 return result;
686 }
687
688 return 0;
689 case DESTROY_TRACE:
690 DBG("trace destroy");
691
692 result = ltt_trace_destroy(trace_name, 0);
693 if (result < 0) {
694 ERR("ltt_trace_destroy failed");
695 return result;
696 }
697 return 0;
698 case FORCE_SUBBUF_SWITCH:
699 DBG("force switch");
700
701 result = force_subbuf_switch(trace_name);
702 if (result < 0) {
703 ERR("force_subbuf_switch failed");
704 return result;
705 }
706 return 0;
707 }
708
709 return 0;
710 }
711
712
713 static void process_channel_cmd(int sock, int command,
714 struct ustcomm_channel_info *ch_inf)
715 {
716 struct ustcomm_header _reply_header;
717 struct ustcomm_header *reply_header = &_reply_header;
718 struct ustcomm_channel_info *reply_msg =
719 (struct ustcomm_channel_info *)send_buffer;
720 int result, offset = 0, num, size;
721
722 memset(reply_header, 0, sizeof(*reply_header));
723
724 switch (command) {
725 case GET_SUBBUF_NUM_SIZE:
726 result = get_subbuf_num_size(ch_inf->trace,
727 ch_inf->channel,
728 &num, &size);
729 if (result < 0) {
730 reply_header->result = result;
731 break;
732 }
733
734 reply_msg->channel = USTCOMM_POISON_PTR;
735 reply_msg->subbuf_num = num;
736 reply_msg->subbuf_size = size;
737
738
739 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
740
741 break;
742 case SET_SUBBUF_NUM:
743 reply_header->result = set_subbuf_num(ch_inf->trace,
744 ch_inf->channel,
745 ch_inf->subbuf_num);
746
747 break;
748 case SET_SUBBUF_SIZE:
749 reply_header->result = set_subbuf_size(ch_inf->trace,
750 ch_inf->channel,
751 ch_inf->subbuf_size);
752
753
754 break;
755 }
756 if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
757 ERR("ustcomm_send failed");
758 }
759 }
760
761 static void process_buffer_cmd(int sock, int command,
762 struct ustcomm_buffer_info *buf_inf)
763 {
764 struct ustcomm_header _reply_header;
765 struct ustcomm_header *reply_header = &_reply_header;
766 struct ustcomm_buffer_info *reply_msg =
767 (struct ustcomm_buffer_info *)send_buffer;
768 int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
769 long consumed_old;
770
771 memset(reply_header, 0, sizeof(*reply_header));
772
773 switch (command) {
774 case GET_BUF_SHMID_PIPE_FD:
775 result = get_buffer_shmid_pipe_fd(buf_inf->trace,
776 buf_inf->channel,
777 buf_inf->ch_cpu,
778 &buf_shmid,
779 &buf_struct_shmid,
780 &buf_pipe_fd);
781 if (result < 0) {
782 reply_header->result = result;
783 break;
784 }
785
786 reply_msg->channel = USTCOMM_POISON_PTR;
787 reply_msg->buf_shmid = buf_shmid;
788 reply_msg->buf_struct_shmid = buf_struct_shmid;
789
790 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
791 reply_header->fd_included = 1;
792
793 if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
794 &buf_pipe_fd) < 0) {
795 ERR("ustcomm_send failed");
796 }
797 return;
798
799 case NOTIFY_BUF_MAPPED:
800 reply_header->result =
801 notify_buffer_mapped(buf_inf->trace,
802 buf_inf->channel,
803 buf_inf->ch_cpu);
804 break;
805 case GET_SUBBUFFER:
806 result = get_subbuffer(buf_inf->trace, buf_inf->channel,
807 buf_inf->ch_cpu, &consumed_old);
808 if (result < 0) {
809 reply_header->result = result;
810 break;
811 }
812
813 reply_msg->channel = USTCOMM_POISON_PTR;
814 reply_msg->consumed_old = consumed_old;
815
816 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
817
818 break;
819 case PUT_SUBBUFFER:
820 result = put_subbuffer(buf_inf->trace, buf_inf->channel,
821 buf_inf->ch_cpu,
822 buf_inf->consumed_old);
823 reply_header->result = result;
824
825 break;
826 }
827
828 if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
829 ERR("ustcomm_send failed");
830 }
831
832 }
833
834 static void process_ust_marker_cmd(int sock, int command,
835 struct ustcomm_ust_marker_info *ust_marker_inf)
836 {
837 struct ustcomm_header _reply_header;
838 struct ustcomm_header *reply_header = &_reply_header;
839 int result = 0;
840
841 memset(reply_header, 0, sizeof(*reply_header));
842
843 switch(command) {
844 case ENABLE_MARKER:
845
846 result = ltt_ust_marker_connect(ust_marker_inf->channel,
847 ust_marker_inf->ust_marker,
848 "default");
849 if (result < 0) {
850 WARN("could not enable ust_marker; channel=%s,"
851 " name=%s",
852 ust_marker_inf->channel,
853 ust_marker_inf->ust_marker);
854
855 }
856 break;
857 case DISABLE_MARKER:
858 result = ltt_ust_marker_disconnect(ust_marker_inf->channel,
859 ust_marker_inf->ust_marker,
860 "default");
861 if (result < 0) {
862 WARN("could not disable ust_marker; channel=%s,"
863 " name=%s",
864 ust_marker_inf->channel,
865 ust_marker_inf->ust_marker);
866 }
867 break;
868 }
869
870 reply_header->result = result;
871
872 if (ustcomm_send(sock, reply_header, NULL) < 0) {
873 ERR("ustcomm_send failed");
874 }
875
876 }
877 static void process_client_cmd(struct ustcomm_header *recv_header,
878 char *recv_buf, int sock)
879 {
880 int result;
881 struct ustcomm_header _reply_header;
882 struct ustcomm_header *reply_header = &_reply_header;
883 char *send_buf = send_buffer;
884
885 memset(reply_header, 0, sizeof(*reply_header));
886 memset(send_buf, 0, sizeof(send_buffer));
887
888 switch(recv_header->command) {
889 case GET_SUBBUF_NUM_SIZE:
890 case SET_SUBBUF_NUM:
891 case SET_SUBBUF_SIZE:
892 {
893 struct ustcomm_channel_info *ch_inf;
894 ch_inf = (struct ustcomm_channel_info *)recv_buf;
895 result = ustcomm_unpack_channel_info(ch_inf);
896 if (result < 0) {
897 ERR("couldn't unpack channel info");
898 reply_header->result = -EINVAL;
899 goto send_response;
900 }
901 process_channel_cmd(sock, recv_header->command, ch_inf);
902 return;
903 }
904 case GET_BUF_SHMID_PIPE_FD:
905 case NOTIFY_BUF_MAPPED:
906 case GET_SUBBUFFER:
907 case PUT_SUBBUFFER:
908 {
909 struct ustcomm_buffer_info *buf_inf;
910 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
911 result = ustcomm_unpack_buffer_info(buf_inf);
912 if (result < 0) {
913 ERR("couldn't unpack buffer info");
914 reply_header->result = -EINVAL;
915 goto send_response;
916 }
917 process_buffer_cmd(sock, recv_header->command, buf_inf);
918 return;
919 }
920 case ENABLE_MARKER:
921 case DISABLE_MARKER:
922 {
923 struct ustcomm_ust_marker_info *ust_marker_inf;
924 ust_marker_inf = (struct ustcomm_ust_marker_info *)recv_buf;
925 result = ustcomm_unpack_ust_marker_info(ust_marker_inf);
926 if (result < 0) {
927 ERR("couldn't unpack ust_marker info");
928 reply_header->result = -EINVAL;
929 goto send_response;
930 }
931 process_ust_marker_cmd(sock, recv_header->command, ust_marker_inf);
932 return;
933 }
934 case LIST_MARKERS:
935 {
936 char *ptr;
937 size_t size;
938 FILE *fp;
939
940 fp = open_memstream(&ptr, &size);
941 if (fp == NULL) {
942 ERR("opening memstream failed");
943 return;
944 }
945 print_ust_marker(fp);
946 fclose(fp);
947
948 reply_header->size = size + 1; /* Include final \0 */
949
950 result = ustcomm_send(sock, reply_header, ptr);
951
952 free(ptr);
953
954 if (result < 0) {
955 PERROR("failed to send ust_marker list");
956 }
957
958 break;
959 }
960 case LIST_TRACE_EVENTS:
961 {
962 char *ptr;
963 size_t size;
964 FILE *fp;
965
966 fp = open_memstream(&ptr, &size);
967 if (fp == NULL) {
968 ERR("opening memstream failed");
969 return;
970 }
971 print_trace_events(fp);
972 fclose(fp);
973
974 reply_header->size = size + 1; /* Include final \0 */
975
976 result = ustcomm_send(sock, reply_header, ptr);
977
978 free(ptr);
979
980 if (result < 0) {
981 ERR("list_trace_events failed");
982 return;
983 }
984
985 break;
986 }
987 case LOAD_PROBE_LIB:
988 {
989 char *libfile;
990
991 /* FIXME: No functionality at all... */
992 libfile = recv_buf;
993
994 DBG("load_probe_lib loading %s", libfile);
995
996 break;
997 }
998 case GET_PIDUNIQUE:
999 {
1000 struct ustcomm_pidunique *pid_msg;
1001 pid_msg = (struct ustcomm_pidunique *)send_buf;
1002
1003 pid_msg->pidunique = pidunique;
1004 reply_header->size = sizeof(*pid_msg);
1005
1006 goto send_response;
1007
1008 }
1009 case GET_SOCK_PATH:
1010 {
1011 struct ustcomm_single_field *sock_msg;
1012 char *sock_path_env;
1013
1014 sock_msg = (struct ustcomm_single_field *)send_buf;
1015
1016 sock_path_env = getenv("UST_DAEMON_SOCKET");
1017
1018 if (!sock_path_env) {
1019 result = ustcomm_pack_single_field(reply_header,
1020 sock_msg,
1021 SOCK_DIR "/ustconsumer");
1022
1023 } else {
1024 result = ustcomm_pack_single_field(reply_header,
1025 sock_msg,
1026 sock_path_env);
1027 }
1028 reply_header->result = result;
1029
1030 goto send_response;
1031 }
1032 case SET_SOCK_PATH:
1033 {
1034 struct ustcomm_single_field *sock_msg;
1035 sock_msg = (struct ustcomm_single_field *)recv_buf;
1036 result = ustcomm_unpack_single_field(sock_msg);
1037 if (result < 0) {
1038 reply_header->result = -EINVAL;
1039 goto send_response;
1040 }
1041
1042 reply_header->result = setenv("UST_DAEMON_SOCKET",
1043 sock_msg->field, 1);
1044
1045 goto send_response;
1046 }
1047 case START:
1048 case SETUP_TRACE:
1049 case ALLOC_TRACE:
1050 case CREATE_TRACE:
1051 case START_TRACE:
1052 case STOP_TRACE:
1053 case DESTROY_TRACE:
1054 case FORCE_SUBBUF_SWITCH:
1055 {
1056 struct ustcomm_single_field *trace_inf =
1057 (struct ustcomm_single_field *)recv_buf;
1058
1059 result = ustcomm_unpack_single_field(trace_inf);
1060 if (result < 0) {
1061 ERR("couldn't unpack trace info");
1062 reply_header->result = -EINVAL;
1063 goto send_response;
1064 }
1065
1066 reply_header->result =
1067 process_trace_cmd(recv_header->command,
1068 trace_inf->field);
1069 goto send_response;
1070
1071 }
1072 default:
1073 reply_header->result = -EINVAL;
1074
1075 goto send_response;
1076 }
1077
1078 return;
1079
1080 send_response:
1081 ustcomm_send(sock, reply_header, send_buf);
1082 }
1083
1084 #define MAX_EVENTS 10
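/* MAX_EVENTS bounds how many events a single epoll_wait() call can return to
 * the listener; it is also passed to epoll_create() below, where it only
 * serves as a size hint on modern kernels.
 */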
1085
1086 void *listener_main(void *p)
1087 {
1088 struct ustcomm_sock *epoll_sock;
1089 struct epoll_event events[MAX_EVENTS];
1090 struct sockaddr addr;
1091 int accept_fd, nfds, result, i, addr_size;
1092
1093 DBG("LISTENER");
1094
1095 pthread_cleanup_push(listener_cleanup, NULL);
1096
1097 for(;;) {
1098 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1099 if (nfds == -1) {
1100 PERROR("listener_main: epoll_wait failed");
1101 continue;
1102 }
1103
1104 for (i = 0; i < nfds; i++) {
1105 pthread_mutex_lock(&listener_thread_data_mutex);
1106 pthread_cleanup_push(release_listener_mutex, NULL);
1107 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1108 if (epoll_sock == listen_sock) {
1109 addr_size = sizeof(struct sockaddr);
1110 accept_fd = accept(epoll_sock->fd,
1111 &addr,
1112 (socklen_t *)&addr_size);
1113 if (accept_fd == -1) {
1114 PERROR("listener_main: accept failed");
1115 continue;
1116 }
1117 ustcomm_init_sock(accept_fd, epoll_fd,
1118 &ust_socks);
1119 } else {
1120 memset(receive_header, 0,
1121 sizeof(*receive_header));
1122 memset(receive_buffer, 0,
1123 sizeof(receive_buffer));
1124 result = ustcomm_recv(epoll_sock->fd,
1125 receive_header,
1126 receive_buffer);
1127 if (result == 0) {
1128 ustcomm_del_sock(epoll_sock, 0);
1129 } else {
1130 process_client_cmd(receive_header,
1131 receive_buffer,
1132 epoll_sock->fd);
1133 }
1134 }
1135 pthread_cleanup_pop(1); /* release listener mutex */
1136 }
1137 }
1138
1139 pthread_cleanup_pop(1);
1140 }
1141
1142 /* These should only be accessed in the parent thread,
1143 * not the listener.
1144 */
1145 static volatile sig_atomic_t have_listener = 0;
1146 static pthread_t listener_thread;
1147
1148 void create_listener(void)
1149 {
1150 int result;
1151 sigset_t sig_all_blocked;
1152 sigset_t orig_parent_mask;
1153
1154 if (have_listener) {
1155 WARN("not creating listener because we already had one");
1156 return;
1157 }
1158
1159 /* A new thread created by pthread_create inherits the signal mask
1160 * from the parent. To avoid any signal being received by the
1161 * listener thread, we block all signals temporarily in the parent,
1162 * while we create the listener thread.
1163 */
1164
1165 sigfillset(&sig_all_blocked);
1166
1167 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1168 if (result) {
1169 PERROR("pthread_sigmask: %s", strerror(result));
1170 }
1171
1172 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1173 if (result) {
1174 ERR("pthread_create: %s", strerror(result));
1175 }
1176
1177 /* Restore original signal mask in parent */
1178 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1179 if (result) {
1180 PERROR("pthread_sigmask: %s", strerror(result));
1181 } else {
1182 have_listener = 1;
1183 }
1184 }
1185
1186 #define AUTOPROBE_DISABLED 0
1187 #define AUTOPROBE_ENABLE_ALL 1
1188 #define AUTOPROBE_ENABLE_REGEX 2
1189 static int autoprobe_method = AUTOPROBE_DISABLED;
1190 static regex_t autoprobe_regex;
1191
1192 static void auto_probe_connect(struct ust_marker *m)
1193 {
1194 int result;
1195
1196 char* concat_name = NULL;
1197 const char *probe_name = "default";
1198
1199 if (autoprobe_method == AUTOPROBE_DISABLED) {
1200 return;
1201 } else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
1202 result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
1203 if (result == -1) {
1204 ERR("auto_probe_connect: asprintf failed (ust_marker %s/%s)",
1205 m->channel, m->name);
1206 return;
1207 }
1208 if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
1209 free(concat_name);
1210 return;
1211 }
1212 free(concat_name);
1213 }
1214
1215 result = ltt_ust_marker_connect(m->channel, m->name, probe_name);
1216 if (result && result != -EEXIST)
1217 ERR("ltt_ust_marker_connect (ust_marker = %s/%s, errno = %d)", m->channel, m->name, -result);
1218
1219 DBG("auto connected ust_marker %s (addr: %p) %s to probe default", m->channel, m, m->name);
1220
1221 }
1222
1223 static struct ustcomm_sock * init_app_socket(int epoll_fd)
1224 {
1225 char *dir_name, *sock_name;
1226 int result;
1227 struct ustcomm_sock *sock = NULL;
1228
1229 dir_name = ustcomm_user_sock_dir();
1230 if (!dir_name)
1231 return NULL;
1232
1233 result = asprintf(&sock_name, "%s/%d", dir_name, (int)getpid());
1234 if (result < 0) {
1235 ERR("failed to allocate socket name, "
1236 "UST thread bailing");
1237 goto free_dir_name;
1238 }
1239
1240 result = ensure_dir_exists(dir_name, S_IRWXU);
1241 if (result == -1) {
1242 ERR("Unable to create socket directory %s, UST thread bailing",
1243 dir_name);
1244 goto free_sock_name;
1245 }
1246
1247 sock = ustcomm_init_named_socket(sock_name, epoll_fd);
1248 if (!sock) {
1249 ERR("Error initializing named socket (%s). Check that the directory "
1250 "exists and that it is writable. UST thread bailing", sock_name);
1251 goto free_sock_name;
1252 }
1253
1254 free_sock_name:
1255 free(sock_name);
1256 free_dir_name:
1257 free(dir_name);
1258
1259 return sock;
1260 }
1261
1262 static void __attribute__((constructor)) init()
1263 {
1264 struct timespec ts;
1265 int result;
1266 char* autoprobe_val = NULL;
1267 char* subbuffer_size_val = NULL;
1268 char* subbuffer_count_val = NULL;
1269 unsigned int subbuffer_size;
1270 unsigned int subbuffer_count;
1271 unsigned int power;
1272
1273 /* Assign the pidunique, to be able to differentiate processes with the
1274 * same pid (e.g. before and after an exec).
1275 */
1276 pidunique = make_pidunique();
1277 processpid = getpid();
1278
1279 DBG("Tracectl constructor");
1280
1281 /* Set up epoll */
1282 epoll_fd = epoll_create(MAX_EVENTS);
1283 if (epoll_fd == -1) {
1284 ERR("epoll_create failed, tracing shutting down");
1285 return;
1286 }
1287
1288 /* Create the socket */
1289 listen_sock = init_app_socket(epoll_fd);
1290 if (!listen_sock) {
1291 ERR("failed to create application socket,"
1292 " tracing shutting down");
1293 return;
1294 }
1295
1296 create_listener();
1297
1298 /* Get the clock source type */
1299
1300 /* Default clock source */
1301 ust_clock_source = CLOCK_TRACE;
1302 if (clock_gettime(ust_clock_source, &ts) != 0) {
1303 ust_clock_source = CLOCK_MONOTONIC;
1304 DBG("UST traces will not be synchronized with LTTng traces");
1305 }
1306
1307 autoprobe_val = getenv("UST_AUTOPROBE");
1308 if (autoprobe_val) {
1309 struct ust_marker_iter iter;
1310
1311 DBG("Autoprobe enabled.");
1312
1313 /* Ensure ust_marker are initialized */
1314 //init_ust_marker();
1315
1316 /* Ensure ust_marker control is initialized, for the probe */
1317 init_ust_marker_control();
1318
1319 /* first, set the callback that will connect the
1320 * probe on new ust_marker
1321 */
1322 if (autoprobe_val[0] == '/') {
1323 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1324 if (result) {
1325 char regexerr[150];
1326
1327 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1328 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1329 /* don't crash the application just for this */
1330 } else {
1331 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1332 }
1333 } else {
1334 /* just enable all instrumentation */
1335 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1336 }
1337
1338 ust_marker_set_new_ust_marker_cb(auto_probe_connect);
1339
1340 /* Now, connect the probes that were already registered. */
1341 ust_marker_iter_reset(&iter);
1342 ust_marker_iter_start(&iter);
1343
1344 DBG("now iterating on ust_marker already registered");
1345 while (iter.ust_marker) {
1346 DBG("now iterating on ust_marker %s", (*iter.ust_marker)->name);
1347 auto_probe_connect(*iter.ust_marker);
1348 ust_marker_iter_next(&iter);
1349 }
1350 }
1351
1352 if (getenv("UST_OVERWRITE")) {
1353 int val = atoi(getenv("UST_OVERWRITE"));
1354 if (val == 0 || val == 1) {
1355 CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
1356 } else {
1357 WARN("invalid value for UST_OVERWRITE");
1358 }
1359 }
1360
1361 if (getenv("UST_AUTOCOLLECT")) {
1362 int val = atoi(getenv("UST_AUTOCOLLECT"));
1363 if (val == 0 || val == 1) {
1364 CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
1365 } else {
1366 WARN("invalid value for UST_AUTOCOLLECT");
1367 }
1368 }
1369
1370 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1371 if (subbuffer_size_val) {
1372 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1373 power = pow2_higher_or_eq(subbuffer_size);
1374 if (power != subbuffer_size)
1375 WARN("using the next power of two for buffer size = %u\n", power);
1376 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1377 }
1378
1379 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1380 if (subbuffer_count_val) {
1381 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1382 if (subbuffer_count < 2)
1383 subbuffer_count = 2;
1384 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1385 }
1386
1387 if (getenv("UST_TRACE")) {
1388 char trace_name[] = "auto";
1389 char trace_type[] = "ustrelay";
1390
1391 DBG("starting early tracing");
1392
1393 /* Ensure ust_marker control is initialized */
1394 init_ust_marker_control();
1395
1396 /* Ensure ust_marker are initialized */
1397 init_ust_marker();
1398
1399 /* Ensure buffers are initialized, for the transport to be available.
1400 * We are about to set a trace type and it will fail without this.
1401 */
1402 init_ustrelay_transport();
1403
1404 /* FIXME: When starting early tracing (here), depending on the
1405 * order of constructors, it is very well possible some ust_marker
1406 * sections are not yet registered. Because of this, some
1407 * channels may not be registered. Yet, we are about to ask the
1408 * daemon to collect the channels. Channels which are not yet
1409 * registered will not be collected.
1410 *
1411 * Currently, in LTTng, there is no way to add a channel after
1412 * trace start. The reason for this is that it induces complex
1413 * concurrency issues on the trace structures, which can only
1414 * be resolved using RCU. This has not been done yet. As a
1415 * workaround, we are forcing the registration of the "ust"
1416 * channel here. This is the only channel (apart from metadata)
1417 * that can be reliably used in early tracing.
1418 *
1419 * Non-early tracing does not have this problem and can use
1420 * arbitrary channel names.
1421 */
1422 ltt_channels_register("ust");
1423
1424 result = ltt_trace_setup(trace_name);
1425 if (result < 0) {
1426 ERR("ltt_trace_setup failed");
1427 return;
1428 }
1429
1430 result = ltt_trace_set_type(trace_name, trace_type);
1431 if (result < 0) {
1432 ERR("ltt_trace_set_type failed");
1433 return;
1434 }
1435
1436 result = ltt_trace_alloc(trace_name);
1437 if (result < 0) {
1438 ERR("ltt_trace_alloc failed");
1439 return;
1440 }
1441
1442 result = ltt_trace_start(trace_name);
1443 if (result < 0) {
1444 ERR("ltt_trace_start failed");
1445 return;
1446 }
1447
1448 /* Do this after the trace is started in order to avoid creating confusion
1449 * if the trace fails to start. */
1450 inform_consumer_daemon(trace_name);
1451 }
1452
1453 return;
1454
1455 /* FIXME: on error, should tear down what was set up above, in reverse order */
1456
1457 }
1458
1459 /* This is only called if we terminate normally, not with an unhandled signal,
1460 * so we cannot rely on it. However, for now, LTTV requires that the header of
1461 * the last sub-buffer contain a valid end time for the trace. This is done
1462 * automatically only when the trace is properly stopped.
1463 *
1464 * If the traced program crashed, it is always possible to manually add the
1465 * right value in the header, or to open the trace in text mode.
1466 *
1467 * FIXME: Fix LTTV so it doesn't need this.
1468 */
1469
1470 static void destroy_traces(void)
1471 {
1472 int result;
1473
1474 /* if trace running, finish it */
1475
1476 DBG("destructor stopping traces");
1477
1478 result = ltt_trace_stop("auto");
1479 if (result == -1) {
1480 ERR("ltt_trace_stop error");
1481 }
1482
1483 result = ltt_trace_destroy("auto", 0);
1484 if (result == -1) {
1485 ERR("ltt_trace_destroy error");
1486 }
1487 }
1488
1489 static int trace_recording(void)
1490 {
1491 int retval = 0;
1492 struct ust_trace *trace;
1493
1494 ltt_lock_traces();
1495
1496 cds_list_for_each_entry(trace, &ltt_traces.head, list) {
1497 if (trace->active) {
1498 retval = 1;
1499 break;
1500 }
1501 }
1502
1503 ltt_unlock_traces();
1504
1505 return retval;
1506 }
1507
1508 int restarting_usleep(useconds_t usecs)
1509 {
1510 struct timespec tv;
1511 int result;
1512
1513 tv.tv_sec = 0;
1514 tv.tv_nsec = usecs * 1000;
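/* Note: assumes usecs < 1000000, which holds for the keepalive polling
 * interval used below; larger values would make tv_nsec invalid for
 * nanosleep().
 */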
1515
1516 do {
1517 result = nanosleep(&tv, &tv);
1518 } while (result == -1 && errno == EINTR);
1519
1520 return result;
1521 }
1522
1523 static void stop_listener(void)
1524 {
1525 int result;
1526
1527 if (!have_listener)
1528 return;
1529
1530 result = pthread_cancel(listener_thread);
1531 if (result != 0) {
1532 ERR("pthread_cancel: %s", strerror(result));
1533 }
1534 result = pthread_join(listener_thread, NULL);
1535 if (result != 0) {
1536 ERR("pthread_join: %s", strerror(result));
1537 }
1538 }
1539
1540 /* This destructor keeps the process alive for a few seconds in order
1541 * to leave time for ustconsumer to connect to its buffers. This is necessary
1542 * for programs whose execution is very short. It is also useful in all
1543 * programs when tracing is started close to the end of the program
1544 * execution.
1545 *
1546 * FIXME: For now, this only works for the first trace created in a
1547 * process.
1548 */
1549
1550 static void __attribute__((destructor)) keepalive()
1551 {
1552 if (processpid != getpid()) {
1553 return;
1554 }
1555
1556 if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
1557 int total = 0;
1558 DBG("Keeping process alive for consumer daemon...");
1559 while (CMM_LOAD_SHARED(buffers_to_export)) {
1560 const int interv = 200000;
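/* Poll every 200 ms, for at most about 3 s in total (see the limit below). */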
1561 restarting_usleep(interv);
1562 total += interv;
1563
1564 if (total >= 3000000) {
1565 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1566 break;
1567 }
1568 }
1569 DBG("Finally dying...");
1570 }
1571
1572 destroy_traces();
1573
1574 /* Ask the listener to stop and clean up. */
1575 stop_listener();
1576 }
1577
1578 void ust_potential_exec(void)
1579 {
1580 ust_marker(potential_exec, UST_MARKER_NOARGS);
1581
1582 DBG("test");
1583
1584 keepalive();
1585 }
1586
1587 /* Notify ust that there was a fork. This needs to be called inside
1588 * the new process, anytime a process whose memory is not shared with
1589 * the parent is created. If this function is not called, the events
1590 * of the new process will not be collected.
1591 *
1592 * Signals should be disabled before the fork and reenabled only after
1593 * this call in order to guarantee tracing is not started before ust_fork()
1594 * sanitizes the new process.
1595 */
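/* A hypothetical fork() wrapper in a traced program would drive the public
 * entry points defined at the end of this file roughly as follows:
 *
 *   ust_fork_info_t fork_info;
 *   pid_t pid;
 *
 *   ust_before_fork(&fork_info);
 *   pid = fork();
 *   if (pid == 0)
 *           ust_after_fork_child(&fork_info);
 *   else
 *           ust_after_fork_parent(&fork_info);
 */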
1596
1597 static void ust_fork(void)
1598 {
1599 struct ustcomm_sock *sock, *sock_tmp;
1600 struct ust_trace *trace, *trace_tmp;
1601 int result;
1602
1603 /* FIXME: technically, the locks could have been taken before the fork */
1604 DBG("ust: forking");
1605
1606 /* Get the pid of the new process */
1607 processpid = getpid();
1608
1609 /*
1610 * FIXME: This could be prettier, we loop over the list twice and
1611 * following good locking practice should lock around the loop
1612 */
1613 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1614 ltt_trace_stop(trace->trace_name);
1615 }
1616
1617 /* Delete all active connections, but leave them in the epoll set */
1618 cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
1619 ustcomm_del_sock(sock, 1);
1620 }
1621
1622 /*
1623 * FIXME: This could be prettier, we loop over the list twice and
1624 * following good locking practice should lock around the loop
1625 */
1626 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1627 ltt_trace_destroy(trace->trace_name, 1);
1628 }
1629
1630 /* Clean up the listener socket and epoll, keeping the socket file */
1631 if (listen_sock) {
1632 ustcomm_del_named_sock(listen_sock, 1);
1633 listen_sock = NULL;
1634 }
1635 close(epoll_fd);
1636
1637 /* Re-start the launch sequence */
1638 CMM_STORE_SHARED(buffers_to_export, 0);
1639 have_listener = 0;
1640
1641 /* Set up epoll */
1642 epoll_fd = epoll_create(MAX_EVENTS);
1643 if (epoll_fd == -1) {
1644 ERR("epoll_create failed, tracing shutting down");
1645 return;
1646 }
1647
1648 /* Create the socket */
1649 listen_sock = init_app_socket(epoll_fd);
1650 if (!listen_sock) {
1651 ERR("failed to create application socket,"
1652 " tracing shutting down");
1653 return;
1654 }
1655 create_listener();
1656 ltt_trace_setup("auto");
1657 result = ltt_trace_set_type("auto", "ustrelay");
1658 if (result < 0) {
1659 ERR("ltt_trace_set_type failed");
1660 return;
1661 }
1662
1663 ltt_trace_alloc("auto");
1664 ltt_trace_start("auto");
1665 inform_consumer_daemon("auto");
1666 }
1667
1668 void ust_before_fork(ust_fork_info_t *fork_info)
1669 {
1670 /* Disable signals. This is to prevent the child from
1671 * intervening before it is properly set up for tracing. It is
1672 * safer to disable all signals, because then we know we are not
1673 * breaking anything by restoring the original mask.
1674 */
1675 sigset_t all_sigs;
1676 int result;
1677
1678 /* FIXME:
1679 - only do this if tracing is active
1680 */
1681
1682 /* Disable signals */
1683 sigfillset(&all_sigs);
1684 result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
1685 if (result == -1) {
1686 PERROR("sigprocmask");
1687 return;
1688 }
1689
1690 /*
1691 * Take the fork lock to make sure we are not in the middle of
1692 * something in the listener thread.
1693 */
1694 pthread_mutex_lock(&listener_thread_data_mutex);
1695 /*
1696 * Hold listen_sock_mutex to protect from listen_sock teardown.
1697 */
1698 pthread_mutex_lock(&listen_sock_mutex);
1699 rcu_bp_before_fork();
1700 }
1701
1702 /* Don't call this function directly in a traced program */
1703 static void ust_after_fork_common(ust_fork_info_t *fork_info)
1704 {
1705 int result;
1706
1707 pthread_mutex_unlock(&listen_sock_mutex);
1708 pthread_mutex_unlock(&listener_thread_data_mutex);
1709
1710 /* Restore signals */
1711 result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
1712 if (result == -1) {
1713 PERROR("sigprocmask");
1714 return;
1715 }
1716 }
1717
1718 void ust_after_fork_parent(ust_fork_info_t *fork_info)
1719 {
1720 rcu_bp_after_fork_parent();
1721 /* Release mutexes and reenable signals */
1722 ust_after_fork_common(fork_info);
1723 }
1724
1725 void ust_after_fork_child(ust_fork_info_t *fork_info)
1726 {
1727 /* Release urcu mutexes */
1728 rcu_bp_after_fork_child();
1729
1730 /* Sanitize the child */
1731 ust_fork();
1732
1733 /* Then release mutexes and reenable signals */
1734 ust_after_fork_common(fork_info);
1735 }
1736