libust: Remove some unused variables
[ust.git] / libust / tracectl.c
1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <stdint.h>
27 #include <pthread.h>
28 #include <signal.h>
29 #include <sys/epoll.h>
30 #include <sys/time.h>
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <fcntl.h>
34 #include <poll.h>
35 #include <regex.h>
36 #include <urcu/uatomic_arch.h>
37 #include <urcu/list.h>
38
39 #include <ust/marker.h>
40 #include <ust/tracepoint.h>
41 #include <ust/tracectl.h>
42 #include <ust/clock.h>
43 #include "tracer.h"
44 #include "usterr.h"
45 #include "ustcomm.h"
46 #include "buffers.h"
47 #include "marker-control.h"
48
/* Unique identifier for this process incarnation (the pid alone is not
 * enough, since an exec keeps the pid). Should only be accessed by the
 * constructor, before the creation of the listener, and then only by the
 * listener.
 */
s64 pidunique = -1LL;

/* The process pid is used to detect a non-traceable fork
 * and allow the non-traceable fork to be ignored
 * by destructor sequences in libust
 */
static pid_t processpid = 0;

/* Scratch header and receive/send buffers used by the listener thread. */
static struct ustcomm_header _receive_header;
static struct ustcomm_header *receive_header = &_receive_header;
static char receive_buffer[USTCOMM_BUFFER_SIZE];
static char send_buffer[USTCOMM_BUFFER_SIZE];

/* epoll instance the listener waits on; created by the constructor. */
static int epoll_fd;

/*
 * Listener thread data vs fork() protection mechanism. Ensures that no listener
 * thread mutexes and data structures are being concurrently modified or held by
 * other threads when fork() is executed.
 */
static pthread_mutex_t listener_thread_data_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mutex protecting listen_sock. Nests inside listener_thread_data_mutex. */
static pthread_mutex_t listen_sock_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct ustcomm_sock *listen_sock;

extern struct chan_info_struct chan_infos[];

/* All sockets served by the listener, including accepted connections. */
static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);

/* Shared between the listener and the main thread; always accessed through
 * CMM_LOAD_SHARED()/CMM_STORE_SHARED() (note: not actually declared
 * volatile, despite the historical comment). */
int buffers_to_export = 0;

/* Clock id passed to clock_gettime(); selected by the constructor. */
int ust_clock_source;
87 static long long make_pidunique(void)
88 {
89 s64 retval;
90 struct timeval tv;
91
92 gettimeofday(&tv, NULL);
93
94 retval = tv.tv_sec;
95 retval <<= 32;
96 retval |= tv.tv_usec;
97
98 return retval;
99 }
100
101 static void print_markers(FILE *fp)
102 {
103 struct marker_iter iter;
104
105 lock_markers();
106 marker_iter_reset(&iter);
107 marker_iter_start(&iter);
108
109 while (iter.marker) {
110 fprintf(fp, "marker: %s/%s %d \"%s\" %p\n",
111 (*iter.marker)->channel,
112 (*iter.marker)->name,
113 (int)imv_read((*iter.marker)->state),
114 (*iter.marker)->format,
115 (*iter.marker)->location);
116 marker_iter_next(&iter);
117 }
118 unlock_markers();
119 }
120
121 static void print_trace_events(FILE *fp)
122 {
123 struct trace_event_iter iter;
124
125 lock_trace_events();
126 trace_event_iter_reset(&iter);
127 trace_event_iter_start(&iter);
128
129 while (iter.trace_event) {
130 fprintf(fp, "trace_event: %s\n", (*iter.trace_event)->name);
131 trace_event_iter_next(&iter);
132 }
133 unlock_trace_events();
134 }
135
136 static int connect_ustconsumer(void)
137 {
138 int result, fd;
139 char default_daemon_path[] = SOCK_DIR "/ustconsumer";
140 char *explicit_daemon_path, *daemon_path;
141
142 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
143 if (explicit_daemon_path) {
144 daemon_path = explicit_daemon_path;
145 } else {
146 daemon_path = default_daemon_path;
147 }
148
149 DBG("Connecting to daemon_path %s", daemon_path);
150
151 result = ustcomm_connect_path(daemon_path, &fd);
152 if (result < 0) {
153 WARN("connect_ustconsumer failed, daemon_path: %s",
154 daemon_path);
155 return result;
156 }
157
158 return fd;
159 }
160
161
162 static void request_buffer_consumer(int sock,
163 const char *trace,
164 const char *channel,
165 int cpu)
166 {
167 struct ustcomm_header send_header, recv_header;
168 struct ustcomm_buffer_info buf_inf;
169 int result = 0;
170
171 result = ustcomm_pack_buffer_info(&send_header,
172 &buf_inf,
173 trace,
174 channel,
175 cpu);
176
177 if (result < 0) {
178 ERR("failed to pack buffer info message %s_%d",
179 channel, cpu);
180 return;
181 }
182
183 buf_inf.pid = getpid();
184 send_header.command = CONSUME_BUFFER;
185
186 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
187 &recv_header, NULL);
188 if (result <= 0) {
189 PERROR("request for buffer consumer failed, is the daemon online?");
190 }
191
192 return;
193 }
194
195 /* Ask the daemon to collect a trace called trace_name and being
196 * produced by this pid.
197 *
198 * The trace must be at least allocated. (It can also be started.)
199 * This is because _ltt_trace_find is used.
200 */
201
202 static void inform_consumer_daemon(const char *trace_name)
203 {
204 int sock, i,j;
205 struct ust_trace *trace;
206 const char *ch_name;
207
208 sock = connect_ustconsumer();
209 if (sock < 0) {
210 return;
211 }
212
213 DBG("Connected to ustconsumer");
214
215 ltt_lock_traces();
216
217 trace = _ltt_trace_find(trace_name);
218 if (trace == NULL) {
219 WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
220 goto unlock_traces;
221 }
222
223 for (i=0; i < trace->nr_channels; i++) {
224 if (trace->channels[i].request_collection) {
225 /* iterate on all cpus */
226 for (j=0; j<trace->channels[i].n_cpus; j++) {
227 ch_name = trace->channels[i].channel_name;
228 request_buffer_consumer(sock, trace_name,
229 ch_name, j);
230 CMM_STORE_SHARED(buffers_to_export,
231 CMM_LOAD_SHARED(buffers_to_export)+1);
232 }
233 }
234 }
235
236 unlock_traces:
237 ltt_unlock_traces();
238
239 close(sock);
240 }
241
242 static struct ust_channel *find_channel(const char *ch_name,
243 struct ust_trace *trace)
244 {
245 int i;
246
247 for (i=0; i<trace->nr_channels; i++) {
248 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
249 return &trace->channels[i];
250 }
251 }
252
253 return NULL;
254 }
255
/* Fetch the shm ids and the data-ready pipe fd of one buffer (one cpu of
 * one channel) so they can be handed to the consumer daemon.
 *
 * Returns 0 on success, -ENODATA when the trace or channel is not found.
 * NOTE(review): ch_cpu is not range-checked against n_cpus here;
 * presumably the consumer always sends a valid cpu index -- confirm.
 */
static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
				    int ch_cpu,
				    int *buf_shmid,
				    int *buf_struct_shmid,
				    int *buf_pipe_fd)
{
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_buffer_shmid_pipe_fd");

	/* Trace pointer is used after the lock is dropped; this mirrors the
	 * lookup pattern used throughout this file. */
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	ltt_unlock_traces();

	if (trace == NULL) {
		ERR("cannot find trace!");
		return -ENODATA;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("cannot find channel %s!", ch_name);
		return -ENODATA;
	}

	buf = channel->buf[ch_cpu];

	*buf_shmid = buf->shmid;
	*buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
	*buf_pipe_fd = buf->data_ready_fd_read;

	return 0;
}
291
292 static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
293 int *num, int *size)
294 {
295 struct ust_trace *trace;
296 struct ust_channel *channel;
297
298 DBG("get_subbuf_size");
299
300 ltt_lock_traces();
301 trace = _ltt_trace_find(trace_name);
302 ltt_unlock_traces();
303
304 if (!trace) {
305 ERR("cannot find trace!");
306 return -ENODATA;
307 }
308
309 channel = find_channel(ch_name, trace);
310 if (!channel) {
311 ERR("unable to find channel");
312 return -ENODATA;
313 }
314
315 *num = channel->subbuf_cnt;
316 *size = channel->subbuf_size;
317
318 return 0;
319 }
320
/* Return the power of two which is equal or higher to v.
 *
 * Implemented with the standard bit-smearing idiom instead of fls():
 * fls() is a non-portable BSD extension, and the previous code invoked
 * undefined behavior (1 << -1) when v == 0. v == 0 and v == 1 both
 * yield 1; behavior for v >= 1 is unchanged.
 */
static unsigned int pow2_higher_or_eq(unsigned int v)
{
	if (v <= 1)
		return 1;

	/* Smear the highest set bit of (v - 1) into all lower bits, then
	 * add one to reach the next power of two >= v. */
	v--;
	v |= v >> 1;
	v |= v >> 2;
	v |= v >> 4;
	v |= v >> 8;
	v |= v >> 16;

	return v + 1;
}
333
/* Set the subbuffer size of a channel on a trace that is still in the
 * setup state (_ltt_trace_find_setup). The size is rounded up to a power
 * of two, with a floor of 2 bytes.
 *
 * Returns 0 on success, -ENODATA when the trace or channel is not found.
 */
static int set_subbuf_size(const char *trace_name, const char *ch_name,
			   unsigned int size)
{
	unsigned int power;
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;

	DBG("set_subbuf_size");

	power = pow2_higher_or_eq(size);
	power = max_t(unsigned int, 2u, power);
	if (power != size) {
		/* The caller asked for a non-power-of-two size; warn that
		 * we silently rounded up. */
		WARN("using the next power of two for buffer size = %u\n", power);
	}

	ltt_lock_traces();
	trace = _ltt_trace_find_setup(trace_name);
	if (trace == NULL) {
		ERR("cannot find trace!");
		retval = -ENODATA;
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("unable to find channel");
		retval = -ENODATA;
		goto unlock_traces;
	}

	channel->subbuf_size = power;
	DBG("the set_subbuf_size for the requested channel is %zu", channel->subbuf_size);

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
373
374 static int set_subbuf_num(const char *trace_name, const char *ch_name,
375 unsigned int num)
376 {
377 struct ust_trace *trace;
378 struct ust_channel *channel;
379 int retval = 0;
380
381 DBG("set_subbuf_num");
382
383 if (num < 2) {
384 ERR("subbuffer count should be greater than 2");
385 return -EINVAL;
386 }
387
388 ltt_lock_traces();
389 trace = _ltt_trace_find_setup(trace_name);
390 if (trace == NULL) {
391 ERR("cannot find trace!");
392 retval = -ENODATA;
393 goto unlock_traces;
394 }
395
396 channel = find_channel(ch_name, trace);
397 if (!channel) {
398 ERR("unable to find channel");
399 retval = -ENODATA;
400 goto unlock_traces;
401 }
402
403 channel->subbuf_cnt = num;
404 DBG("the set_subbuf_cnt for the requested channel is %u", channel->subbuf_cnt);
405
406 unlock_traces:
407 ltt_unlock_traces();
408 return retval;
409 }
410
/* Reserve a filled subbuffer of one cpu-buffer for reading by the
 * consumer. On success the previous consumed count is returned through
 * *consumed_old (to be handed back to put_subbuffer later).
 *
 * Returns ust_buffers_get_subbuf()'s result, or -ENODATA when the trace
 * or channel is not found.
 */
static int get_subbuffer(const char *trace_name, const char *ch_name,
			 int ch_cpu, long *consumed_old)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_subbuf");

	*consumed_old = 0;

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		DBG("Cannot find trace. It was likely destroyed by the user.");
		retval = -ENODATA;
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("unable to find channel");
		retval = -ENODATA;
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	retval = ust_buffers_get_subbuf(buf, consumed_old);
	if (retval < 0) {
		/* No complete subbuffer available right now. */
		WARN("missed buffer?");
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
451
452
/* Handle the consumer's notification that it has mapped one cpu-buffer
 * into its address space. Decrements buffers_to_export, which gates the
 * process' ability to exit with all data collected.
 *
 * Returns 0 on success, -ENODATA when the trace or channel is not found.
 */
static int notify_buffer_mapped(const char *trace_name,
				const char *ch_name,
				int ch_cpu)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_buffer_fd");

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		retval = -ENODATA;
		DBG("Cannot find trace. It was likely destroyed by the user.");
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		retval = -ENODATA;
		ERR("unable to find channel");
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	/* Being here is the proof the daemon has mapped the buffer in its
	 * memory. We may now decrement buffers_to_export.
	 */
	if (uatomic_read(&buf->consumed) == 0) {
		/* consumed == 0 means nothing was read yet, i.e. this is
		 * the first (and only) map notification for this buffer. */
		DBG("decrementing buffers_to_export");
		CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
495
496 static int put_subbuffer(const char *trace_name, const char *ch_name,
497 int ch_cpu, long consumed_old)
498 {
499 int retval = 0;
500 struct ust_trace *trace;
501 struct ust_channel *channel;
502 struct ust_buffer *buf;
503
504 DBG("put_subbuf");
505
506 ltt_lock_traces();
507 trace = _ltt_trace_find(trace_name);
508
509 if (!trace) {
510 retval = -ENODATA;
511 DBG("Cannot find trace. It was likely destroyed by the user.");
512 goto unlock_traces;
513 }
514
515 channel = find_channel(ch_name, trace);
516 if (!channel) {
517 retval = -ENODATA;
518 ERR("unable to find channel");
519 goto unlock_traces;
520 }
521
522 buf = channel->buf[ch_cpu];
523
524 retval = ust_buffers_put_subbuf(buf, consumed_old);
525 if (retval < 0) {
526 WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
527 ch_name, ch_cpu);
528 } else {
529 DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
530 ch_name, ch_cpu);
531 }
532
533 unlock_traces:
534 ltt_unlock_traces();
535
536 return retval;
537 }
538
/* pthread_cleanup handler: drop the listener data mutex if the listener
 * thread is cancelled while holding it. ptr is unused.
 */
static void release_listener_mutex(void *ptr)
{
	pthread_mutex_unlock(&listener_thread_data_mutex);
}
543
/* pthread_cleanup handler run when the listener thread exits or is
 * cancelled: tear down the named listening socket exactly once.
 * ptr is unused.
 */
static void listener_cleanup(void *ptr)
{
	pthread_mutex_lock(&listen_sock_mutex);
	if (listen_sock) {
		ustcomm_del_named_sock(listen_sock, 0);
		/* Mark as gone so a second invocation is a no-op. */
		listen_sock = NULL;
	}
	pthread_mutex_unlock(&listen_sock_mutex);
}
553
554 static int force_subbuf_switch(const char *trace_name)
555 {
556 struct ust_trace *trace;
557 int i, j, retval = 0;
558
559 ltt_lock_traces();
560 trace = _ltt_trace_find(trace_name);
561 if (!trace) {
562 retval = -ENODATA;
563 DBG("Cannot find trace. It was likely destroyed by the user.");
564 goto unlock_traces;
565 }
566
567 for (i = 0; i < trace->nr_channels; i++) {
568 for (j = 0; j < trace->channels[i].n_cpus; j++) {
569 ltt_force_switch(trace->channels[i].buf[j],
570 FORCE_FLUSH);
571 }
572 }
573
574 unlock_traces:
575 ltt_unlock_traces();
576
577 return retval;
578 }
579
/* Execute one trace-level control command on the named trace.
 * Returns 0 on success or the negative error of the first failing step.
 * All traces created through here use the "ustrelay" transport type.
 */
static int process_trace_cmd(int command, char *trace_name)
{
	int result;
	char trace_type[] = "ustrelay";

	switch(command) {
	case START:
		/* start is an operation that setups the trace, allocates it and starts it */
		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}

		/* Let the daemon know about the buffers before tracing
		 * actually starts producing data. */
		inform_consumer_daemon(trace_name);

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case SETUP_TRACE:
		/* Setup only: create the trace structure and set its type. */
		DBG("trace setup");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case ALLOC_TRACE:
		/* Allocate buffers for an already set-up trace and tell the
		 * consumer daemon about them. */
		DBG("trace alloc");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		inform_consumer_daemon(trace_name);

		return 0;

	case CREATE_TRACE:
		/* Same steps as SETUP_TRACE; kept as a distinct command for
		 * protocol compatibility. */
		DBG("trace create");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case START_TRACE:
		DBG("trace start");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		/* NOTE(review): presumably ltt_trace_alloc returns > 0 when
		 * the trace was already allocated, so the daemon is only
		 * informed on a fresh allocation (result == 0) -- confirm. */
		if (!result) {
			inform_consumer_daemon(trace_name);
		}

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case STOP_TRACE:
		DBG("trace stop");

		result = ltt_trace_stop(trace_name);
		if (result < 0) {
			ERR("ltt_trace_stop failed");
			return result;
		}

		return 0;
	case DESTROY_TRACE:
		DBG("trace destroy");

		result = ltt_trace_destroy(trace_name, 0);
		if (result < 0) {
			ERR("ltt_trace_destroy failed");
			return result;
		}
		return 0;
	case FORCE_SUBBUF_SWITCH:
		/* Flush all buffers of the trace so partially-filled
		 * subbuffers become readable by the consumer. */
		DBG("force switch");

		result = force_subbuf_switch(trace_name);
		if (result < 0) {
			ERR("force_subbuf_switch failed");
			return result;
		}
		return 0;
	}

	/* Unknown commands are silently treated as success. */
	return 0;
}
710
711
/* Handle one channel-level command (get/set subbuffer count and size)
 * and send the reply on sock. The reply payload reuses the global
 * send_buffer; the channel pointer field is poisoned before sending.
 */
static void process_channel_cmd(int sock, int command,
				struct ustcomm_channel_info *ch_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_channel_info *reply_msg =
		(struct ustcomm_channel_info *)send_buffer;
	int result, offset = 0, num, size;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_SUBBUF_NUM_SIZE:
		result = get_subbuf_num_size(ch_inf->trace,
					     ch_inf->channel,
					     &num, &size);
		if (result < 0) {
			/* Error reply: result set, empty payload. */
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->subbuf_num = num;
		reply_msg->subbuf_size = size;


		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case SET_SUBBUF_NUM:
		reply_header->result = set_subbuf_num(ch_inf->trace,
						      ch_inf->channel,
						      ch_inf->subbuf_num);

		break;
	case SET_SUBBUF_SIZE:
		reply_header->result = set_subbuf_size(ch_inf->trace,
						       ch_inf->channel,
						       ch_inf->subbuf_size);


		break;
	}
	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}
}
759
/* Handle one buffer-level command (shmid/pipe-fd export, map
 * notification, get/put subbuffer) and send the reply on sock. The
 * GET_BUF_SHMID_PIPE_FD case replies early because it must send an fd
 * along with the message; all other cases fall through to the common
 * send at the bottom.
 */
static void process_buffer_cmd(int sock, int command,
			       struct ustcomm_buffer_info *buf_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_buffer_info *reply_msg =
		(struct ustcomm_buffer_info *)send_buffer;
	int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
	long consumed_old;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_BUF_SHMID_PIPE_FD:
		result = get_buffer_shmid_pipe_fd(buf_inf->trace,
						  buf_inf->channel,
						  buf_inf->ch_cpu,
						  &buf_shmid,
						  &buf_struct_shmid,
						  &buf_pipe_fd);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->buf_shmid = buf_shmid;
		reply_msg->buf_struct_shmid = buf_struct_shmid;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
		reply_header->fd_included = 1;

		/* The pipe fd travels as SCM_RIGHTS ancillary data, hence
		 * the dedicated send path and early return. */
		if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
				    &buf_pipe_fd) < 0) {
			ERR("ustcomm_send failed");
		}
		return;

	case NOTIFY_BUF_MAPPED:
		reply_header->result =
			notify_buffer_mapped(buf_inf->trace,
					     buf_inf->channel,
					     buf_inf->ch_cpu);
		break;
	case GET_SUBBUFFER:
		result = get_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu, &consumed_old);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->consumed_old = consumed_old;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case PUT_SUBBUFFER:
		result = put_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu,
				       buf_inf->consumed_old);
		reply_header->result = result;

		break;
	}

	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}

}
832
833 static void process_marker_cmd(int sock, int command,
834 struct ustcomm_marker_info *marker_inf)
835 {
836 struct ustcomm_header _reply_header;
837 struct ustcomm_header *reply_header = &_reply_header;
838 int result = 0;
839
840 memset(reply_header, 0, sizeof(*reply_header));
841
842 switch(command) {
843 case ENABLE_MARKER:
844
845 result = ltt_marker_connect(marker_inf->channel,
846 marker_inf->marker,
847 "default");
848 if (result < 0) {
849 WARN("could not enable marker; channel=%s,"
850 " name=%s",
851 marker_inf->channel,
852 marker_inf->marker);
853
854 }
855 break;
856 case DISABLE_MARKER:
857 result = ltt_marker_disconnect(marker_inf->channel,
858 marker_inf->marker,
859 "default");
860 if (result < 0) {
861 WARN("could not disable marker; channel=%s,"
862 " name=%s",
863 marker_inf->channel,
864 marker_inf->marker);
865 }
866 break;
867 }
868
869 reply_header->result = result;
870
871 if (ustcomm_send(sock, reply_header, NULL) < 0) {
872 ERR("ustcomm_send failed");
873 }
874
875 }
876 static void process_client_cmd(struct ustcomm_header *recv_header,
877 char *recv_buf, int sock)
878 {
879 int result;
880 struct ustcomm_header _reply_header;
881 struct ustcomm_header *reply_header = &_reply_header;
882 char *send_buf = send_buffer;
883
884 memset(reply_header, 0, sizeof(*reply_header));
885 memset(send_buf, 0, sizeof(send_buffer));
886
887 switch(recv_header->command) {
888 case GET_SUBBUF_NUM_SIZE:
889 case SET_SUBBUF_NUM:
890 case SET_SUBBUF_SIZE:
891 {
892 struct ustcomm_channel_info *ch_inf;
893 ch_inf = (struct ustcomm_channel_info *)recv_buf;
894 result = ustcomm_unpack_channel_info(ch_inf);
895 if (result < 0) {
896 ERR("couldn't unpack channel info");
897 reply_header->result = -EINVAL;
898 goto send_response;
899 }
900 process_channel_cmd(sock, recv_header->command, ch_inf);
901 return;
902 }
903 case GET_BUF_SHMID_PIPE_FD:
904 case NOTIFY_BUF_MAPPED:
905 case GET_SUBBUFFER:
906 case PUT_SUBBUFFER:
907 {
908 struct ustcomm_buffer_info *buf_inf;
909 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
910 result = ustcomm_unpack_buffer_info(buf_inf);
911 if (result < 0) {
912 ERR("couldn't unpack buffer info");
913 reply_header->result = -EINVAL;
914 goto send_response;
915 }
916 process_buffer_cmd(sock, recv_header->command, buf_inf);
917 return;
918 }
919 case ENABLE_MARKER:
920 case DISABLE_MARKER:
921 {
922 struct ustcomm_marker_info *marker_inf;
923 marker_inf = (struct ustcomm_marker_info *)recv_buf;
924 result = ustcomm_unpack_marker_info(marker_inf);
925 if (result < 0) {
926 ERR("couldn't unpack marker info");
927 reply_header->result = -EINVAL;
928 goto send_response;
929 }
930 process_marker_cmd(sock, recv_header->command, marker_inf);
931 return;
932 }
933 case LIST_MARKERS:
934 {
935 char *ptr;
936 size_t size;
937 FILE *fp;
938
939 fp = open_memstream(&ptr, &size);
940 if (fp == NULL) {
941 ERR("opening memstream failed");
942 return;
943 }
944 print_markers(fp);
945 fclose(fp);
946
947 reply_header->size = size + 1; /* Include final \0 */
948
949 result = ustcomm_send(sock, reply_header, ptr);
950
951 free(ptr);
952
953 if (result < 0) {
954 PERROR("failed to send markers list");
955 }
956
957 break;
958 }
959 case LIST_TRACE_EVENTS:
960 {
961 char *ptr;
962 size_t size;
963 FILE *fp;
964
965 fp = open_memstream(&ptr, &size);
966 if (fp == NULL) {
967 ERR("opening memstream failed");
968 return;
969 }
970 print_trace_events(fp);
971 fclose(fp);
972
973 reply_header->size = size + 1; /* Include final \0 */
974
975 result = ustcomm_send(sock, reply_header, ptr);
976
977 free(ptr);
978
979 if (result < 0) {
980 ERR("list_trace_events failed");
981 return;
982 }
983
984 break;
985 }
986 case LOAD_PROBE_LIB:
987 {
988 char *libfile;
989
990 /* FIXME: No functionality at all... */
991 libfile = recv_buf;
992
993 DBG("load_probe_lib loading %s", libfile);
994
995 break;
996 }
997 case GET_PIDUNIQUE:
998 {
999 struct ustcomm_pidunique *pid_msg;
1000 pid_msg = (struct ustcomm_pidunique *)send_buf;
1001
1002 pid_msg->pidunique = pidunique;
1003 reply_header->size = sizeof(pid_msg);
1004
1005 goto send_response;
1006
1007 }
1008 case GET_SOCK_PATH:
1009 {
1010 struct ustcomm_single_field *sock_msg;
1011 char *sock_path_env;
1012
1013 sock_msg = (struct ustcomm_single_field *)send_buf;
1014
1015 sock_path_env = getenv("UST_DAEMON_SOCKET");
1016
1017 if (!sock_path_env) {
1018 result = ustcomm_pack_single_field(reply_header,
1019 sock_msg,
1020 SOCK_DIR "/ustconsumer");
1021
1022 } else {
1023 result = ustcomm_pack_single_field(reply_header,
1024 sock_msg,
1025 sock_path_env);
1026 }
1027 reply_header->result = result;
1028
1029 goto send_response;
1030 }
1031 case SET_SOCK_PATH:
1032 {
1033 struct ustcomm_single_field *sock_msg;
1034 sock_msg = (struct ustcomm_single_field *)recv_buf;
1035 result = ustcomm_unpack_single_field(sock_msg);
1036 if (result < 0) {
1037 reply_header->result = -EINVAL;
1038 goto send_response;
1039 }
1040
1041 reply_header->result = setenv("UST_DAEMON_SOCKET",
1042 sock_msg->field, 1);
1043
1044 goto send_response;
1045 }
1046 case START:
1047 case SETUP_TRACE:
1048 case ALLOC_TRACE:
1049 case CREATE_TRACE:
1050 case START_TRACE:
1051 case STOP_TRACE:
1052 case DESTROY_TRACE:
1053 case FORCE_SUBBUF_SWITCH:
1054 {
1055 struct ustcomm_single_field *trace_inf =
1056 (struct ustcomm_single_field *)recv_buf;
1057
1058 result = ustcomm_unpack_single_field(trace_inf);
1059 if (result < 0) {
1060 ERR("couldn't unpack trace info");
1061 reply_header->result = -EINVAL;
1062 goto send_response;
1063 }
1064
1065 reply_header->result =
1066 process_trace_cmd(recv_header->command,
1067 trace_inf->field);
1068 goto send_response;
1069
1070 }
1071 default:
1072 reply_header->result = -EINVAL;
1073
1074 goto send_response;
1075 }
1076
1077 return;
1078
1079 send_response:
1080 ustcomm_send(sock, reply_header, send_buf);
1081 }
1082
1083 #define MAX_EVENTS 10
1084
1085 void *listener_main(void *p)
1086 {
1087 struct ustcomm_sock *epoll_sock;
1088 struct epoll_event events[MAX_EVENTS];
1089 struct sockaddr addr;
1090 int accept_fd, nfds, result, i, addr_size;
1091
1092 DBG("LISTENER");
1093
1094 pthread_cleanup_push(listener_cleanup, NULL);
1095
1096 for(;;) {
1097 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1098 if (nfds == -1) {
1099 PERROR("listener_main: epoll_wait failed");
1100 continue;
1101 }
1102
1103 for (i = 0; i < nfds; i++) {
1104 pthread_mutex_lock(&listener_thread_data_mutex);
1105 pthread_cleanup_push(release_listener_mutex, NULL);
1106 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1107 if (epoll_sock == listen_sock) {
1108 addr_size = sizeof(struct sockaddr);
1109 accept_fd = accept(epoll_sock->fd,
1110 &addr,
1111 (socklen_t *)&addr_size);
1112 if (accept_fd == -1) {
1113 PERROR("listener_main: accept failed");
1114 continue;
1115 }
1116 ustcomm_init_sock(accept_fd, epoll_fd,
1117 &ust_socks);
1118 } else {
1119 memset(receive_header, 0,
1120 sizeof(*receive_header));
1121 memset(receive_buffer, 0,
1122 sizeof(receive_buffer));
1123 result = ustcomm_recv(epoll_sock->fd,
1124 receive_header,
1125 receive_buffer);
1126 if (result == 0) {
1127 ustcomm_del_sock(epoll_sock, 0);
1128 } else {
1129 process_client_cmd(receive_header,
1130 receive_buffer,
1131 epoll_sock->fd);
1132 }
1133 }
1134 pthread_cleanup_pop(1); /* release listener mutex */
1135 }
1136 }
1137
1138 pthread_cleanup_pop(1);
1139 }
1140
1141 /* These should only be accessed in the parent thread,
1142 * not the listener.
1143 */
1144 static volatile sig_atomic_t have_listener = 0;
1145 static pthread_t listener_thread;
1146
1147 void create_listener(void)
1148 {
1149 int result;
1150 sigset_t sig_all_blocked;
1151 sigset_t orig_parent_mask;
1152
1153 if (have_listener) {
1154 WARN("not creating listener because we already had one");
1155 return;
1156 }
1157
1158 /* A new thread created by pthread_create inherits the signal mask
1159 * from the parent. To avoid any signal being received by the
1160 * listener thread, we block all signals temporarily in the parent,
1161 * while we create the listener thread.
1162 */
1163
1164 sigfillset(&sig_all_blocked);
1165
1166 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1167 if (result) {
1168 PERROR("pthread_sigmask: %s", strerror(result));
1169 }
1170
1171 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1172 if (result == -1) {
1173 PERROR("pthread_create");
1174 }
1175
1176 /* Restore original signal mask in parent */
1177 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1178 if (result) {
1179 PERROR("pthread_sigmask: %s", strerror(result));
1180 } else {
1181 have_listener = 1;
1182 }
1183 }
1184
1185 #define AUTOPROBE_DISABLED 0
1186 #define AUTOPROBE_ENABLE_ALL 1
1187 #define AUTOPROBE_ENABLE_REGEX 2
1188 static int autoprobe_method = AUTOPROBE_DISABLED;
1189 static regex_t autoprobe_regex;
1190
/* Connect the "default" probe to marker m according to the autoprobe
 * setting: never (DISABLED), always (ENABLE_ALL), or only when
 * "channel/name" matches the UST_AUTOPROBE regex (ENABLE_REGEX).
 * Used both for already-registered markers and as the new-marker
 * callback.
 */
static void auto_probe_connect(struct marker *m)
{
	int result;

	char* concat_name = NULL;
	const char *probe_name = "default";

	if (autoprobe_method == AUTOPROBE_DISABLED) {
		return;
	} else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
		/* Match the regex against the "channel/name" pair. */
		result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
		if (result == -1) {
			ERR("auto_probe_connect: asprintf failed (marker %s/%s)",
				m->channel, m->name);
			return;
		}
		if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
			/* No match: leave this marker unconnected. */
			free(concat_name);
			return;
		}
		free(concat_name);
	}

	/* -EEXIST just means the probe was already connected; not an error. */
	result = ltt_marker_connect(m->channel, m->name, probe_name);
	if (result && result != -EEXIST)
		ERR("ltt_marker_connect (marker = %s/%s, errno = %d)", m->channel, m->name, -result);

	DBG("auto connected marker %s (addr: %p) %s to probe default", m->channel, m, m->name);

}
1221
1222 static struct ustcomm_sock * init_app_socket(int epoll_fd)
1223 {
1224 char *name;
1225 int result;
1226 struct ustcomm_sock *sock;
1227
1228 result = asprintf(&name, "%s/%d", SOCK_DIR, (int)getpid());
1229 if (result < 0) {
1230 ERR("string overflow allocating socket name, "
1231 "UST thread bailing");
1232 return NULL;
1233 }
1234
1235 result = ensure_dir_exists(SOCK_DIR);
1236 if (result == -1) {
1237 ERR("Unable to create socket directory %s, UST thread bailing",
1238 SOCK_DIR);
1239 goto free_name;
1240 }
1241
1242 sock = ustcomm_init_named_socket(name, epoll_fd);
1243 if (!sock) {
1244 ERR("Error initializing named socket (%s). Check that directory"
1245 "exists and that it is writable. UST thread bailing", name);
1246 goto free_name;
1247 }
1248
1249 free(name);
1250 return sock;
1251
1252 free_name:
1253 free(name);
1254 return NULL;
1255 }
1256
1257 static void __attribute__((constructor)) init()
1258 {
1259 struct timespec ts;
1260 int result;
1261 char* autoprobe_val = NULL;
1262 char* subbuffer_size_val = NULL;
1263 char* subbuffer_count_val = NULL;
1264 unsigned int subbuffer_size;
1265 unsigned int subbuffer_count;
1266 unsigned int power;
1267
1268 /* Assign the pidunique, to be able to differentiate the processes with same
1269 * pid, (before and after an exec).
1270 */
1271 pidunique = make_pidunique();
1272 processpid = getpid();
1273
1274 DBG("Tracectl constructor");
1275
1276 /* Set up epoll */
1277 epoll_fd = epoll_create(MAX_EVENTS);
1278 if (epoll_fd == -1) {
1279 ERR("epoll_create failed, tracing shutting down");
1280 return;
1281 }
1282
1283 /* Create the socket */
1284 listen_sock = init_app_socket(epoll_fd);
1285 if (!listen_sock) {
1286 ERR("failed to create application socket,"
1287 " tracing shutting down");
1288 return;
1289 }
1290
1291 create_listener();
1292
1293 /* Get clock the clock source type */
1294
1295 /* Default clock source */
1296 ust_clock_source = CLOCK_TRACE;
1297 if (clock_gettime(ust_clock_source, &ts) != 0) {
1298 ust_clock_source = CLOCK_MONOTONIC;
1299 DBG("UST traces will not be synchronized with LTTng traces");
1300 }
1301
1302 autoprobe_val = getenv("UST_AUTOPROBE");
1303 if (autoprobe_val) {
1304 struct marker_iter iter;
1305
1306 DBG("Autoprobe enabled.");
1307
1308 /* Ensure markers are initialized */
1309 //init_markers();
1310
1311 /* Ensure marker control is initialized, for the probe */
1312 init_marker_control();
1313
1314 /* first, set the callback that will connect the
1315 * probe on new markers
1316 */
1317 if (autoprobe_val[0] == '/') {
1318 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1319 if (result) {
1320 char regexerr[150];
1321
1322 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1323 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1324 /* don't crash the application just for this */
1325 } else {
1326 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1327 }
1328 } else {
1329 /* just enable all instrumentation */
1330 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1331 }
1332
1333 marker_set_new_marker_cb(auto_probe_connect);
1334
1335 /* Now, connect the probes that were already registered. */
1336 marker_iter_reset(&iter);
1337 marker_iter_start(&iter);
1338
1339 DBG("now iterating on markers already registered");
1340 while (iter.marker) {
1341 DBG("now iterating on marker %s", (*iter.marker)->name);
1342 auto_probe_connect(*iter.marker);
1343 marker_iter_next(&iter);
1344 }
1345 }
1346
1347 if (getenv("UST_OVERWRITE")) {
1348 int val = atoi(getenv("UST_OVERWRITE"));
1349 if (val == 0 || val == 1) {
1350 CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
1351 } else {
1352 WARN("invalid value for UST_OVERWRITE");
1353 }
1354 }
1355
1356 if (getenv("UST_AUTOCOLLECT")) {
1357 int val = atoi(getenv("UST_AUTOCOLLECT"));
1358 if (val == 0 || val == 1) {
1359 CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
1360 } else {
1361 WARN("invalid value for UST_AUTOCOLLECT");
1362 }
1363 }
1364
1365 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1366 if (subbuffer_size_val) {
1367 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1368 power = pow2_higher_or_eq(subbuffer_size);
1369 if (power != subbuffer_size)
1370 WARN("using the next power of two for buffer size = %u\n", power);
1371 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1372 }
1373
1374 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1375 if (subbuffer_count_val) {
1376 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1377 if (subbuffer_count < 2)
1378 subbuffer_count = 2;
1379 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1380 }
1381
1382 if (getenv("UST_TRACE")) {
1383 char trace_name[] = "auto";
1384 char trace_type[] = "ustrelay";
1385
1386 DBG("starting early tracing");
1387
1388 /* Ensure marker control is initialized */
1389 init_marker_control();
1390
1391 /* Ensure markers are initialized */
1392 init_markers();
1393
1394 /* Ensure buffers are initialized, for the transport to be available.
1395 * We are about to set a trace type and it will fail without this.
1396 */
1397 init_ustrelay_transport();
1398
1399 /* FIXME: When starting early tracing (here), depending on the
1400 * order of constructors, it is very well possible some marker
1401 * sections are not yet registered. Because of this, some
1402 * channels may not be registered. Yet, we are about to ask the
1403 * daemon to collect the channels. Channels which are not yet
1404 * registered will not be collected.
1405 *
1406 * Currently, in LTTng, there is no way to add a channel after
1407 * trace start. The reason for this is that it induces complex
1408 * concurrency issues on the trace structures, which can only
1409 * be resolved using RCU. This has not been done yet. As a
1410 * workaround, we are forcing the registration of the "ust"
1411 * channel here. This is the only channel (apart from metadata)
1412 * that can be reliably used in early tracing.
1413 *
1414 * Non-early tracing does not have this problem and can use
1415 * arbitrary channel names.
1416 */
1417 ltt_channels_register("ust");
1418
1419 result = ltt_trace_setup(trace_name);
1420 if (result < 0) {
1421 ERR("ltt_trace_setup failed");
1422 return;
1423 }
1424
1425 result = ltt_trace_set_type(trace_name, trace_type);
1426 if (result < 0) {
1427 ERR("ltt_trace_set_type failed");
1428 return;
1429 }
1430
1431 result = ltt_trace_alloc(trace_name);
1432 if (result < 0) {
1433 ERR("ltt_trace_alloc failed");
1434 return;
1435 }
1436
1437 result = ltt_trace_start(trace_name);
1438 if (result < 0) {
1439 ERR("ltt_trace_start failed");
1440 return;
1441 }
1442
1443 /* Do this after the trace is started in order to avoid creating confusion
1444 * if the trace fails to start. */
1445 inform_consumer_daemon(trace_name);
1446 }
1447
1448 return;
1449
1450 /* should decrementally destroy stuff if error */
1451
1452 }
1453
1454 /* This is only called if we terminate normally, not with an unhandled signal,
1455 * so we cannot rely on it. However, for now, LTTV requires that the header of
1456 * the last sub-buffer contain a valid end time for the trace. This is done
1457 * automatically only when the trace is properly stopped.
1458 *
1459 * If the traced program crashed, it is always possible to manually add the
1460 * right value in the header, or to open the trace in text mode.
1461 *
1462 * FIXME: Fix LTTV so it doesn't need this.
1463 */
1464
/* Stop and tear down the automatic ("auto") trace.  Errors are only
 * reported, since this runs from destructor context where nothing
 * more can be done about them.
 */
static void destroy_traces(void)
{
	int err;

	/* if trace running, finish it */
	DBG("destructor stopping traces");

	err = ltt_trace_stop("auto");
	if (err == -1)
		ERR("ltt_trace_stop error");

	err = ltt_trace_destroy("auto", 0);
	if (err == -1)
		ERR("ltt_trace_destroy error");
}
1483
1484 static int trace_recording(void)
1485 {
1486 int retval = 0;
1487 struct ust_trace *trace;
1488
1489 ltt_lock_traces();
1490
1491 cds_list_for_each_entry(trace, &ltt_traces.head, list) {
1492 if (trace->active) {
1493 retval = 1;
1494 break;
1495 }
1496 }
1497
1498 ltt_unlock_traces();
1499
1500 return retval;
1501 }
1502
1503 int restarting_usleep(useconds_t usecs)
1504 {
1505 struct timespec tv;
1506 int result;
1507
1508 tv.tv_sec = 0;
1509 tv.tv_nsec = usecs * 1000;
1510
1511 do {
1512 result = nanosleep(&tv, &tv);
1513 } while (result == -1 && errno == EINTR);
1514
1515 return result;
1516 }
1517
1518 static void stop_listener(void)
1519 {
1520 int result;
1521
1522 if (!have_listener)
1523 return;
1524
1525 result = pthread_cancel(listener_thread);
1526 if (result != 0) {
1527 ERR("pthread_cancel: %s", strerror(result));
1528 }
1529 result = pthread_join(listener_thread, NULL);
1530 if (result != 0) {
1531 ERR("pthread_join: %s", strerror(result));
1532 }
1533 }
1534
1535 /* This destructor keeps the process alive for a few seconds in order
1536 * to leave time for ustconsumer to connect to its buffers. This is necessary
1537 * for programs whose execution is very short. It is also useful in all
1538 * programs when tracing is started close to the end of the program
1539 * execution.
1540 *
1541 * FIXME: For now, this only works for the first trace created in a
1542 * process.
1543 */
1544
1545 static void __attribute__((destructor)) keepalive()
1546 {
1547 if (processpid != getpid()) {
1548 return;
1549 }
1550
1551 if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
1552 int total = 0;
1553 DBG("Keeping process alive for consumer daemon...");
1554 while (CMM_LOAD_SHARED(buffers_to_export)) {
1555 const int interv = 200000;
1556 restarting_usleep(interv);
1557 total += interv;
1558
1559 if (total >= 3000000) {
1560 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1561 break;
1562 }
1563 }
1564 DBG("Finally dying...");
1565 }
1566
1567 destroy_traces();
1568
1569 /* Ask the listener to stop and clean up. */
1570 stop_listener();
1571 }
1572
/* Called by an application before a potential exec(): record a marker
 * event, then run the normal destructor sequence (wait for the
 * consumer, destroy traces, stop the listener) so tracing state is
 * wound down before the process image may be replaced.
 */
void ust_potential_exec(void)
{
	trace_mark(ust, potential_exec, MARK_NOARGS);

	/* NOTE(review): looks like a leftover debug message — consider removing. */
	DBG("test");

	keepalive();
}
1581
1582 /* Notify ust that there was a fork. This needs to be called inside
1583 * the new process, anytime a process whose memory is not shared with
1584 * the parent is created. If this function is not called, the events
1585 * of the new process will not be collected.
1586 *
1587 * Signals should be disabled before the fork and reenabled only after
1588 * this call in order to guarantee tracing is not started before ust_fork()
1589 * sanitizes the new process.
1590 */
1591
1592 static void ust_fork(void)
1593 {
1594 struct ustcomm_sock *sock, *sock_tmp;
1595 struct ust_trace *trace, *trace_tmp;
1596 int result;
1597
1598 /* FIXME: technically, the locks could have been taken before the fork */
1599 DBG("ust: forking");
1600
1601 /* Get the pid of the new process */
1602 processpid = getpid();
1603
1604 /*
1605 * FIXME: This could be prettier, we loop over the list twice and
1606 * following good locking practice should lock around the loop
1607 */
1608 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1609 ltt_trace_stop(trace->trace_name);
1610 }
1611
1612 /* Delete all active connections, but leave them in the epoll set */
1613 cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
1614 ustcomm_del_sock(sock, 1);
1615 }
1616
1617 /*
1618 * FIXME: This could be prettier, we loop over the list twice and
1619 * following good locking practice should lock around the loop
1620 */
1621 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1622 ltt_trace_destroy(trace->trace_name, 1);
1623 }
1624
1625 /* Clean up the listener socket and epoll, keeping the socket file */
1626 if (listen_sock) {
1627 ustcomm_del_named_sock(listen_sock, 1);
1628 listen_sock = NULL;
1629 }
1630 close(epoll_fd);
1631
1632 /* Re-start the launch sequence */
1633 CMM_STORE_SHARED(buffers_to_export, 0);
1634 have_listener = 0;
1635
1636 /* Set up epoll */
1637 epoll_fd = epoll_create(MAX_EVENTS);
1638 if (epoll_fd == -1) {
1639 ERR("epoll_create failed, tracing shutting down");
1640 return;
1641 }
1642
1643 /* Create the socket */
1644 listen_sock = init_app_socket(epoll_fd);
1645 if (!listen_sock) {
1646 ERR("failed to create application socket,"
1647 " tracing shutting down");
1648 return;
1649 }
1650 create_listener();
1651 ltt_trace_setup("auto");
1652 result = ltt_trace_set_type("auto", "ustrelay");
1653 if (result < 0) {
1654 ERR("ltt_trace_set_type failed");
1655 return;
1656 }
1657
1658 ltt_trace_alloc("auto");
1659 ltt_trace_start("auto");
1660 inform_consumer_daemon("auto");
1661 }
1662
1663 void ust_before_fork(ust_fork_info_t *fork_info)
1664 {
1665 /* Disable signals. This is to avoid that the child
1666 * intervenes before it is properly setup for tracing. It is
1667 * safer to disable all signals, because then we know we are not
1668 * breaking anything by restoring the original mask.
1669 */
1670 sigset_t all_sigs;
1671 int result;
1672
1673 /* FIXME:
1674 - only do this if tracing is active
1675 */
1676
1677 /* Disable signals */
1678 sigfillset(&all_sigs);
1679 result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
1680 if (result == -1) {
1681 PERROR("sigprocmask");
1682 return;
1683 }
1684
1685 /*
1686 * Take the fork lock to make sure we are not in the middle of
1687 * something in the listener thread.
1688 */
1689 pthread_mutex_lock(&listener_thread_data_mutex);
1690 /*
1691 * Hold listen_sock_mutex to protect from listen_sock teardown.
1692 */
1693 pthread_mutex_lock(&listen_sock_mutex);
1694 rcu_bp_before_fork();
1695 }
1696
1697 /* Don't call this function directly in a traced program */
1698 static void ust_after_fork_common(ust_fork_info_t *fork_info)
1699 {
1700 int result;
1701
1702 pthread_mutex_unlock(&listen_sock_mutex);
1703 pthread_mutex_unlock(&listener_thread_data_mutex);
1704
1705 /* Restore signals */
1706 result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
1707 if (result == -1) {
1708 PERROR("sigprocmask");
1709 return;
1710 }
1711 }
1712
1713 void ust_after_fork_parent(ust_fork_info_t *fork_info)
1714 {
1715 rcu_bp_after_fork_parent();
1716 /* Release mutexes and reenable signals */
1717 ust_after_fork_common(fork_info);
1718 }
1719
1720 void ust_after_fork_child(ust_fork_info_t *fork_info)
1721 {
1722 /* Release urcu mutexes */
1723 rcu_bp_after_fork_child();
1724
1725 /* Sanitize the child */
1726 ust_fork();
1727
1728 /* Then release mutexes and reenable signals */
1729 ust_after_fork_common(fork_info);
1730 }
1731
This page took 0.081292 seconds and 5 git commands to generate.