/*
 * libust/tracectl.c (ust.git)
 * Imported at commit: "Tracepoint and TRACEPOINT_EVENT API cleanup"
 */
1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #define _LGPL_SOURCE
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <stdint.h>
28 #include <pthread.h>
29 #include <signal.h>
30 #include <sys/epoll.h>
31 #include <sys/time.h>
32 #include <sys/types.h>
33 #include <sys/socket.h>
34 #include <fcntl.h>
35 #include <poll.h>
36 #include <regex.h>
37 #include <urcu/uatomic_arch.h>
38 #include <urcu/list.h>
39
40 #include <ust/marker.h>
41 #include <ust/tracepoint.h>
42 #include <ust/tracepoint-internal.h>
43 #include <ust/tracectl.h>
44 #include <ust/clock.h>
45 #include "tracer.h"
46 #include "usterr_signal_safe.h"
47 #include "ustcomm.h"
48 #include "buffers.h"
49 #include "marker-control.h"
50
/* This should only be accessed by the constructor, before the creation
 * of the listener, and then only by the listener.
 */
s64 pidunique = -1LL;

/* The process pid is used to detect a non-traceable fork
 * and allow the non-traceable fork to be ignored
 * by destructor sequences in libust
 */
static pid_t processpid = 0;

/* Scratch header/buffers for one command round-trip; only used from the
 * listener thread's receive/dispatch path.
 */
static struct ustcomm_header _receive_header;
static struct ustcomm_header *receive_header = &_receive_header;
static char receive_buffer[USTCOMM_BUFFER_SIZE];
static char send_buffer[USTCOMM_BUFFER_SIZE];

/* epoll instance the listener thread waits on; created in the constructor. */
static int epoll_fd;

/*
 * Listener thread data vs fork() protection mechanism. Ensures that no listener
 * thread mutexes and data structures are being concurrently modified or held by
 * other threads when fork() is executed.
 */
static pthread_mutex_t listener_thread_data_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mutex protecting listen_sock. Nests inside listener_thread_data_mutex. */
static pthread_mutex_t listen_sock_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct ustcomm_sock *listen_sock;

extern struct chan_info_struct chan_infos[];

/* Sockets accepted from clients by the listener thread. */
static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);

/* Shared between the listener and the main thread.  Not volatile-qualified:
 * all shared accesses go through CMM_LOAD_SHARED()/CMM_STORE_SHARED().
 */
int buffers_to_export = 0;

int ust_clock_source;
89 static long long make_pidunique(void)
90 {
91 s64 retval;
92 struct timeval tv;
93
94 gettimeofday(&tv, NULL);
95
96 retval = tv.tv_sec;
97 retval <<= 32;
98 retval |= tv.tv_usec;
99
100 return retval;
101 }
102
103 static void print_ust_marker(FILE *fp)
104 {
105 struct ust_marker_iter iter;
106
107 lock_ust_marker();
108 ust_marker_iter_reset(&iter);
109 ust_marker_iter_start(&iter);
110
111 while (iter.ust_marker) {
112 fprintf(fp, "ust_marker: %s/%s %d \"%s\" %p\n",
113 (*iter.ust_marker)->channel,
114 (*iter.ust_marker)->name,
115 (int)(*iter.ust_marker)->state,
116 (*iter.ust_marker)->format,
117 NULL); /*
118 * location is null for now, will be added
119 * to a different table.
120 */
121 ust_marker_iter_next(&iter);
122 }
123 unlock_ust_marker();
124 }
125
126 static void print_trace_events(FILE *fp)
127 {
128 struct trace_event_iter iter;
129
130 lock_trace_events();
131 trace_event_iter_reset(&iter);
132 trace_event_iter_start(&iter);
133
134 while (iter.trace_event) {
135 fprintf(fp, "trace_event: %s\n", (*iter.trace_event)->name);
136 trace_event_iter_next(&iter);
137 }
138 unlock_trace_events();
139 }
140
141 static int connect_ustconsumer(void)
142 {
143 int result, fd;
144 char default_daemon_path[] = SOCK_DIR "/ustconsumer";
145 char *explicit_daemon_path, *daemon_path;
146
147 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
148 if (explicit_daemon_path) {
149 daemon_path = explicit_daemon_path;
150 } else {
151 daemon_path = default_daemon_path;
152 }
153
154 DBG("Connecting to daemon_path %s", daemon_path);
155
156 result = ustcomm_connect_path(daemon_path, &fd);
157 if (result < 0) {
158 WARN("connect_ustconsumer failed, daemon_path: %s",
159 daemon_path);
160 return result;
161 }
162
163 return fd;
164 }
165
166
167 static void request_buffer_consumer(int sock,
168 const char *trace,
169 const char *channel,
170 int cpu)
171 {
172 struct ustcomm_header send_header, recv_header;
173 struct ustcomm_buffer_info buf_inf;
174 int result = 0;
175
176 result = ustcomm_pack_buffer_info(&send_header,
177 &buf_inf,
178 trace,
179 channel,
180 cpu);
181
182 if (result < 0) {
183 ERR("failed to pack buffer info message %s_%d",
184 channel, cpu);
185 return;
186 }
187
188 buf_inf.pid = getpid();
189 send_header.command = CONSUME_BUFFER;
190
191 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
192 &recv_header, NULL);
193 if (result <= 0) {
194 PERROR("request for buffer consumer failed, is the daemon online?");
195 }
196
197 return;
198 }
199
200 /* Ask the daemon to collect a trace called trace_name and being
201 * produced by this pid.
202 *
203 * The trace must be at least allocated. (It can also be started.)
204 * This is because _ltt_trace_find is used.
205 */
206
207 static void inform_consumer_daemon(const char *trace_name)
208 {
209 int sock, i,j;
210 struct ust_trace *trace;
211 const char *ch_name;
212
213 sock = connect_ustconsumer();
214 if (sock < 0) {
215 return;
216 }
217
218 DBG("Connected to ustconsumer");
219
220 ltt_lock_traces();
221
222 trace = _ltt_trace_find(trace_name);
223 if (trace == NULL) {
224 WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
225 goto unlock_traces;
226 }
227
228 for (i=0; i < trace->nr_channels; i++) {
229 if (trace->channels[i].request_collection) {
230 /* iterate on all cpus */
231 for (j=0; j<trace->channels[i].n_cpus; j++) {
232 ch_name = trace->channels[i].channel_name;
233 request_buffer_consumer(sock, trace_name,
234 ch_name, j);
235 CMM_STORE_SHARED(buffers_to_export,
236 CMM_LOAD_SHARED(buffers_to_export)+1);
237 }
238 }
239 }
240
241 unlock_traces:
242 ltt_unlock_traces();
243
244 close(sock);
245 }
246
247 static struct ust_channel *find_channel(const char *ch_name,
248 struct ust_trace *trace)
249 {
250 int i;
251
252 for (i=0; i<trace->nr_channels; i++) {
253 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
254 return &trace->channels[i];
255 }
256 }
257
258 return NULL;
259 }
260
261 static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
262 int ch_cpu,
263 int *buf_shmid,
264 int *buf_struct_shmid,
265 int *buf_pipe_fd)
266 {
267 struct ust_trace *trace;
268 struct ust_channel *channel;
269 struct ust_buffer *buf;
270
271 DBG("get_buffer_shmid_pipe_fd");
272
273 ltt_lock_traces();
274 trace = _ltt_trace_find(trace_name);
275 ltt_unlock_traces();
276
277 if (trace == NULL) {
278 ERR("cannot find trace!");
279 return -ENODATA;
280 }
281
282 channel = find_channel(ch_name, trace);
283 if (!channel) {
284 ERR("cannot find channel %s!", ch_name);
285 return -ENODATA;
286 }
287
288 buf = channel->buf[ch_cpu];
289
290 *buf_shmid = buf->shmid;
291 *buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
292 *buf_pipe_fd = buf->data_ready_fd_read;
293
294 return 0;
295 }
296
297 static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
298 int *num, int *size)
299 {
300 struct ust_trace *trace;
301 struct ust_channel *channel;
302
303 DBG("get_subbuf_size");
304
305 ltt_lock_traces();
306 trace = _ltt_trace_find(trace_name);
307 ltt_unlock_traces();
308
309 if (!trace) {
310 ERR("cannot find trace!");
311 return -ENODATA;
312 }
313
314 channel = find_channel(ch_name, trace);
315 if (!channel) {
316 ERR("unable to find channel");
317 return -ENODATA;
318 }
319
320 *num = channel->subbuf_cnt;
321 *size = channel->subbuf_size;
322
323 return 0;
324 }
325
/* Return the smallest power of two greater than or equal to v.
 *
 * v == 0 returns 1: the previous fls()-based implementation computed
 * 1 << (0 - 1) in that case, which is undefined behavior.  Inputs larger
 * than the biggest representable power of two saturate to that power
 * instead of looping forever or overflowing.
 */
static unsigned int pow2_higher_or_eq(unsigned int v)
{
	unsigned int power = 1;

	if (v <= 1)
		return 1;

	/* No unsigned power of two can hold v; saturate to the top one. */
	if (v > (UINT_MAX >> 1) + 1)
		return (UINT_MAX >> 1) + 1;

	while (power < v)
		power <<= 1;

	return power;
}
338
339 static int set_subbuf_size(const char *trace_name, const char *ch_name,
340 unsigned int size)
341 {
342 unsigned int power;
343 int retval = 0;
344 struct ust_trace *trace;
345 struct ust_channel *channel;
346
347 DBG("set_subbuf_size");
348
349 power = pow2_higher_or_eq(size);
350 power = max_t(unsigned int, 2u, power);
351 if (power != size) {
352 WARN("using the next power of two for buffer size = %u\n", power);
353 }
354
355 ltt_lock_traces();
356 trace = _ltt_trace_find_setup(trace_name);
357 if (trace == NULL) {
358 ERR("cannot find trace!");
359 retval = -ENODATA;
360 goto unlock_traces;
361 }
362
363 channel = find_channel(ch_name, trace);
364 if (!channel) {
365 ERR("unable to find channel");
366 retval = -ENODATA;
367 goto unlock_traces;
368 }
369
370 channel->subbuf_size = power;
371 DBG("the set_subbuf_size for the requested channel is %zu", channel->subbuf_size);
372
373 unlock_traces:
374 ltt_unlock_traces();
375
376 return retval;
377 }
378
379 static int set_subbuf_num(const char *trace_name, const char *ch_name,
380 unsigned int num)
381 {
382 struct ust_trace *trace;
383 struct ust_channel *channel;
384 int retval = 0;
385
386 DBG("set_subbuf_num");
387
388 if (num < 2) {
389 ERR("subbuffer count should be greater than 2");
390 return -EINVAL;
391 }
392
393 ltt_lock_traces();
394 trace = _ltt_trace_find_setup(trace_name);
395 if (trace == NULL) {
396 ERR("cannot find trace!");
397 retval = -ENODATA;
398 goto unlock_traces;
399 }
400
401 channel = find_channel(ch_name, trace);
402 if (!channel) {
403 ERR("unable to find channel");
404 retval = -ENODATA;
405 goto unlock_traces;
406 }
407
408 channel->subbuf_cnt = num;
409 DBG("the set_subbuf_cnt for the requested channel is %u", channel->subbuf_cnt);
410
411 unlock_traces:
412 ltt_unlock_traces();
413 return retval;
414 }
415
416 static int get_subbuffer(const char *trace_name, const char *ch_name,
417 int ch_cpu, long *consumed_old)
418 {
419 int retval = 0;
420 struct ust_trace *trace;
421 struct ust_channel *channel;
422 struct ust_buffer *buf;
423
424 DBG("get_subbuf");
425
426 *consumed_old = 0;
427
428 ltt_lock_traces();
429 trace = _ltt_trace_find(trace_name);
430
431 if (!trace) {
432 DBG("Cannot find trace. It was likely destroyed by the user.");
433 retval = -ENODATA;
434 goto unlock_traces;
435 }
436
437 channel = find_channel(ch_name, trace);
438 if (!channel) {
439 ERR("unable to find channel");
440 retval = -ENODATA;
441 goto unlock_traces;
442 }
443
444 buf = channel->buf[ch_cpu];
445
446 retval = ust_buffers_get_subbuf(buf, consumed_old);
447 if (retval < 0) {
448 WARN("missed buffer?");
449 }
450
451 unlock_traces:
452 ltt_unlock_traces();
453
454 return retval;
455 }
456
457
458 static int notify_buffer_mapped(const char *trace_name,
459 const char *ch_name,
460 int ch_cpu)
461 {
462 int retval = 0;
463 struct ust_trace *trace;
464 struct ust_channel *channel;
465 struct ust_buffer *buf;
466
467 DBG("get_buffer_fd");
468
469 ltt_lock_traces();
470 trace = _ltt_trace_find(trace_name);
471
472 if (!trace) {
473 retval = -ENODATA;
474 DBG("Cannot find trace. It was likely destroyed by the user.");
475 goto unlock_traces;
476 }
477
478 channel = find_channel(ch_name, trace);
479 if (!channel) {
480 retval = -ENODATA;
481 ERR("unable to find channel");
482 goto unlock_traces;
483 }
484
485 buf = channel->buf[ch_cpu];
486
487 /* Being here is the proof the daemon has mapped the buffer in its
488 * memory. We may now decrement buffers_to_export.
489 */
490 if (uatomic_read(&buf->consumed) == 0) {
491 DBG("decrementing buffers_to_export");
492 CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
493 }
494
495 unlock_traces:
496 ltt_unlock_traces();
497
498 return retval;
499 }
500
501 static int put_subbuffer(const char *trace_name, const char *ch_name,
502 int ch_cpu, long consumed_old)
503 {
504 int retval = 0;
505 struct ust_trace *trace;
506 struct ust_channel *channel;
507 struct ust_buffer *buf;
508
509 DBG("put_subbuf");
510
511 ltt_lock_traces();
512 trace = _ltt_trace_find(trace_name);
513
514 if (!trace) {
515 retval = -ENODATA;
516 DBG("Cannot find trace. It was likely destroyed by the user.");
517 goto unlock_traces;
518 }
519
520 channel = find_channel(ch_name, trace);
521 if (!channel) {
522 retval = -ENODATA;
523 ERR("unable to find channel");
524 goto unlock_traces;
525 }
526
527 buf = channel->buf[ch_cpu];
528
529 retval = ust_buffers_put_subbuf(buf, consumed_old);
530 if (retval < 0) {
531 WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
532 ch_name, ch_cpu);
533 } else {
534 DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
535 ch_name, ch_cpu);
536 }
537
538 unlock_traces:
539 ltt_unlock_traces();
540
541 return retval;
542 }
543
/* pthread_cleanup handler: drops the listener data mutex taken around
 * each event in listener_main(), so the lock is released even when the
 * thread is cancelled mid-processing.  ptr is unused.
 */
static void release_listener_mutex(void *ptr)
{
	pthread_mutex_unlock(&listener_thread_data_mutex);
}
548
/* pthread_cleanup handler for the listener thread: tears down the named
 * listening socket if it still exists.  Done under listen_sock_mutex and
 * with listen_sock NULLed so a concurrent teardown path cannot free it
 * twice.  ptr is unused.
 */
static void listener_cleanup(void *ptr)
{
	pthread_mutex_lock(&listen_sock_mutex);
	if (listen_sock) {
		ustcomm_del_named_sock(listen_sock, 0);
		listen_sock = NULL;
	}
	pthread_mutex_unlock(&listen_sock_mutex);
}
558
559 static int force_subbuf_switch(const char *trace_name)
560 {
561 struct ust_trace *trace;
562 int i, j, retval = 0;
563
564 ltt_lock_traces();
565 trace = _ltt_trace_find(trace_name);
566 if (!trace) {
567 retval = -ENODATA;
568 DBG("Cannot find trace. It was likely destroyed by the user.");
569 goto unlock_traces;
570 }
571
572 for (i = 0; i < trace->nr_channels; i++) {
573 for (j = 0; j < trace->channels[i].n_cpus; j++) {
574 ltt_force_switch(trace->channels[i].buf[j],
575 FORCE_FLUSH);
576 }
577 }
578
579 unlock_traces:
580 ltt_unlock_traces();
581
582 return retval;
583 }
584
/*
 * Execute one trace-level control command on the trace named trace_name.
 *
 * Returns 0 on success (and for unknown commands), or the negative error
 * of the first failing ltt_trace_* step.  All traces are created with the
 * "ustrelay" transport type.
 *
 * NOTE(review): SETUP_TRACE and CREATE_TRACE run identical code; START is
 * SETUP + ALLOC + inform consumer + START in one shot.
 */
static int process_trace_cmd(int command, char *trace_name)
{
	int result;
	char trace_type[] = "ustrelay";

	switch(command) {
	case START:
		/* start is an operation that setups the trace, allocates it and starts it */
		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}

		inform_consumer_daemon(trace_name);

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case SETUP_TRACE:
		DBG("trace setup");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case ALLOC_TRACE:
		DBG("trace alloc");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		inform_consumer_daemon(trace_name);

		return 0;

	case CREATE_TRACE:
		DBG("trace create");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case START_TRACE:
		DBG("trace start");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		/* Only inform the consumer when this call actually performed
		 * the allocation (result == 0); a positive result presumably
		 * means "already allocated" — NOTE(review): confirm against
		 * ltt_trace_alloc(). */
		if (!result) {
			inform_consumer_daemon(trace_name);
		}

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case STOP_TRACE:
		DBG("trace stop");

		result = ltt_trace_stop(trace_name);
		if (result < 0) {
			ERR("ltt_trace_stop failed");
			return result;
		}

		return 0;
	case DESTROY_TRACE:
		DBG("trace destroy");

		result = ltt_trace_destroy(trace_name, 0);
		if (result < 0) {
			ERR("ltt_trace_destroy failed");
			return result;
		}
		return 0;
	case FORCE_SUBBUF_SWITCH:
		DBG("force switch");

		result = force_subbuf_switch(trace_name);
		if (result < 0) {
			ERR("force_subbuf_switch failed");
			return result;
		}
		return 0;
	}

	/* Unknown commands are silently treated as success. */
	return 0;
}
715
716
/*
 * Handle one channel-level command and send the reply on sock.  The reply
 * payload is built in the global send_buffer (listener thread only); for
 * commands with no payload, only the header (result field) is meaningful.
 */
static void process_channel_cmd(int sock, int command,
				struct ustcomm_channel_info *ch_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_channel_info *reply_msg =
		(struct ustcomm_channel_info *)send_buffer;
	int result, offset = 0, num, size;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_SUBBUF_NUM_SIZE:
		result = get_subbuf_num_size(ch_inf->trace,
					     ch_inf->channel,
					     &num, &size);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		/* Overwrite the pointer field with a poison value so a
		 * meaningless local pointer is never sent to the peer. */
		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->subbuf_num = num;
		reply_msg->subbuf_size = size;


		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case SET_SUBBUF_NUM:
		reply_header->result = set_subbuf_num(ch_inf->trace,
						      ch_inf->channel,
						      ch_inf->subbuf_num);

		break;
	case SET_SUBBUF_SIZE:
		reply_header->result = set_subbuf_size(ch_inf->trace,
						       ch_inf->channel,
						       ch_inf->subbuf_size);


		break;
	}
	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}
}
764
/*
 * Handle one buffer-level command and send the reply on sock.  The reply
 * payload is built in the global send_buffer (listener thread only).
 * GET_BUF_SHMID_PIPE_FD replies via ustcomm_send_fd() (the pipe fd rides
 * along the message) and returns early; every other command falls through
 * to the common ustcomm_send() at the bottom.
 */
static void process_buffer_cmd(int sock, int command,
			       struct ustcomm_buffer_info *buf_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_buffer_info *reply_msg =
		(struct ustcomm_buffer_info *)send_buffer;
	int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
	long consumed_old;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_BUF_SHMID_PIPE_FD:
		result = get_buffer_shmid_pipe_fd(buf_inf->trace,
						  buf_inf->channel,
						  buf_inf->ch_cpu,
						  &buf_shmid,
						  &buf_struct_shmid,
						  &buf_pipe_fd);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		/* Poison the pointer field; a local pointer is meaningless
		 * to the peer. */
		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->buf_shmid = buf_shmid;
		reply_msg->buf_struct_shmid = buf_struct_shmid;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
		reply_header->fd_included = 1;

		if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
				    &buf_pipe_fd) < 0) {
			ERR("ustcomm_send failed");
		}
		return;

	case NOTIFY_BUF_MAPPED:
		reply_header->result =
			notify_buffer_mapped(buf_inf->trace,
					     buf_inf->channel,
					     buf_inf->ch_cpu);
		break;
	case GET_SUBBUFFER:
		result = get_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu, &consumed_old);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->consumed_old = consumed_old;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case PUT_SUBBUFFER:
		result = put_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu,
				       buf_inf->consumed_old);
		reply_header->result = result;

		break;
	}

	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}

}
837
/*
 * Enable or disable one ust_marker by connecting/disconnecting the
 * "default" probe, then send a header-only reply carrying the result.
 * Failures are only warned about locally; the error code still reaches
 * the requester through reply_header->result.
 */
static void process_ust_marker_cmd(int sock, int command,
			struct ustcomm_ust_marker_info *ust_marker_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	int result = 0;

	memset(reply_header, 0, sizeof(*reply_header));

	switch(command) {
	case ENABLE_MARKER:

		result = ltt_ust_marker_connect(ust_marker_inf->channel,
						ust_marker_inf->ust_marker,
						"default");
		if (result < 0) {
			WARN("could not enable ust_marker; channel=%s,"
			     " name=%s",
			     ust_marker_inf->channel,
			     ust_marker_inf->ust_marker);

		}
		break;
	case DISABLE_MARKER:
		result = ltt_ust_marker_disconnect(ust_marker_inf->channel,
						   ust_marker_inf->ust_marker,
						   "default");
		if (result < 0) {
			WARN("could not disable ust_marker; channel=%s,"
			     " name=%s",
			     ust_marker_inf->channel,
			     ust_marker_inf->ust_marker);
		}
		break;
	}

	reply_header->result = result;

	if (ustcomm_send(sock, reply_header, NULL) < 0) {
		ERR("ustcomm_send failed");
	}

}
881 static void process_client_cmd(struct ustcomm_header *recv_header,
882 char *recv_buf, int sock)
883 {
884 int result;
885 struct ustcomm_header _reply_header;
886 struct ustcomm_header *reply_header = &_reply_header;
887 char *send_buf = send_buffer;
888
889 memset(reply_header, 0, sizeof(*reply_header));
890 memset(send_buf, 0, sizeof(send_buffer));
891
892 switch(recv_header->command) {
893 case GET_SUBBUF_NUM_SIZE:
894 case SET_SUBBUF_NUM:
895 case SET_SUBBUF_SIZE:
896 {
897 struct ustcomm_channel_info *ch_inf;
898 ch_inf = (struct ustcomm_channel_info *)recv_buf;
899 result = ustcomm_unpack_channel_info(ch_inf);
900 if (result < 0) {
901 ERR("couldn't unpack channel info");
902 reply_header->result = -EINVAL;
903 goto send_response;
904 }
905 process_channel_cmd(sock, recv_header->command, ch_inf);
906 return;
907 }
908 case GET_BUF_SHMID_PIPE_FD:
909 case NOTIFY_BUF_MAPPED:
910 case GET_SUBBUFFER:
911 case PUT_SUBBUFFER:
912 {
913 struct ustcomm_buffer_info *buf_inf;
914 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
915 result = ustcomm_unpack_buffer_info(buf_inf);
916 if (result < 0) {
917 ERR("couldn't unpack buffer info");
918 reply_header->result = -EINVAL;
919 goto send_response;
920 }
921 process_buffer_cmd(sock, recv_header->command, buf_inf);
922 return;
923 }
924 case ENABLE_MARKER:
925 case DISABLE_MARKER:
926 {
927 struct ustcomm_ust_marker_info *ust_marker_inf;
928 ust_marker_inf = (struct ustcomm_ust_marker_info *)recv_buf;
929 result = ustcomm_unpack_ust_marker_info(ust_marker_inf);
930 if (result < 0) {
931 ERR("couldn't unpack ust_marker info");
932 reply_header->result = -EINVAL;
933 goto send_response;
934 }
935 process_ust_marker_cmd(sock, recv_header->command, ust_marker_inf);
936 return;
937 }
938 case LIST_MARKERS:
939 {
940 char *ptr;
941 size_t size;
942 FILE *fp;
943
944 fp = open_memstream(&ptr, &size);
945 if (fp == NULL) {
946 ERR("opening memstream failed");
947 return;
948 }
949 print_ust_marker(fp);
950 fclose(fp);
951
952 reply_header->size = size + 1; /* Include final \0 */
953
954 result = ustcomm_send(sock, reply_header, ptr);
955
956 free(ptr);
957
958 if (result < 0) {
959 PERROR("failed to send ust_marker list");
960 }
961
962 break;
963 }
964 case LIST_TRACE_EVENTS:
965 {
966 char *ptr;
967 size_t size;
968 FILE *fp;
969
970 fp = open_memstream(&ptr, &size);
971 if (fp == NULL) {
972 ERR("opening memstream failed");
973 return;
974 }
975 print_trace_events(fp);
976 fclose(fp);
977
978 reply_header->size = size + 1; /* Include final \0 */
979
980 result = ustcomm_send(sock, reply_header, ptr);
981
982 free(ptr);
983
984 if (result < 0) {
985 ERR("list_trace_events failed");
986 return;
987 }
988
989 break;
990 }
991 case LOAD_PROBE_LIB:
992 {
993 char *libfile;
994
995 /* FIXME: No functionality at all... */
996 libfile = recv_buf;
997
998 DBG("load_probe_lib loading %s", libfile);
999
1000 break;
1001 }
1002 case GET_PIDUNIQUE:
1003 {
1004 struct ustcomm_pidunique *pid_msg;
1005 pid_msg = (struct ustcomm_pidunique *)send_buf;
1006
1007 pid_msg->pidunique = pidunique;
1008 reply_header->size = sizeof(pid_msg);
1009
1010 goto send_response;
1011
1012 }
1013 case GET_SOCK_PATH:
1014 {
1015 struct ustcomm_single_field *sock_msg;
1016 char *sock_path_env;
1017
1018 sock_msg = (struct ustcomm_single_field *)send_buf;
1019
1020 sock_path_env = getenv("UST_DAEMON_SOCKET");
1021
1022 if (!sock_path_env) {
1023 result = ustcomm_pack_single_field(reply_header,
1024 sock_msg,
1025 SOCK_DIR "/ustconsumer");
1026
1027 } else {
1028 result = ustcomm_pack_single_field(reply_header,
1029 sock_msg,
1030 sock_path_env);
1031 }
1032 reply_header->result = result;
1033
1034 goto send_response;
1035 }
1036 case SET_SOCK_PATH:
1037 {
1038 struct ustcomm_single_field *sock_msg;
1039 sock_msg = (struct ustcomm_single_field *)recv_buf;
1040 result = ustcomm_unpack_single_field(sock_msg);
1041 if (result < 0) {
1042 reply_header->result = -EINVAL;
1043 goto send_response;
1044 }
1045
1046 reply_header->result = setenv("UST_DAEMON_SOCKET",
1047 sock_msg->field, 1);
1048
1049 goto send_response;
1050 }
1051 case START:
1052 case SETUP_TRACE:
1053 case ALLOC_TRACE:
1054 case CREATE_TRACE:
1055 case START_TRACE:
1056 case STOP_TRACE:
1057 case DESTROY_TRACE:
1058 case FORCE_SUBBUF_SWITCH:
1059 {
1060 struct ustcomm_single_field *trace_inf =
1061 (struct ustcomm_single_field *)recv_buf;
1062
1063 result = ustcomm_unpack_single_field(trace_inf);
1064 if (result < 0) {
1065 ERR("couldn't unpack trace info");
1066 reply_header->result = -EINVAL;
1067 goto send_response;
1068 }
1069
1070 reply_header->result =
1071 process_trace_cmd(recv_header->command,
1072 trace_inf->field);
1073 goto send_response;
1074
1075 }
1076 default:
1077 reply_header->result = -EINVAL;
1078
1079 goto send_response;
1080 }
1081
1082 return;
1083
1084 send_response:
1085 ustcomm_send(sock, reply_header, send_buf);
1086 }
1087
1088 #define MAX_EVENTS 10
1089
1090 void *listener_main(void *p)
1091 {
1092 struct ustcomm_sock *epoll_sock;
1093 struct epoll_event events[MAX_EVENTS];
1094 struct sockaddr addr;
1095 int accept_fd, nfds, result, i, addr_size;
1096
1097 DBG("LISTENER");
1098
1099 pthread_cleanup_push(listener_cleanup, NULL);
1100
1101 for(;;) {
1102 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1103 if (nfds == -1) {
1104 PERROR("listener_main: epoll_wait failed");
1105 continue;
1106 }
1107
1108 for (i = 0; i < nfds; i++) {
1109 pthread_mutex_lock(&listener_thread_data_mutex);
1110 pthread_cleanup_push(release_listener_mutex, NULL);
1111 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1112 if (epoll_sock == listen_sock) {
1113 addr_size = sizeof(struct sockaddr);
1114 accept_fd = accept(epoll_sock->fd,
1115 &addr,
1116 (socklen_t *)&addr_size);
1117 if (accept_fd == -1) {
1118 PERROR("listener_main: accept failed");
1119 continue;
1120 }
1121 ustcomm_init_sock(accept_fd, epoll_fd,
1122 &ust_socks);
1123 } else {
1124 memset(receive_header, 0,
1125 sizeof(*receive_header));
1126 memset(receive_buffer, 0,
1127 sizeof(receive_buffer));
1128 result = ustcomm_recv(epoll_sock->fd,
1129 receive_header,
1130 receive_buffer);
1131 if (result == 0) {
1132 ustcomm_del_sock(epoll_sock, 0);
1133 } else {
1134 process_client_cmd(receive_header,
1135 receive_buffer,
1136 epoll_sock->fd);
1137 }
1138 }
1139 pthread_cleanup_pop(1); /* release listener mutex */
1140 }
1141 }
1142
1143 pthread_cleanup_pop(1);
1144 }
1145
1146 /* These should only be accessed in the parent thread,
1147 * not the listener.
1148 */
1149 static volatile sig_atomic_t have_listener = 0;
1150 static pthread_t listener_thread;
1151
1152 void create_listener(void)
1153 {
1154 int result;
1155 sigset_t sig_all_blocked;
1156 sigset_t orig_parent_mask;
1157
1158 if (have_listener) {
1159 WARN("not creating listener because we already had one");
1160 return;
1161 }
1162
1163 /* A new thread created by pthread_create inherits the signal mask
1164 * from the parent. To avoid any signal being received by the
1165 * listener thread, we block all signals temporarily in the parent,
1166 * while we create the listener thread.
1167 */
1168
1169 sigfillset(&sig_all_blocked);
1170
1171 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1172 if (result) {
1173 PERROR("pthread_sigmask: %s", strerror(result));
1174 }
1175
1176 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1177 if (result == -1) {
1178 PERROR("pthread_create");
1179 }
1180
1181 /* Restore original signal mask in parent */
1182 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1183 if (result) {
1184 PERROR("pthread_sigmask: %s", strerror(result));
1185 } else {
1186 have_listener = 1;
1187 }
1188 }
1189
/* UST_AUTOPROBE modes: disabled, connect the default probe to every
 * ust_marker, or only to those whose "channel/name" matches
 * autoprobe_regex (set when UST_AUTOPROBE starts with '/').
 */
#define AUTOPROBE_DISABLED 0
#define AUTOPROBE_ENABLE_ALL 1
#define AUTOPROBE_ENABLE_REGEX 2
static int autoprobe_method = AUTOPROBE_DISABLED;
static regex_t autoprobe_regex;
1195
1196 static void auto_probe_connect(struct ust_marker *m)
1197 {
1198 int result;
1199
1200 char* concat_name = NULL;
1201 const char *probe_name = "default";
1202
1203 if (autoprobe_method == AUTOPROBE_DISABLED) {
1204 return;
1205 } else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
1206 result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
1207 if (result == -1) {
1208 ERR("auto_probe_connect: asprintf failed (ust_marker %s/%s)",
1209 m->channel, m->name);
1210 return;
1211 }
1212 if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
1213 free(concat_name);
1214 return;
1215 }
1216 free(concat_name);
1217 }
1218
1219 result = ltt_ust_marker_connect(m->channel, m->name, probe_name);
1220 if (result && result != -EEXIST)
1221 ERR("ltt_ust_marker_connect (ust_marker = %s/%s, errno = %d)", m->channel, m->name, -result);
1222
1223 DBG("auto connected ust_marker %s (addr: %p) %s to probe default", m->channel, m, m->name);
1224
1225 }
1226
/* Create the per-process named application socket
 * ("<user sock dir>/<pid>.<mtime>") and register it with the given epoll
 * set.  Returns the new socket wrapper, or NULL on failure.
 *
 * sock_name and dir_name are freed on every path, including success —
 * ustcomm_init_named_socket() presumably keeps its own copy of the path
 * (NOTE(review): confirm in ustcomm.c).
 */
static struct ustcomm_sock * init_app_socket(int epoll_fd)
{
	char *dir_name, *sock_name;
	int result;
	struct ustcomm_sock *sock = NULL;
	time_t mtime;

	dir_name = ustcomm_user_sock_dir();
	if (!dir_name)
		return NULL;

	/* The pid's st_mtime is baked into the socket name, presumably to
	 * disambiguate pid reuse (NOTE(review): confirm). */
	mtime = ustcomm_pid_st_mtime(getpid());
	if (!mtime) {
		goto free_dir_name;
	}

	result = asprintf(&sock_name, "%s/%d.%ld", dir_name,
			  (int) getpid(), (long) mtime);
	if (result < 0) {
		ERR("string overflow allocating socket name, "
		    "UST thread bailing");
		goto free_dir_name;
	}

	result = ensure_dir_exists(dir_name, S_IRWXU);
	if (result == -1) {
		ERR("Unable to create socket directory %s, UST thread bailing",
		    dir_name);
		goto free_sock_name;
	}

	sock = ustcomm_init_named_socket(sock_name, epoll_fd);
	if (!sock) {
		ERR("Error initializing named socket (%s). Check that directory"
		    "exists and that it is writable. UST thread bailing", sock_name);
		goto free_sock_name;
	}

free_sock_name:
	free(sock_name);
free_dir_name:
	free(dir_name);

	return sock;
}
1272
1273 static void __attribute__((constructor)) init()
1274 {
1275 struct timespec ts;
1276 int result;
1277 char* autoprobe_val = NULL;
1278 char* subbuffer_size_val = NULL;
1279 char* subbuffer_count_val = NULL;
1280 unsigned int subbuffer_size;
1281 unsigned int subbuffer_count;
1282 unsigned int power;
1283
1284 /* Assign the pidunique, to be able to differentiate the processes with same
1285 * pid, (before and after an exec).
1286 */
1287 pidunique = make_pidunique();
1288 processpid = getpid();
1289
1290 DBG("Tracectl constructor");
1291
1292 /* Set up epoll */
1293 epoll_fd = epoll_create(MAX_EVENTS);
1294 if (epoll_fd == -1) {
1295 ERR("epoll_create failed, tracing shutting down");
1296 return;
1297 }
1298
1299 /* Create the socket */
1300 listen_sock = init_app_socket(epoll_fd);
1301 if (!listen_sock) {
1302 ERR("failed to create application socket,"
1303 " tracing shutting down");
1304 return;
1305 }
1306
1307 create_listener();
1308
1309 /* Get clock the clock source type */
1310
1311 /* Default clock source */
1312 ust_clock_source = CLOCK_TRACE;
1313 if (clock_gettime(ust_clock_source, &ts) != 0) {
1314 ust_clock_source = CLOCK_MONOTONIC;
1315 DBG("UST traces will not be synchronized with LTTng traces");
1316 }
1317
1318 autoprobe_val = getenv("UST_AUTOPROBE");
1319 if (autoprobe_val) {
1320 struct ust_marker_iter iter;
1321
1322 DBG("Autoprobe enabled.");
1323
1324 /* Ensure ust_marker are initialized */
1325 //init_ust_marker();
1326
1327 /* Ensure ust_marker control is initialized, for the probe */
1328 init_ust_marker_control();
1329
1330 /* first, set the callback that will connect the
1331 * probe on new ust_marker
1332 */
1333 if (autoprobe_val[0] == '/') {
1334 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1335 if (result) {
1336 char regexerr[150];
1337
1338 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1339 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1340 /* don't crash the application just for this */
1341 } else {
1342 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1343 }
1344 } else {
1345 /* just enable all instrumentation */
1346 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1347 }
1348
1349 ust_marker_set_new_ust_marker_cb(auto_probe_connect);
1350
1351 /* Now, connect the probes that were already registered. */
1352 ust_marker_iter_reset(&iter);
1353 ust_marker_iter_start(&iter);
1354
1355 DBG("now iterating on ust_marker already registered");
1356 while (iter.ust_marker) {
1357 DBG("now iterating on ust_marker %s", (*iter.ust_marker)->name);
1358 auto_probe_connect(*iter.ust_marker);
1359 ust_marker_iter_next(&iter);
1360 }
1361 }
1362
1363 if (getenv("UST_OVERWRITE")) {
1364 int val = atoi(getenv("UST_OVERWRITE"));
1365 if (val == 0 || val == 1) {
1366 CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
1367 } else {
1368 WARN("invalid value for UST_OVERWRITE");
1369 }
1370 }
1371
1372 if (getenv("UST_AUTOCOLLECT")) {
1373 int val = atoi(getenv("UST_AUTOCOLLECT"));
1374 if (val == 0 || val == 1) {
1375 CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
1376 } else {
1377 WARN("invalid value for UST_AUTOCOLLECT");
1378 }
1379 }
1380
1381 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1382 if (subbuffer_size_val) {
1383 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1384 power = pow2_higher_or_eq(subbuffer_size);
1385 if (power != subbuffer_size)
1386 WARN("using the next power of two for buffer size = %u\n", power);
1387 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1388 }
1389
1390 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1391 if (subbuffer_count_val) {
1392 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1393 if (subbuffer_count < 2)
1394 subbuffer_count = 2;
1395 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1396 }
1397
1398 if (getenv("UST_TRACE")) {
1399 char trace_name[] = "auto";
1400 char trace_type[] = "ustrelay";
1401
1402 DBG("starting early tracing");
1403
1404 /* Ensure ust_marker control is initialized */
1405 init_ust_marker_control();
1406
1407 /* Ensure ust_marker are initialized */
1408 init_ust_marker();
1409
1410 /* Ensure buffers are initialized, for the transport to be available.
1411 * We are about to set a trace type and it will fail without this.
1412 */
1413 init_ustrelay_transport();
1414
1415 /* FIXME: When starting early tracing (here), depending on the
1416 * order of constructors, it is very well possible some ust_marker
1417 * sections are not yet registered. Because of this, some
1418 * channels may not be registered. Yet, we are about to ask the
1419 * daemon to collect the channels. Channels which are not yet
1420 * registered will not be collected.
1421 *
1422 * Currently, in LTTng, there is no way to add a channel after
1423 * trace start. The reason for this is that it induces complex
1424 * concurrency issues on the trace structures, which can only
1425 * be resolved using RCU. This has not been done yet. As a
1426 * workaround, we are forcing the registration of the "ust"
1427 * channel here. This is the only channel (apart from metadata)
1428 * that can be reliably used in early tracing.
1429 *
1430 * Non-early tracing does not have this problem and can use
1431 * arbitrary channel names.
1432 */
1433 ltt_channels_register("ust");
1434
1435 result = ltt_trace_setup(trace_name);
1436 if (result < 0) {
1437 ERR("ltt_trace_setup failed");
1438 return;
1439 }
1440
1441 result = ltt_trace_set_type(trace_name, trace_type);
1442 if (result < 0) {
1443 ERR("ltt_trace_set_type failed");
1444 return;
1445 }
1446
1447 result = ltt_trace_alloc(trace_name);
1448 if (result < 0) {
1449 ERR("ltt_trace_alloc failed");
1450 return;
1451 }
1452
1453 result = ltt_trace_start(trace_name);
1454 if (result < 0) {
1455 ERR("ltt_trace_start failed");
1456 return;
1457 }
1458
1459 /* Do this after the trace is started in order to avoid creating confusion
1460 * if the trace fails to start. */
1461 inform_consumer_daemon(trace_name);
1462 }
1463
1464 return;
1465
1466 /* should decrementally destroy stuff if error */
1467
1468 }
1469
1470 /* This is only called if we terminate normally, not with an unhandled signal,
1471 * so we cannot rely on it. However, for now, LTTV requires that the header of
1472 * the last sub-buffer contain a valid end time for the trace. This is done
1473 * automatically only when the trace is properly stopped.
1474 *
1475 * If the traced program crashed, it is always possible to manually add the
1476 * right value in the header, or to open the trace in text mode.
1477 *
1478 * FIXME: Fix LTTV so it doesn't need this.
1479 */
1480
/* Stop and tear down the automatic trace, logging (but not propagating)
 * any failure from the stop/destroy steps.
 */
static void destroy_traces(void)
{
	int ret;

	DBG("destructor stopping traces");

	ret = ltt_trace_stop("auto");
	if (ret == -1)
		ERR("ltt_trace_stop error");

	ret = ltt_trace_destroy("auto", 0);
	if (ret == -1)
		ERR("ltt_trace_destroy error");
}
1499
1500 static int trace_recording(void)
1501 {
1502 int retval = 0;
1503 struct ust_trace *trace;
1504
1505 ltt_lock_traces();
1506
1507 cds_list_for_each_entry(trace, &ltt_traces.head, list) {
1508 if (trace->active) {
1509 retval = 1;
1510 break;
1511 }
1512 }
1513
1514 ltt_unlock_traces();
1515
1516 return retval;
1517 }
1518
/* Sleep for usecs microseconds, transparently restarting the sleep if it
 * is interrupted by a signal (nanosleep updates the remaining time).
 *
 * Splits the duration into whole seconds and nanoseconds: the previous
 * code stored usecs * 1000 directly into tv_nsec, which made nanosleep
 * fail with EINVAL for any duration of one second or more (tv_nsec must
 * be < 1000000000).
 *
 * Returns 0 on success, or -1 with errno set on a non-EINTR error.
 */
int restarting_usleep(useconds_t usecs)
{
	struct timespec tv;
	int result;

	tv.tv_sec = usecs / 1000000;
	tv.tv_nsec = (long) (usecs % 1000000) * 1000;

	do {
		result = nanosleep(&tv, &tv);
	} while (result == -1 && errno == EINTR);

	return result;
}
1533
1534 static void stop_listener(void)
1535 {
1536 int result;
1537
1538 if (!have_listener)
1539 return;
1540
1541 result = pthread_cancel(listener_thread);
1542 if (result != 0) {
1543 ERR("pthread_cancel: %s", strerror(result));
1544 }
1545 result = pthread_join(listener_thread, NULL);
1546 if (result != 0) {
1547 ERR("pthread_join: %s", strerror(result));
1548 }
1549 }
1550
1551 /* This destructor keeps the process alive for a few seconds in order
1552 * to leave time for ustconsumer to connect to its buffers. This is necessary
1553 * for programs whose execution is very short. It is also useful in all
1554 * programs when tracing is started close to the end of the program
1555 * execution.
1556 *
1557 * FIXME: For now, this only works for the first trace created in a
1558 * process.
1559 */
1560
1561 static void __attribute__((destructor)) keepalive()
1562 {
1563 if (processpid != getpid()) {
1564 return;
1565 }
1566
1567 if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
1568 int total = 0;
1569 DBG("Keeping process alive for consumer daemon...");
1570 while (CMM_LOAD_SHARED(buffers_to_export)) {
1571 const int interv = 200000;
1572 restarting_usleep(interv);
1573 total += interv;
1574
1575 if (total >= 3000000) {
1576 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1577 break;
1578 }
1579 }
1580 DBG("Finally dying...");
1581 }
1582
1583 destroy_traces();
1584
1585 /* Ask the listener to stop and clean up. */
1586 stop_listener();
1587 }
1588
1589 void ust_potential_exec(void)
1590 {
1591 ust_marker(potential_exec, UST_MARKER_NOARGS);
1592
1593 DBG("test");
1594
1595 keepalive();
1596 }
1597
1598 /* Notify ust that there was a fork. This needs to be called inside
1599 * the new process, anytime a process whose memory is not shared with
1600 * the parent is created. If this function is not called, the events
1601 * of the new process will not be collected.
1602 *
1603 * Signals should be disabled before the fork and reenabled only after
1604 * this call in order to guarantee tracing is not started before ust_fork()
1605 * sanitizes the new process.
1606 */
1607
1608 static void ust_fork(void)
1609 {
1610 struct ustcomm_sock *sock, *sock_tmp;
1611 struct ust_trace *trace, *trace_tmp;
1612 int result;
1613
1614 /* FIXME: technically, the locks could have been taken before the fork */
1615 DBG("ust: forking");
1616
1617 /* Get the pid of the new process */
1618 processpid = getpid();
1619
1620 /*
1621 * FIXME: This could be prettier, we loop over the list twice and
1622 * following good locking practice should lock around the loop
1623 */
1624 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1625 ltt_trace_stop(trace->trace_name);
1626 }
1627
1628 /* Delete all active connections, but leave them in the epoll set */
1629 cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
1630 ustcomm_del_sock(sock, 1);
1631 }
1632
1633 /*
1634 * FIXME: This could be prettier, we loop over the list twice and
1635 * following good locking practice should lock around the loop
1636 */
1637 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1638 ltt_trace_destroy(trace->trace_name, 1);
1639 }
1640
1641 /* Clean up the listener socket and epoll, keeping the socket file */
1642 if (listen_sock) {
1643 ustcomm_del_named_sock(listen_sock, 1);
1644 listen_sock = NULL;
1645 }
1646 close(epoll_fd);
1647
1648 /* Re-start the launch sequence */
1649 CMM_STORE_SHARED(buffers_to_export, 0);
1650 have_listener = 0;
1651
1652 /* Set up epoll */
1653 epoll_fd = epoll_create(MAX_EVENTS);
1654 if (epoll_fd == -1) {
1655 ERR("epoll_create failed, tracing shutting down");
1656 return;
1657 }
1658
1659 /* Create the socket */
1660 listen_sock = init_app_socket(epoll_fd);
1661 if (!listen_sock) {
1662 ERR("failed to create application socket,"
1663 " tracing shutting down");
1664 return;
1665 }
1666 create_listener();
1667 ltt_trace_setup("auto");
1668 result = ltt_trace_set_type("auto", "ustrelay");
1669 if (result < 0) {
1670 ERR("ltt_trace_set_type failed");
1671 return;
1672 }
1673
1674 ltt_trace_alloc("auto");
1675 ltt_trace_start("auto");
1676 inform_consumer_daemon("auto");
1677 }
1678
1679 void ust_before_fork(ust_fork_info_t *fork_info)
1680 {
1681 /* Disable signals. This is to avoid that the child
1682 * intervenes before it is properly setup for tracing. It is
1683 * safer to disable all signals, because then we know we are not
1684 * breaking anything by restoring the original mask.
1685 */
1686 sigset_t all_sigs;
1687 int result;
1688
1689 /* FIXME:
1690 - only do this if tracing is active
1691 */
1692
1693 /* Disable signals */
1694 sigfillset(&all_sigs);
1695 result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
1696 if (result == -1) {
1697 PERROR("sigprocmask");
1698 return;
1699 }
1700
1701 /*
1702 * Take the fork lock to make sure we are not in the middle of
1703 * something in the listener thread.
1704 */
1705 pthread_mutex_lock(&listener_thread_data_mutex);
1706 /*
1707 * Hold listen_sock_mutex to protect from listen_sock teardown.
1708 */
1709 pthread_mutex_lock(&listen_sock_mutex);
1710 rcu_bp_before_fork();
1711 }
1712
1713 /* Don't call this function directly in a traced program */
1714 static void ust_after_fork_common(ust_fork_info_t *fork_info)
1715 {
1716 int result;
1717
1718 pthread_mutex_unlock(&listen_sock_mutex);
1719 pthread_mutex_unlock(&listener_thread_data_mutex);
1720
1721 /* Restore signals */
1722 result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
1723 if (result == -1) {
1724 PERROR("sigprocmask");
1725 return;
1726 }
1727 }
1728
1729 void ust_after_fork_parent(ust_fork_info_t *fork_info)
1730 {
1731 rcu_bp_after_fork_parent();
1732 /* Release mutexes and reenable signals */
1733 ust_after_fork_common(fork_info);
1734 }
1735
1736 void ust_after_fork_child(ust_fork_info_t *fork_info)
1737 {
1738 /* Release urcu mutexes */
1739 rcu_bp_after_fork_child();
1740
1741 /* Sanitize the child */
1742 ust_fork();
1743
1744 /* Then release mutexes and reenable signals */
1745 ust_after_fork_common(fork_info);
1746 }
1747
This page took 0.092158 seconds and 4 git commands to generate.