Change force_subbuffer switch to be per trace
[ust.git] / libust / tracectl.c
1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <stdint.h>
27 #include <pthread.h>
28 #include <signal.h>
29 #include <sys/epoll.h>
30 #include <sys/time.h>
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <fcntl.h>
34 #include <poll.h>
35 #include <regex.h>
36 #include <urcu/uatomic_arch.h>
37 #include <urcu/list.h>
38
39 #include <ust/marker.h>
40 #include <ust/tracepoint.h>
41 #include <ust/tracectl.h>
42 #include <ust/clock.h>
43 #include "tracer.h"
44 #include "usterr.h"
45 #include "ustcomm.h"
46 #include "buffers.h"
47 #include "marker-control.h"
48
/* This should only be accessed by the constructor, before the creation
 * of the listener, and then only by the listener.
 */
s64 pidunique = -1LL;

/* The process pid is used to detect a non-traceable fork
 * and allow the non-traceable fork to be ignored
 * by destructor sequences in libust
 */
static pid_t processpid = 0;

/* Receive/send scratch buffers for the listener thread.  Only the
 * listener touches them (see listener_main), so no locking is needed.
 */
static struct ustcomm_header _receive_header;
static struct ustcomm_header *receive_header = &_receive_header;
static char receive_buffer[USTCOMM_BUFFER_SIZE];
static char send_buffer[USTCOMM_BUFFER_SIZE];

/* epoll instance the listener waits on; created in the constructor. */
static int epoll_fd;

/*
 * Listener thread data vs fork() protection mechanism. Ensures that no listener
 * thread mutexes and data structures are being concurrently modified or held by
 * other threads when fork() is executed.
 */
static pthread_mutex_t listener_thread_data_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mutex protecting listen_sock. Nests inside listener_thread_data_mutex. */
static pthread_mutex_t listen_sock_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct ustcomm_sock *listen_sock;

extern struct chan_info_struct chan_infos[];

/* Buffers the consumer daemon has been told about but not yet mapped. */
static struct cds_list_head open_buffers_list = CDS_LIST_HEAD_INIT(open_buffers_list);

static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);

/* volatile because shared between the listener and the main thread */
/* NOTE(review): the comment above says "volatile" but the declaration
 * is not; accesses go through CMM_LOAD_SHARED/CMM_STORE_SHARED instead,
 * which presumably provide the needed semantics — confirm.
 */
int buffers_to_export = 0;

/* Clock id used for trace timestamps; chosen in the constructor. */
int ust_clock_source;
88
89 static long long make_pidunique(void)
90 {
91 s64 retval;
92 struct timeval tv;
93
94 gettimeofday(&tv, NULL);
95
96 retval = tv.tv_sec;
97 retval <<= 32;
98 retval |= tv.tv_usec;
99
100 return retval;
101 }
102
103 static void print_markers(FILE *fp)
104 {
105 struct marker_iter iter;
106
107 lock_markers();
108 marker_iter_reset(&iter);
109 marker_iter_start(&iter);
110
111 while (iter.marker) {
112 fprintf(fp, "marker: %s/%s %d \"%s\" %p\n",
113 (*iter.marker)->channel,
114 (*iter.marker)->name,
115 (int)imv_read((*iter.marker)->state),
116 (*iter.marker)->format,
117 (*iter.marker)->location);
118 marker_iter_next(&iter);
119 }
120 unlock_markers();
121 }
122
123 static void print_trace_events(FILE *fp)
124 {
125 struct trace_event_iter iter;
126
127 lock_trace_events();
128 trace_event_iter_reset(&iter);
129 trace_event_iter_start(&iter);
130
131 while (iter.trace_event) {
132 fprintf(fp, "trace_event: %s\n", (*iter.trace_event)->name);
133 trace_event_iter_next(&iter);
134 }
135 unlock_trace_events();
136 }
137
138 static int connect_ustconsumer(void)
139 {
140 int result, fd;
141 char default_daemon_path[] = SOCK_DIR "/ustconsumer";
142 char *explicit_daemon_path, *daemon_path;
143
144 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
145 if (explicit_daemon_path) {
146 daemon_path = explicit_daemon_path;
147 } else {
148 daemon_path = default_daemon_path;
149 }
150
151 DBG("Connecting to daemon_path %s", daemon_path);
152
153 result = ustcomm_connect_path(daemon_path, &fd);
154 if (result < 0) {
155 WARN("connect_ustconsumer failed, daemon_path: %s",
156 daemon_path);
157 return result;
158 }
159
160 return fd;
161 }
162
163
164 static void request_buffer_consumer(int sock,
165 const char *trace,
166 const char *channel,
167 int cpu)
168 {
169 struct ustcomm_header send_header, recv_header;
170 struct ustcomm_buffer_info buf_inf;
171 int result = 0;
172
173 result = ustcomm_pack_buffer_info(&send_header,
174 &buf_inf,
175 trace,
176 channel,
177 cpu);
178
179 if (result < 0) {
180 ERR("failed to pack buffer info message %s_%d",
181 channel, cpu);
182 return;
183 }
184
185 buf_inf.pid = getpid();
186 send_header.command = CONSUME_BUFFER;
187
188 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
189 &recv_header, NULL);
190 if (result <= 0) {
191 PERROR("request for buffer consumer failed, is the daemon online?");
192 }
193
194 return;
195 }
196
197 /* Ask the daemon to collect a trace called trace_name and being
198 * produced by this pid.
199 *
200 * The trace must be at least allocated. (It can also be started.)
201 * This is because _ltt_trace_find is used.
202 */
203
204 static void inform_consumer_daemon(const char *trace_name)
205 {
206 int sock, i,j;
207 struct ust_trace *trace;
208 const char *ch_name;
209
210 sock = connect_ustconsumer();
211 if (sock < 0) {
212 return;
213 }
214
215 DBG("Connected to ustconsumer");
216
217 ltt_lock_traces();
218
219 trace = _ltt_trace_find(trace_name);
220 if (trace == NULL) {
221 WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
222 goto unlock_traces;
223 }
224
225 for (i=0; i < trace->nr_channels; i++) {
226 if (trace->channels[i].request_collection) {
227 /* iterate on all cpus */
228 for (j=0; j<trace->channels[i].n_cpus; j++) {
229 ch_name = trace->channels[i].channel_name;
230 request_buffer_consumer(sock, trace_name,
231 ch_name, j);
232 CMM_STORE_SHARED(buffers_to_export,
233 CMM_LOAD_SHARED(buffers_to_export)+1);
234 }
235 }
236 }
237
238 unlock_traces:
239 ltt_unlock_traces();
240
241 close(sock);
242 }
243
244 static struct ust_channel *find_channel(const char *ch_name,
245 struct ust_trace *trace)
246 {
247 int i;
248
249 for (i=0; i<trace->nr_channels; i++) {
250 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
251 return &trace->channels[i];
252 }
253 }
254
255 return NULL;
256 }
257
258 static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
259 int ch_cpu,
260 int *buf_shmid,
261 int *buf_struct_shmid,
262 int *buf_pipe_fd)
263 {
264 struct ust_trace *trace;
265 struct ust_channel *channel;
266 struct ust_buffer *buf;
267
268 DBG("get_buffer_shmid_pipe_fd");
269
270 ltt_lock_traces();
271 trace = _ltt_trace_find(trace_name);
272 ltt_unlock_traces();
273
274 if (trace == NULL) {
275 ERR("cannot find trace!");
276 return -ENODATA;
277 }
278
279 channel = find_channel(ch_name, trace);
280 if (!channel) {
281 ERR("cannot find channel %s!", ch_name);
282 return -ENODATA;
283 }
284
285 buf = channel->buf[ch_cpu];
286
287 *buf_shmid = buf->shmid;
288 *buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
289 *buf_pipe_fd = buf->data_ready_fd_read;
290
291 return 0;
292 }
293
294 static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
295 int *num, int *size)
296 {
297 struct ust_trace *trace;
298 struct ust_channel *channel;
299
300 DBG("get_subbuf_size");
301
302 ltt_lock_traces();
303 trace = _ltt_trace_find(trace_name);
304 ltt_unlock_traces();
305
306 if (!trace) {
307 ERR("cannot find trace!");
308 return -ENODATA;
309 }
310
311 channel = find_channel(ch_name, trace);
312 if (!channel) {
313 ERR("unable to find channel");
314 return -ENODATA;
315 }
316
317 *num = channel->subbuf_cnt;
318 *size = channel->subbuf_size;
319
320 return 0;
321 }
322
/* Return the smallest power of two greater than or equal to v.
 *
 * v == 0 and v == 1 both yield 1.  The previous implementation used
 * fls() and computed 1 << (fls(0) - 1) == 1 << -1 for v == 0, which is
 * undefined behavior; this loop form is defined for all inputs and
 * needs no non-portable bit-scan helper.
 */
static unsigned int pow2_higher_or_eq(unsigned int v)
{
	unsigned int power = 1;

	while (power < v)
		power <<= 1;

	return power;
}
335
336 static int set_subbuf_size(const char *trace_name, const char *ch_name,
337 unsigned int size)
338 {
339 unsigned int power;
340 int retval = 0;
341 struct ust_trace *trace;
342 struct ust_channel *channel;
343
344 DBG("set_subbuf_size");
345
346 power = pow2_higher_or_eq(size);
347 power = max_t(unsigned int, 2u, power);
348 if (power != size) {
349 WARN("using the next power of two for buffer size = %u\n", power);
350 }
351
352 ltt_lock_traces();
353 trace = _ltt_trace_find_setup(trace_name);
354 if (trace == NULL) {
355 ERR("cannot find trace!");
356 retval = -ENODATA;
357 goto unlock_traces;
358 }
359
360 channel = find_channel(ch_name, trace);
361 if (!channel) {
362 ERR("unable to find channel");
363 retval = -ENODATA;
364 goto unlock_traces;
365 }
366
367 channel->subbuf_size = power;
368 DBG("the set_subbuf_size for the requested channel is %zu", channel->subbuf_size);
369
370 unlock_traces:
371 ltt_unlock_traces();
372
373 return retval;
374 }
375
376 static int set_subbuf_num(const char *trace_name, const char *ch_name,
377 unsigned int num)
378 {
379 struct ust_trace *trace;
380 struct ust_channel *channel;
381 int retval = 0;
382
383 DBG("set_subbuf_num");
384
385 if (num < 2) {
386 ERR("subbuffer count should be greater than 2");
387 return -EINVAL;
388 }
389
390 ltt_lock_traces();
391 trace = _ltt_trace_find_setup(trace_name);
392 if (trace == NULL) {
393 ERR("cannot find trace!");
394 retval = -ENODATA;
395 goto unlock_traces;
396 }
397
398 channel = find_channel(ch_name, trace);
399 if (!channel) {
400 ERR("unable to find channel");
401 retval = -ENODATA;
402 goto unlock_traces;
403 }
404
405 channel->subbuf_cnt = num;
406 DBG("the set_subbuf_cnt for the requested channel is %u", channel->subbuf_cnt);
407
408 unlock_traces:
409 ltt_unlock_traces();
410 return retval;
411 }
412
413 static int get_subbuffer(const char *trace_name, const char *ch_name,
414 int ch_cpu, long *consumed_old)
415 {
416 int retval = 0;
417 struct ust_trace *trace;
418 struct ust_channel *channel;
419 struct ust_buffer *buf;
420
421 DBG("get_subbuf");
422
423 *consumed_old = 0;
424
425 ltt_lock_traces();
426 trace = _ltt_trace_find(trace_name);
427
428 if (!trace) {
429 DBG("Cannot find trace. It was likely destroyed by the user.");
430 retval = -ENODATA;
431 goto unlock_traces;
432 }
433
434 channel = find_channel(ch_name, trace);
435 if (!channel) {
436 ERR("unable to find channel");
437 retval = -ENODATA;
438 goto unlock_traces;
439 }
440
441 buf = channel->buf[ch_cpu];
442
443 retval = ust_buffers_get_subbuf(buf, consumed_old);
444 if (retval < 0) {
445 WARN("missed buffer?");
446 }
447
448 unlock_traces:
449 ltt_unlock_traces();
450
451 return retval;
452 }
453
454
/* Record that the consumer daemon has mapped one cpu buffer into its
 * own memory (NOTIFY_BUF_MAPPED command).
 *
 * Accounts for the export in buffers_to_export and links the buffer on
 * open_buffers_list.  Returns 0, or -ENODATA if the trace or channel
 * has disappeared.
 */
static int notify_buffer_mapped(const char *trace_name,
				const char *ch_name,
				int ch_cpu)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_buffer_fd");

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		retval = -ENODATA;
		DBG("Cannot find trace. It was likely destroyed by the user.");
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		retval = -ENODATA;
		ERR("unable to find channel");
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	/* Being here is the proof the daemon has mapped the buffer in its
	 * memory. We may now decrement buffers_to_export.
	 */
	/* NOTE(review): consumed == 0 appears to mean "first time this
	 * buffer is mapped", so a later re-map does not decrement the
	 * counter twice — confirm against the consumer daemon protocol.
	 */
	if (uatomic_read(&buf->consumed) == 0) {
		DBG("decrementing buffers_to_export");
		CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
	}

	/* The buffer has been exported, ergo, we can add it to the
	 * list of open buffers
	 */
	cds_list_add(&buf->open_buffers_list, &open_buffers_list);

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
502
503 static int put_subbuffer(const char *trace_name, const char *ch_name,
504 int ch_cpu, long consumed_old)
505 {
506 int retval = 0;
507 struct ust_trace *trace;
508 struct ust_channel *channel;
509 struct ust_buffer *buf;
510
511 DBG("put_subbuf");
512
513 ltt_lock_traces();
514 trace = _ltt_trace_find(trace_name);
515
516 if (!trace) {
517 retval = -ENODATA;
518 DBG("Cannot find trace. It was likely destroyed by the user.");
519 goto unlock_traces;
520 }
521
522 channel = find_channel(ch_name, trace);
523 if (!channel) {
524 retval = -ENODATA;
525 ERR("unable to find channel");
526 goto unlock_traces;
527 }
528
529 buf = channel->buf[ch_cpu];
530
531 retval = ust_buffers_put_subbuf(buf, consumed_old);
532 if (retval < 0) {
533 WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
534 ch_name, ch_cpu);
535 } else {
536 DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
537 ch_name, ch_cpu);
538 }
539
540 unlock_traces:
541 ltt_unlock_traces();
542
543 return retval;
544 }
545
/* pthread cleanup handler: releases the listener thread data mutex,
 * both on the normal path (pthread_cleanup_pop(1) in listener_main)
 * and if the listener thread is cancelled while holding it.
 */
static void release_listener_mutex(void *ptr)
{
	pthread_mutex_unlock(&listener_thread_data_mutex);
}
550
/* pthread cleanup handler installed by listener_main: tears down the
 * named application socket when the listener thread exits or is
 * cancelled.  listen_sock is read and cleared under listen_sock_mutex
 * so other users of the socket see a consistent value.
 */
static void listener_cleanup(void *ptr)
{
	pthread_mutex_lock(&listen_sock_mutex);
	if (listen_sock) {
		ustcomm_del_named_sock(listen_sock, 0);
		listen_sock = NULL;
	}
	pthread_mutex_unlock(&listen_sock_mutex);
}
560
561 static int force_subbuf_switch(const char *trace_name)
562 {
563 struct ust_trace *trace;
564 int i, j, retval = 0;
565
566 ltt_lock_traces();
567 trace = _ltt_trace_find(trace_name);
568 if (!trace) {
569 retval = -ENODATA;
570 DBG("Cannot find trace. It was likely destroyed by the user.");
571 goto unlock_traces;
572 }
573
574 for (i = 0; i < trace->nr_channels; i++) {
575 for (j = 0; j < trace->channels[i].n_cpus; j++) {
576 ltt_force_switch(trace->channels[i].buf[j],
577 FORCE_FLUSH);
578 }
579 }
580
581 unlock_traces:
582 ltt_unlock_traces();
583
584 return retval;
585 }
586
587 /* Simple commands are those which need only respond with a return value. */
588 static int process_simple_client_cmd(int command, char *recv_buf)
589 {
590 int result;
591
592 switch(command) {
593 case SET_SOCK_PATH:
594 {
595 struct ustcomm_single_field *sock_msg;
596 sock_msg = (struct ustcomm_single_field *)recv_buf;
597 result = ustcomm_unpack_single_field(sock_msg);
598 if (result < 0) {
599 return result;
600 }
601 return setenv("UST_DAEMON_SOCKET", sock_msg->field, 1);
602 }
603
604 default:
605 return -EINVAL;
606 }
607
608 return 0;
609 }
610
611
612 static int process_trace_cmd(int command, char *trace_name)
613 {
614 int result;
615 char trace_type[] = "ustrelay";
616
617 switch(command) {
618 case START:
619 /* start is an operation that setups the trace, allocates it and starts it */
620 result = ltt_trace_setup(trace_name);
621 if (result < 0) {
622 ERR("ltt_trace_setup failed");
623 return result;
624 }
625
626 result = ltt_trace_set_type(trace_name, trace_type);
627 if (result < 0) {
628 ERR("ltt_trace_set_type failed");
629 return result;
630 }
631
632 result = ltt_trace_alloc(trace_name);
633 if (result < 0) {
634 ERR("ltt_trace_alloc failed");
635 return result;
636 }
637
638 inform_consumer_daemon(trace_name);
639
640 result = ltt_trace_start(trace_name);
641 if (result < 0) {
642 ERR("ltt_trace_start failed");
643 return result;
644 }
645
646 return 0;
647 case SETUP_TRACE:
648 DBG("trace setup");
649
650 result = ltt_trace_setup(trace_name);
651 if (result < 0) {
652 ERR("ltt_trace_setup failed");
653 return result;
654 }
655
656 result = ltt_trace_set_type(trace_name, trace_type);
657 if (result < 0) {
658 ERR("ltt_trace_set_type failed");
659 return result;
660 }
661
662 return 0;
663 case ALLOC_TRACE:
664 DBG("trace alloc");
665
666 result = ltt_trace_alloc(trace_name);
667 if (result < 0) {
668 ERR("ltt_trace_alloc failed");
669 return result;
670 }
671 inform_consumer_daemon(trace_name);
672
673 return 0;
674
675 case CREATE_TRACE:
676 DBG("trace create");
677
678 result = ltt_trace_setup(trace_name);
679 if (result < 0) {
680 ERR("ltt_trace_setup failed");
681 return result;
682 }
683
684 result = ltt_trace_set_type(trace_name, trace_type);
685 if (result < 0) {
686 ERR("ltt_trace_set_type failed");
687 return result;
688 }
689
690 return 0;
691 case START_TRACE:
692 DBG("trace start");
693
694 result = ltt_trace_alloc(trace_name);
695 if (result < 0) {
696 ERR("ltt_trace_alloc failed");
697 return result;
698 }
699 if (!result) {
700 inform_consumer_daemon(trace_name);
701 }
702
703 result = ltt_trace_start(trace_name);
704 if (result < 0) {
705 ERR("ltt_trace_start failed");
706 return result;
707 }
708
709 return 0;
710 case STOP_TRACE:
711 DBG("trace stop");
712
713 result = ltt_trace_stop(trace_name);
714 if (result < 0) {
715 ERR("ltt_trace_stop failed");
716 return result;
717 }
718
719 return 0;
720 case DESTROY_TRACE:
721 DBG("trace destroy");
722
723 result = ltt_trace_destroy(trace_name, 0);
724 if (result < 0) {
725 ERR("ltt_trace_destroy failed");
726 return result;
727 }
728 return 0;
729 case FORCE_SUBBUF_SWITCH:
730 DBG("force switch");
731
732 result = force_subbuf_switch(trace_name);
733 if (result < 0) {
734 ERR("force_subbuf_switch failed");
735 return result;
736 }
737 return 0;
738 }
739
740 return 0;
741 }
742
743
/* Handle a channel-level command (get/set sub-buffer count and size)
 * and send the reply on sock.  The reply payload is built in the
 * listener's shared send_buffer.
 */
static void process_channel_cmd(int sock, int command,
				struct ustcomm_channel_info *ch_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_channel_info *reply_msg =
		(struct ustcomm_channel_info *)send_buffer;
	int result, offset = 0, num, size;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_SUBBUF_NUM_SIZE:
		result = get_subbuf_num_size(ch_inf->trace,
					     ch_inf->channel,
					     &num, &size);
		if (result < 0) {
			/* error reply: header only, result carries it */
			reply_header->result = result;
			break;
		}

		/* The channel pointer has no meaning on the receiving
		 * side; poison it so accidental use is detectable. */
		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->subbuf_num = num;
		reply_msg->subbuf_size = size;


		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case SET_SUBBUF_NUM:
		reply_header->result = set_subbuf_num(ch_inf->trace,
						      ch_inf->channel,
						      ch_inf->subbuf_num);

		break;
	case SET_SUBBUF_SIZE:
		reply_header->result = set_subbuf_size(ch_inf->trace,
						      ch_inf->channel,
						      ch_inf->subbuf_size);


		break;
	}
	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}
}
791
/* Handle a buffer-level command and send the reply on sock.
 *
 * GET_BUF_SHMID_PIPE_FD replies through ustcomm_send_fd() — the pipe fd
 * travels as ancillary data — and returns early; every other command
 * falls through to the common ustcomm_send() at the end.
 */
static void process_buffer_cmd(int sock, int command,
			       struct ustcomm_buffer_info *buf_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_buffer_info *reply_msg =
		(struct ustcomm_buffer_info *)send_buffer;
	int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
	long consumed_old;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_BUF_SHMID_PIPE_FD:
		result = get_buffer_shmid_pipe_fd(buf_inf->trace,
						  buf_inf->channel,
						  buf_inf->ch_cpu,
						  &buf_shmid,
						  &buf_struct_shmid,
						  &buf_pipe_fd);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		/* Poison the channel pointer: meaningless to the peer. */
		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->buf_shmid = buf_shmid;
		reply_msg->buf_struct_shmid = buf_struct_shmid;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
		reply_header->fd_included = 1;

		/* Dedicated send: the fd rides along as SCM_RIGHTS-style
		 * ancillary data, so the common send below cannot be used. */
		if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
				    &buf_pipe_fd) < 0) {
			ERR("ustcomm_send failed");
		}
		return;

	case NOTIFY_BUF_MAPPED:
		reply_header->result =
			notify_buffer_mapped(buf_inf->trace,
					     buf_inf->channel,
					     buf_inf->ch_cpu);
		break;
	case GET_SUBBUFFER:
		result = get_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu, &consumed_old);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->consumed_old = consumed_old;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case PUT_SUBBUFFER:
		result = put_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu,
				       buf_inf->consumed_old);
		reply_header->result = result;

		break;
	}

	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}

}
864
865 static void process_marker_cmd(int sock, int command,
866 struct ustcomm_marker_info *marker_inf)
867 {
868 struct ustcomm_header _reply_header;
869 struct ustcomm_header *reply_header = &_reply_header;
870 int result = 0;
871
872 memset(reply_header, 0, sizeof(*reply_header));
873
874 switch(command) {
875 case ENABLE_MARKER:
876
877 result = ltt_marker_connect(marker_inf->channel,
878 marker_inf->marker,
879 "default");
880 if (result < 0) {
881 WARN("could not enable marker; channel=%s,"
882 " name=%s",
883 marker_inf->channel,
884 marker_inf->marker);
885
886 }
887 break;
888 case DISABLE_MARKER:
889 result = ltt_marker_disconnect(marker_inf->channel,
890 marker_inf->marker,
891 "default");
892 if (result < 0) {
893 WARN("could not disable marker; channel=%s,"
894 " name=%s",
895 marker_inf->channel,
896 marker_inf->marker);
897 }
898 break;
899 }
900
901 reply_header->result = result;
902
903 if (ustcomm_send(sock, reply_header, NULL) < 0) {
904 ERR("ustcomm_send failed");
905 }
906
907 }
908 static void process_client_cmd(struct ustcomm_header *recv_header,
909 char *recv_buf, int sock)
910 {
911 int result;
912 struct ustcomm_header _reply_header;
913 struct ustcomm_header *reply_header = &_reply_header;
914 char *send_buf = send_buffer;
915
916 memset(reply_header, 0, sizeof(*reply_header));
917 memset(send_buf, 0, sizeof(send_buffer));
918
919 switch(recv_header->command) {
920 case GET_SUBBUF_NUM_SIZE:
921 case SET_SUBBUF_NUM:
922 case SET_SUBBUF_SIZE:
923 {
924 struct ustcomm_channel_info *ch_inf;
925 ch_inf = (struct ustcomm_channel_info *)recv_buf;
926 result = ustcomm_unpack_channel_info(ch_inf);
927 if (result < 0) {
928 ERR("couldn't unpack channel info");
929 reply_header->result = -EINVAL;
930 goto send_response;
931 }
932 process_channel_cmd(sock, recv_header->command, ch_inf);
933 return;
934 }
935 case GET_BUF_SHMID_PIPE_FD:
936 case NOTIFY_BUF_MAPPED:
937 case GET_SUBBUFFER:
938 case PUT_SUBBUFFER:
939 {
940 struct ustcomm_buffer_info *buf_inf;
941 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
942 result = ustcomm_unpack_buffer_info(buf_inf);
943 if (result < 0) {
944 ERR("couldn't unpack buffer info");
945 reply_header->result = -EINVAL;
946 goto send_response;
947 }
948 process_buffer_cmd(sock, recv_header->command, buf_inf);
949 return;
950 }
951 case ENABLE_MARKER:
952 case DISABLE_MARKER:
953 {
954 struct ustcomm_marker_info *marker_inf;
955 marker_inf = (struct ustcomm_marker_info *)recv_buf;
956 result = ustcomm_unpack_marker_info(marker_inf);
957 if (result < 0) {
958 ERR("couldn't unpack marker info");
959 reply_header->result = -EINVAL;
960 goto send_response;
961 }
962 process_marker_cmd(sock, recv_header->command, marker_inf);
963 return;
964 }
965 case LIST_MARKERS:
966 {
967 char *ptr;
968 size_t size;
969 FILE *fp;
970
971 fp = open_memstream(&ptr, &size);
972 if (fp == NULL) {
973 ERR("opening memstream failed");
974 return;
975 }
976 print_markers(fp);
977 fclose(fp);
978
979 reply_header->size = size + 1; /* Include final \0 */
980
981 result = ustcomm_send(sock, reply_header, ptr);
982
983 free(ptr);
984
985 if (result < 0) {
986 PERROR("failed to send markers list");
987 }
988
989 break;
990 }
991 case LIST_TRACE_EVENTS:
992 {
993 char *ptr;
994 size_t size;
995 FILE *fp;
996
997 fp = open_memstream(&ptr, &size);
998 if (fp == NULL) {
999 ERR("opening memstream failed");
1000 return;
1001 }
1002 print_trace_events(fp);
1003 fclose(fp);
1004
1005 reply_header->size = size + 1; /* Include final \0 */
1006
1007 result = ustcomm_send(sock, reply_header, ptr);
1008
1009 free(ptr);
1010
1011 if (result < 0) {
1012 ERR("list_trace_events failed");
1013 return;
1014 }
1015
1016 break;
1017 }
1018 case LOAD_PROBE_LIB:
1019 {
1020 char *libfile;
1021
1022 /* FIXME: No functionality at all... */
1023 libfile = recv_buf;
1024
1025 DBG("load_probe_lib loading %s", libfile);
1026
1027 break;
1028 }
1029 case GET_PIDUNIQUE:
1030 {
1031 struct ustcomm_pidunique *pid_msg;
1032 pid_msg = (struct ustcomm_pidunique *)send_buf;
1033
1034 pid_msg->pidunique = pidunique;
1035 reply_header->size = sizeof(pid_msg);
1036
1037 goto send_response;
1038
1039 }
1040 case GET_SOCK_PATH:
1041 {
1042 struct ustcomm_single_field *sock_msg;
1043 char *sock_path_env;
1044
1045 sock_msg = (struct ustcomm_single_field *)send_buf;
1046
1047 sock_path_env = getenv("UST_DAEMON_SOCKET");
1048
1049 if (!sock_path_env) {
1050 result = ustcomm_pack_single_field(reply_header,
1051 sock_msg,
1052 SOCK_DIR "/ustconsumer");
1053
1054 } else {
1055 result = ustcomm_pack_single_field(reply_header,
1056 sock_msg,
1057 sock_path_env);
1058 }
1059 reply_header->result = result;
1060
1061 goto send_response;
1062 }
1063 case START:
1064 case SETUP_TRACE:
1065 case ALLOC_TRACE:
1066 case CREATE_TRACE:
1067 case START_TRACE:
1068 case STOP_TRACE:
1069 case DESTROY_TRACE:
1070 case FORCE_SUBBUF_SWITCH:
1071 {
1072 struct ustcomm_single_field *trace_inf =
1073 (struct ustcomm_single_field *)recv_buf;
1074
1075 result = ustcomm_unpack_single_field(trace_inf);
1076 if (result < 0) {
1077 ERR("couldn't unpack trace info");
1078 reply_header->result = -EINVAL;
1079 goto send_response;
1080 }
1081
1082 reply_header->result =
1083 process_trace_cmd(recv_header->command,
1084 trace_inf->field);
1085 goto send_response;
1086
1087 }
1088 default:
1089 reply_header->result =
1090 process_simple_client_cmd(recv_header->command,
1091 recv_buf);
1092 goto send_response;
1093
1094 }
1095
1096 return;
1097
1098 send_response:
1099 ustcomm_send(sock, reply_header, send_buf);
1100 }
1101
1102 #define MAX_EVENTS 10
1103
1104 void *listener_main(void *p)
1105 {
1106 struct ustcomm_sock *epoll_sock;
1107 struct epoll_event events[MAX_EVENTS];
1108 struct sockaddr addr;
1109 int accept_fd, nfds, result, i, addr_size;
1110
1111 DBG("LISTENER");
1112
1113 pthread_cleanup_push(listener_cleanup, NULL);
1114
1115 for(;;) {
1116 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1117 if (nfds == -1) {
1118 PERROR("listener_main: epoll_wait failed");
1119 continue;
1120 }
1121
1122 for (i = 0; i < nfds; i++) {
1123 pthread_mutex_lock(&listener_thread_data_mutex);
1124 pthread_cleanup_push(release_listener_mutex, NULL);
1125 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1126 if (epoll_sock == listen_sock) {
1127 addr_size = sizeof(struct sockaddr);
1128 accept_fd = accept(epoll_sock->fd,
1129 &addr,
1130 (socklen_t *)&addr_size);
1131 if (accept_fd == -1) {
1132 PERROR("listener_main: accept failed");
1133 continue;
1134 }
1135 ustcomm_init_sock(accept_fd, epoll_fd,
1136 &ust_socks);
1137 } else {
1138 memset(receive_header, 0,
1139 sizeof(*receive_header));
1140 memset(receive_buffer, 0,
1141 sizeof(receive_buffer));
1142 result = ustcomm_recv(epoll_sock->fd,
1143 receive_header,
1144 receive_buffer);
1145 if (result == 0) {
1146 ustcomm_del_sock(epoll_sock, 0);
1147 } else {
1148 process_client_cmd(receive_header,
1149 receive_buffer,
1150 epoll_sock->fd);
1151 }
1152 }
1153 pthread_cleanup_pop(1); /* release listener mutex */
1154 }
1155 }
1156
1157 pthread_cleanup_pop(1);
1158 }
1159
/* Listener thread handle and creation flag.
 * These should only be accessed in the parent thread,
 * not the listener.
 */
static volatile sig_atomic_t have_listener = 0;
static pthread_t listener_thread;
1165
1166 void create_listener(void)
1167 {
1168 int result;
1169 sigset_t sig_all_blocked;
1170 sigset_t orig_parent_mask;
1171
1172 if (have_listener) {
1173 WARN("not creating listener because we already had one");
1174 return;
1175 }
1176
1177 /* A new thread created by pthread_create inherits the signal mask
1178 * from the parent. To avoid any signal being received by the
1179 * listener thread, we block all signals temporarily in the parent,
1180 * while we create the listener thread.
1181 */
1182
1183 sigfillset(&sig_all_blocked);
1184
1185 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1186 if (result) {
1187 PERROR("pthread_sigmask: %s", strerror(result));
1188 }
1189
1190 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1191 if (result == -1) {
1192 PERROR("pthread_create");
1193 }
1194
1195 /* Restore original signal mask in parent */
1196 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1197 if (result) {
1198 PERROR("pthread_sigmask: %s", strerror(result));
1199 } else {
1200 have_listener = 1;
1201 }
1202 }
1203
/* Autoprobe: automatically connect the "default" probe to markers.
 * Configured from the UST_AUTOPROBE environment variable in the
 * constructor: unset = disabled; a value starting with '/' = only
 * markers whose "channel/name" matches the regex that follows;
 * any other value = all markers.
 */
#define AUTOPROBE_DISABLED      0
#define AUTOPROBE_ENABLE_ALL    1
#define AUTOPROBE_ENABLE_REGEX  2
static int autoprobe_method = AUTOPROBE_DISABLED;
static regex_t autoprobe_regex;
1209
1210 static void auto_probe_connect(struct marker *m)
1211 {
1212 int result;
1213
1214 char* concat_name = NULL;
1215 const char *probe_name = "default";
1216
1217 if (autoprobe_method == AUTOPROBE_DISABLED) {
1218 return;
1219 } else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
1220 result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
1221 if (result == -1) {
1222 ERR("auto_probe_connect: asprintf failed (marker %s/%s)",
1223 m->channel, m->name);
1224 return;
1225 }
1226 if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
1227 free(concat_name);
1228 return;
1229 }
1230 free(concat_name);
1231 }
1232
1233 result = ltt_marker_connect(m->channel, m->name, probe_name);
1234 if (result && result != -EEXIST)
1235 ERR("ltt_marker_connect (marker = %s/%s, errno = %d)", m->channel, m->name, -result);
1236
1237 DBG("auto connected marker %s (addr: %p) %s to probe default", m->channel, m, m->name);
1238
1239 }
1240
1241 static struct ustcomm_sock * init_app_socket(int epoll_fd)
1242 {
1243 char *name;
1244 int result;
1245 struct ustcomm_sock *sock;
1246
1247 result = asprintf(&name, "%s/%d", SOCK_DIR, (int)getpid());
1248 if (result < 0) {
1249 ERR("string overflow allocating socket name, "
1250 "UST thread bailing");
1251 return NULL;
1252 }
1253
1254 result = ensure_dir_exists(SOCK_DIR);
1255 if (result == -1) {
1256 ERR("Unable to create socket directory %s, UST thread bailing",
1257 SOCK_DIR);
1258 goto free_name;
1259 }
1260
1261 sock = ustcomm_init_named_socket(name, epoll_fd);
1262 if (!sock) {
1263 ERR("Error initializing named socket (%s). Check that directory"
1264 "exists and that it is writable. UST thread bailing", name);
1265 goto free_name;
1266 }
1267
1268 free(name);
1269 return sock;
1270
1271 free_name:
1272 free(name);
1273 return NULL;
1274 }
1275
1276 static void __attribute__((constructor)) init()
1277 {
1278 struct timespec ts;
1279 int result;
1280 char* autoprobe_val = NULL;
1281 char* subbuffer_size_val = NULL;
1282 char* subbuffer_count_val = NULL;
1283 unsigned int subbuffer_size;
1284 unsigned int subbuffer_count;
1285 unsigned int power;
1286
1287 /* Assign the pidunique, to be able to differentiate the processes with same
1288 * pid, (before and after an exec).
1289 */
1290 pidunique = make_pidunique();
1291 processpid = getpid();
1292
1293 DBG("Tracectl constructor");
1294
1295 /* Set up epoll */
1296 epoll_fd = epoll_create(MAX_EVENTS);
1297 if (epoll_fd == -1) {
1298 ERR("epoll_create failed, tracing shutting down");
1299 return;
1300 }
1301
1302 /* Create the socket */
1303 listen_sock = init_app_socket(epoll_fd);
1304 if (!listen_sock) {
1305 ERR("failed to create application socket,"
1306 " tracing shutting down");
1307 return;
1308 }
1309
1310 create_listener();
1311
1312 /* Get clock the clock source type */
1313
1314 /* Default clock source */
1315 ust_clock_source = CLOCK_TRACE;
1316 if (clock_gettime(ust_clock_source, &ts) != 0) {
1317 ust_clock_source = CLOCK_MONOTONIC;
1318 DBG("UST traces will not be synchronized with LTTng traces");
1319 }
1320
1321 autoprobe_val = getenv("UST_AUTOPROBE");
1322 if (autoprobe_val) {
1323 struct marker_iter iter;
1324
1325 DBG("Autoprobe enabled.");
1326
1327 /* Ensure markers are initialized */
1328 //init_markers();
1329
1330 /* Ensure marker control is initialized, for the probe */
1331 init_marker_control();
1332
1333 /* first, set the callback that will connect the
1334 * probe on new markers
1335 */
1336 if (autoprobe_val[0] == '/') {
1337 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1338 if (result) {
1339 char regexerr[150];
1340
1341 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1342 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1343 /* don't crash the application just for this */
1344 } else {
1345 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1346 }
1347 } else {
1348 /* just enable all instrumentation */
1349 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1350 }
1351
1352 marker_set_new_marker_cb(auto_probe_connect);
1353
1354 /* Now, connect the probes that were already registered. */
1355 marker_iter_reset(&iter);
1356 marker_iter_start(&iter);
1357
1358 DBG("now iterating on markers already registered");
1359 while (iter.marker) {
1360 DBG("now iterating on marker %s", (*iter.marker)->name);
1361 auto_probe_connect(*iter.marker);
1362 marker_iter_next(&iter);
1363 }
1364 }
1365
1366 if (getenv("UST_OVERWRITE")) {
1367 int val = atoi(getenv("UST_OVERWRITE"));
1368 if (val == 0 || val == 1) {
1369 CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
1370 } else {
1371 WARN("invalid value for UST_OVERWRITE");
1372 }
1373 }
1374
1375 if (getenv("UST_AUTOCOLLECT")) {
1376 int val = atoi(getenv("UST_AUTOCOLLECT"));
1377 if (val == 0 || val == 1) {
1378 CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
1379 } else {
1380 WARN("invalid value for UST_AUTOCOLLECT");
1381 }
1382 }
1383
1384 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1385 if (subbuffer_size_val) {
1386 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1387 power = pow2_higher_or_eq(subbuffer_size);
1388 if (power != subbuffer_size)
1389 WARN("using the next power of two for buffer size = %u\n", power);
1390 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1391 }
1392
1393 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1394 if (subbuffer_count_val) {
1395 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1396 if (subbuffer_count < 2)
1397 subbuffer_count = 2;
1398 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1399 }
1400
1401 if (getenv("UST_TRACE")) {
1402 char trace_name[] = "auto";
1403 char trace_type[] = "ustrelay";
1404
1405 DBG("starting early tracing");
1406
1407 /* Ensure marker control is initialized */
1408 init_marker_control();
1409
1410 /* Ensure markers are initialized */
1411 init_markers();
1412
1413 /* Ensure buffers are initialized, for the transport to be available.
1414 * We are about to set a trace type and it will fail without this.
1415 */
1416 init_ustrelay_transport();
1417
1418 /* FIXME: When starting early tracing (here), depending on the
1419 * order of constructors, it is very well possible some marker
1420 * sections are not yet registered. Because of this, some
1421 * channels may not be registered. Yet, we are about to ask the
1422 * daemon to collect the channels. Channels which are not yet
1423 * registered will not be collected.
1424 *
1425 * Currently, in LTTng, there is no way to add a channel after
1426 * trace start. The reason for this is that it induces complex
1427 * concurrency issues on the trace structures, which can only
1428 * be resolved using RCU. This has not been done yet. As a
1429 * workaround, we are forcing the registration of the "ust"
1430 * channel here. This is the only channel (apart from metadata)
1431 * that can be reliably used in early tracing.
1432 *
1433 * Non-early tracing does not have this problem and can use
1434 * arbitrary channel names.
1435 */
1436 ltt_channels_register("ust");
1437
1438 result = ltt_trace_setup(trace_name);
1439 if (result < 0) {
1440 ERR("ltt_trace_setup failed");
1441 return;
1442 }
1443
1444 result = ltt_trace_set_type(trace_name, trace_type);
1445 if (result < 0) {
1446 ERR("ltt_trace_set_type failed");
1447 return;
1448 }
1449
1450 result = ltt_trace_alloc(trace_name);
1451 if (result < 0) {
1452 ERR("ltt_trace_alloc failed");
1453 return;
1454 }
1455
1456 result = ltt_trace_start(trace_name);
1457 if (result < 0) {
1458 ERR("ltt_trace_start failed");
1459 return;
1460 }
1461
1462 /* Do this after the trace is started in order to avoid creating confusion
1463 * if the trace fails to start. */
1464 inform_consumer_daemon(trace_name);
1465 }
1466
1467 return;
1468
1469 /* should decrementally destroy stuff if error */
1470
1471 }
1472
1473 /* This is only called if we terminate normally, not with an unhandled signal,
1474 * so we cannot rely on it. However, for now, LTTV requires that the header of
1475 * the last sub-buffer contain a valid end time for the trace. This is done
1476 * automatically only when the trace is properly stopped.
1477 *
1478 * If the traced program crashed, it is always possible to manually add the
1479 * right value in the header, or to open the trace in text mode.
1480 *
1481 * FIXME: Fix LTTV so it doesn't need this.
1482 */
1483
/* Stop and destroy the "auto" trace on normal process exit, so the last
 * sub-buffer header gets a valid trace end time (required by LTTV, see
 * the comment above).  Failures are logged but otherwise ignored.
 */
static void destroy_traces(void)
{
	/* if trace running, finish it */

	DBG("destructor stopping traces");

	if (ltt_trace_stop("auto") == -1) {
		ERR("ltt_trace_stop error");
	}

	if (ltt_trace_destroy("auto", 0) == -1) {
		ERR("ltt_trace_destroy error");
	}
}
1502
1503 static int trace_recording(void)
1504 {
1505 int retval = 0;
1506 struct ust_trace *trace;
1507
1508 ltt_lock_traces();
1509
1510 cds_list_for_each_entry(trace, &ltt_traces.head, list) {
1511 if (trace->active) {
1512 retval = 1;
1513 break;
1514 }
1515 }
1516
1517 ltt_unlock_traces();
1518
1519 return retval;
1520 }
1521
/* Sleep for usecs microseconds, restarting the sleep whenever it is
 * interrupted by a signal (EINTR), so the full requested delay elapses.
 *
 * Returns 0 on success, or -1 (with errno set by nanosleep) on a
 * non-EINTR failure.
 */
int restarting_usleep(useconds_t usecs)
{
	struct timespec tv;
	int result;

	/* Split into seconds and nanoseconds: tv_nsec must be below
	 * one second (1e9 ns) or nanosleep fails with EINVAL.  The
	 * previous code set only tv_nsec, which broke any delay of
	 * one second (1000000 us) or more.
	 */
	tv.tv_sec = usecs / 1000000;
	tv.tv_nsec = (usecs % 1000000) * 1000;

	do {
		/* On EINTR, nanosleep stores the remaining time back
		 * into tv, so the retry only sleeps what is left. */
		result = nanosleep(&tv, &tv);
	} while (result == -1 && errno == EINTR);

	return result;
}
1536
1537 static void stop_listener(void)
1538 {
1539 int result;
1540
1541 if (!have_listener)
1542 return;
1543
1544 result = pthread_cancel(listener_thread);
1545 if (result != 0) {
1546 ERR("pthread_cancel: %s", strerror(result));
1547 }
1548 result = pthread_join(listener_thread, NULL);
1549 if (result != 0) {
1550 ERR("pthread_join: %s", strerror(result));
1551 }
1552 }
1553
1554 /* This destructor keeps the process alive for a few seconds in order
1555 * to leave time for ustconsumer to connect to its buffers. This is necessary
1556 * for programs whose execution is very short. It is also useful in all
1557 * programs when tracing is started close to the end of the program
1558 * execution.
1559 *
1560 * FIXME: For now, this only works for the first trace created in a
1561 * process.
1562 */
1563
1564 static void __attribute__((destructor)) keepalive()
1565 {
1566 if (processpid != getpid()) {
1567 return;
1568 }
1569
1570 if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
1571 int total = 0;
1572 DBG("Keeping process alive for consumer daemon...");
1573 while (CMM_LOAD_SHARED(buffers_to_export)) {
1574 const int interv = 200000;
1575 restarting_usleep(interv);
1576 total += interv;
1577
1578 if (total >= 3000000) {
1579 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1580 break;
1581 }
1582 }
1583 DBG("Finally dying...");
1584 }
1585
1586 destroy_traces();
1587
1588 /* Ask the listener to stop and clean up. */
1589 stop_listener();
1590 }
1591
1592 void ust_potential_exec(void)
1593 {
1594 trace_mark(ust, potential_exec, MARK_NOARGS);
1595
1596 DBG("test");
1597
1598 keepalive();
1599 }
1600
1601 /* Notify ust that there was a fork. This needs to be called inside
1602 * the new process, anytime a process whose memory is not shared with
1603 * the parent is created. If this function is not called, the events
1604 * of the new process will not be collected.
1605 *
1606 * Signals should be disabled before the fork and reenabled only after
1607 * this call in order to guarantee tracing is not started before ust_fork()
1608 * sanitizes the new process.
1609 */
1610
/* Sanitize tracing state in a newly fork()ed child: tear down every
 * trace, socket and blocked-consumer reference inherited from the
 * parent, then rebuild the listener and restart an "auto" trace for the
 * child.  Called from ust_after_fork_child() with signals blocked and
 * the listener mutexes held.  The teardown order (stop traces, drop
 * sockets, drop buffer refs, destroy traces, recreate epoll/socket)
 * is deliberate; do not reorder.
 */
static void ust_fork(void)
{
	struct ust_buffer *buf, *buf_tmp;
	struct ustcomm_sock *sock, *sock_tmp;
	struct ust_trace *trace, *trace_tmp;
	int result;

	/* FIXME: technically, the locks could have been taken before the fork */
	DBG("ust: forking");

	/* Get the pid of the new process */
	processpid = getpid();

	/*
	 * FIXME: This could be prettier, we loop over the list twice and
	 * following good locking practice should lock around the loop
	 */
	cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
		ltt_trace_stop(trace->trace_name);
	}

	/* Delete all active connections, but leave them in the epoll set */
	cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
		ustcomm_del_sock(sock, 1);
	}

	/* Delete all blocked consumers */
	cds_list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
				     open_buffers_list) {
		cds_list_del(&buf->open_buffers_list);
	}

	/*
	 * FIXME: This could be prettier, we loop over the list twice and
	 * following good locking practice should lock around the loop
	 */
	cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
		ltt_trace_destroy(trace->trace_name, 1);
	}

	/* Clean up the listener socket and epoll, keeping the socket file */
	if (listen_sock) {
		ustcomm_del_named_sock(listen_sock, 1);
		listen_sock = NULL;
	}
	close(epoll_fd);

	/* Re-start the launch sequence */
	CMM_STORE_SHARED(buffers_to_export, 0);
	have_listener = 0;

	/* Set up epoll */
	epoll_fd = epoll_create(MAX_EVENTS);
	if (epoll_fd == -1) {
		ERR("epoll_create failed, tracing shutting down");
		return;
	}

	/* Create the socket */
	listen_sock = init_app_socket(epoll_fd);
	if (!listen_sock) {
		ERR("failed to create application socket,"
		    " tracing shutting down");
		return;
	}
	create_listener();
	/* Restart an "auto" trace in the child, mirroring the UST_TRACE
	 * early-tracing path in the constructor.  NOTE(review): only the
	 * setup/set_type errors are checked here; alloc/start results are
	 * ignored, unlike in init() — presumably intentional best-effort. */
	ltt_trace_setup("auto");
	result = ltt_trace_set_type("auto", "ustrelay");
	if (result < 0) {
		ERR("ltt_trace_set_type failed");
		return;
	}

	ltt_trace_alloc("auto");
	ltt_trace_start("auto");
	inform_consumer_daemon("auto");
}
1688
/* Prepare for a fork in a traced program: block all signals (saving the
 * original mask in fork_info for ust_after_fork_*), take the listener
 * mutexes so no listener operation straddles the fork, and notify
 * liburcu-bp.  Lock order here must be the exact reverse of the unlock
 * order in ust_after_fork_common().
 */
void ust_before_fork(ust_fork_info_t *fork_info)
{
	/* Disable signals. This is to avoid that the child
	 * intervenes before it is properly setup for tracing. It is
	 * safer to disable all signals, because then we know we are not
	 * breaking anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int result;

	/* FIXME:
	   - only do this if tracing is active
	*/

	/* Disable signals */
	sigfillset(&all_sigs);
	result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
	if (result == -1) {
		PERROR("sigprocmask");
		return;
	}

	/*
	 * Take the fork lock to make sure we are not in the middle of
	 * something in the listener thread.
	 */
	pthread_mutex_lock(&listener_thread_data_mutex);
	/*
	 * Hold listen_sock_mutex to protect from listen_sock teardown.
	 */
	pthread_mutex_lock(&listen_sock_mutex);
	rcu_bp_before_fork();
}
1722
1723 /* Don't call this function directly in a traced program */
1724 static void ust_after_fork_common(ust_fork_info_t *fork_info)
1725 {
1726 int result;
1727
1728 pthread_mutex_unlock(&listen_sock_mutex);
1729 pthread_mutex_unlock(&listener_thread_data_mutex);
1730
1731 /* Restore signals */
1732 result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
1733 if (result == -1) {
1734 PERROR("sigprocmask");
1735 return;
1736 }
1737 }
1738
/* Post-fork hook for the parent process: run the urcu-bp parent hook
 * first, then release the mutexes and restore signals taken/blocked in
 * ust_before_fork().  Order of the two calls is significant.
 */
void ust_after_fork_parent(ust_fork_info_t *fork_info)
{
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
}
1745
/* Post-fork hook for the child process: release the urcu-bp locks,
 * sanitize/rebuild the child's tracing state via ust_fork(), and only
 * then drop the listener mutexes and restore the signal mask — so no
 * signal or listener activity can observe the half-initialized child.
 */
void ust_after_fork_child(ust_fork_info_t *fork_info)
{
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();

	/* Sanitize the child */
	ust_fork();

	/* Then release mutexes and reenable signals */
	ust_after_fork_common(fork_info);
}
1757
This page took 0.066566 seconds and 5 git commands to generate.