Add trace name handling throughout tracectl, ustcomm and ustcmd
libust/tracectl.c
1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <stdint.h>
27 #include <pthread.h>
28 #include <signal.h>
29 #include <sys/epoll.h>
30 #include <sys/time.h>
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <fcntl.h>
34 #include <poll.h>
35 #include <regex.h>
36 #include <urcu/uatomic_arch.h>
37 #include <urcu/list.h>
38
39 #include <ust/marker.h>
40 #include <ust/tracepoint.h>
41 #include <ust/tracectl.h>
42 #include "tracer.h"
43 #include "usterr.h"
44 #include "ustcomm.h"
45 #include "buffers.h"
46 #include "marker-control.h"
47
48 /* This should only be accessed by the constructor, before the creation
49 * of the listener, and then only by the listener.
50 */
51 s64 pidunique = -1LL;
52
53 /* The process pid is used to detect a non-traceable fork
54 * and allow the non-traceable fork to be ignored
55 * by destructor sequences in libust
56 */
57 static pid_t processpid = 0;
58
59 static struct ustcomm_header _receive_header;
60 static struct ustcomm_header *receive_header = &_receive_header;
61 static char receive_buffer[USTCOMM_BUFFER_SIZE];
62 static char send_buffer[USTCOMM_BUFFER_SIZE];
63
64 static int epoll_fd;
65 static struct ustcomm_sock *listen_sock;
66
67 extern struct chan_info_struct chan_infos[];
68
69 static struct list_head open_buffers_list = LIST_HEAD_INIT(open_buffers_list);
70
71 static struct list_head ust_socks = LIST_HEAD_INIT(ust_socks);
72
73 /* Shared between the listener and the main thread; accessed through LOAD_SHARED/STORE_SHARED */
74 int buffers_to_export = 0;
75
76 static long long make_pidunique(void)
77 {
78 s64 retval;
79 struct timeval tv;
80
81 gettimeofday(&tv, NULL);
82
83 retval = tv.tv_sec;
84 retval <<= 32;
85 retval |= tv.tv_usec;
86
87 return retval;
88 }
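/* For illustration only (values are made up): the id above packs the wall
 * clock into one 64-bit number, seconds in the high 32 bits and microseconds
 * in the low bits, e.g. tv_sec = 0x12345678 and tv_usec = 100000 (0x186A0)
 * give pidunique = 0x12345678000186A0. Uniqueness therefore relies on two
 * exec()s of the same pid not landing on the same microsecond.
 */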
89
90 static void print_markers(FILE *fp)
91 {
92 struct marker_iter iter;
93
94 lock_markers();
95 marker_iter_reset(&iter);
96 marker_iter_start(&iter);
97
98 while (iter.marker) {
99 fprintf(fp, "marker: %s/%s %d \"%s\" %p\n",
100 iter.marker->channel,
101 iter.marker->name,
102 (int)imv_read(iter.marker->state),
103 iter.marker->format,
104 iter.marker->location);
105 marker_iter_next(&iter);
106 }
107 unlock_markers();
108 }
109
110 static void print_trace_events(FILE *fp)
111 {
112 struct trace_event_iter iter;
113
114 lock_trace_events();
115 trace_event_iter_reset(&iter);
116 trace_event_iter_start(&iter);
117
118 while (iter.trace_event) {
119 fprintf(fp, "trace_event: %s\n", iter.trace_event->name);
120 trace_event_iter_next(&iter);
121 }
122 unlock_trace_events();
123 }
124
125 static int connect_ustd(void)
126 {
127 int result, fd;
128 char default_daemon_path[] = SOCK_DIR "/ustd";
129 char *explicit_daemon_path, *daemon_path;
130
131 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
132 if (explicit_daemon_path) {
133 daemon_path = explicit_daemon_path;
134 } else {
135 daemon_path = default_daemon_path;
136 }
137
138 DBG("Connecting to daemon_path %s", daemon_path);
139
140 result = ustcomm_connect_path(daemon_path, &fd);
141 if (result < 0) {
142 WARN("connect_ustd failed, daemon_path: %s",
143 daemon_path);
144 return result;
145 }
146
147 return fd;
148 }
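/* Usage sketch (the shell invocation is an illustration, the variable name
 * comes from the code above): an instrumented application can be pointed at
 * a non-default consumer daemon socket before it is launched, e.g.
 *
 *   UST_DAEMON_SOCKET=/tmp/my-ustd-sock ./my_traced_app
 *
 * otherwise the compile-time default SOCK_DIR "/ustd" is used.
 */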
149
150
151 static void request_buffer_consumer(int sock,
152 const char *trace,
153 const char *channel,
154 int cpu)
155 {
156 struct ustcomm_header send_header, recv_header;
157 struct ustcomm_buffer_info buf_inf;
158 int result = 0;
159
160 result = ustcomm_pack_buffer_info(&send_header,
161 &buf_inf,
162 trace,
163 channel,
164 cpu);
165
166 if (result < 0) {
167 ERR("failed to pack buffer info message %s_%d",
168 channel, cpu);
169 return;
170 }
171
172 buf_inf.pid = getpid();
173 send_header.command = CONSUME_BUFFER;
174
175 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
176 &recv_header, NULL);
177 if (result <= 0) {
178 PERROR("request for buffer consumer failed, is the daemon online?");
179 }
180
181 return;
182 }
183
184 /* Ask the daemon to collect a trace called trace_name that is being
185  * produced by this pid.
186 *
187 * The trace must be at least allocated. (It can also be started.)
188 * This is because _ltt_trace_find is used.
189 */
190
191 static void inform_consumer_daemon(const char *trace_name)
192 {
193 int sock, i,j;
194 struct ust_trace *trace;
195 const char *ch_name;
196
197 sock = connect_ustd();
198 if (sock < 0) {
199 return;
200 }
201
202 DBG("Connected to ustd");
203
204 ltt_lock_traces();
205
206 trace = _ltt_trace_find(trace_name);
207 if (trace == NULL) {
208 WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
209 goto unlock_traces;
210 }
211
212 for (i=0; i < trace->nr_channels; i++) {
213 if (trace->channels[i].request_collection) {
214 /* iterate on all cpus */
215 for (j=0; j<trace->channels[i].n_cpus; j++) {
216 ch_name = trace->channels[i].channel_name;
217 request_buffer_consumer(sock, trace_name,
218 ch_name, j);
219 STORE_SHARED(buffers_to_export,
220 LOAD_SHARED(buffers_to_export)+1);
221 }
222 }
223 }
224
225 unlock_traces:
226 ltt_unlock_traces();
227
228 close(sock);
229 }
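/* Rough summary of the handshake implemented above (a reading aid, not new
 * behaviour): for every channel with request_collection set, one
 * CONSUME_BUFFER request is sent per cpu and buffers_to_export is
 * incremented. The daemon later maps each buffer and sends NOTIFY_BUF_MAPPED,
 * at which point notify_buffer_mapped() decrements buffers_to_export; the
 * keepalive destructor waits (with a time limit) for this counter to reach
 * zero before letting the process exit.
 */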
230
231 static struct ust_channel *find_channel(const char *ch_name,
232 struct ust_trace *trace)
233 {
234 int i;
235
236 for (i=0; i<trace->nr_channels; i++) {
237 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
238 return &trace->channels[i];
239 }
240 }
241
242 return NULL;
243 }
244
245 static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
246 int ch_cpu,
247 int *buf_shmid,
248 int *buf_struct_shmid,
249 int *buf_pipe_fd)
250 {
251 struct ust_trace *trace;
252 struct ust_channel *channel;
253 struct ust_buffer *buf;
254
255 DBG("get_buffer_shmid_pipe_fd");
256
257 ltt_lock_traces();
258 trace = _ltt_trace_find(trace_name);
259 ltt_unlock_traces();
260
261 if (trace == NULL) {
262 ERR("cannot find trace!");
263 return -ENODATA;
264 }
265
266 channel = find_channel(ch_name, trace);
267 if (!channel) {
268 ERR("cannot find channel %s!", ch_name);
269 return -ENODATA;
270 }
271
272 buf = channel->buf[ch_cpu];
273
274 *buf_shmid = buf->shmid;
275 *buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
276 *buf_pipe_fd = buf->data_ready_fd_read;
277
278 return 0;
279 }
280
281 static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
282 int *num, int *size)
283 {
284 struct ust_trace *trace;
285 struct ust_channel *channel;
286
287 DBG("get_subbuf_size");
288
289 ltt_lock_traces();
290 trace = _ltt_trace_find(trace_name);
291 ltt_unlock_traces();
292
293 if (!trace) {
294 ERR("cannot find trace!");
295 return -ENODATA;
296 }
297
298 channel = find_channel(ch_name, trace);
299 if (!channel) {
300 ERR("unable to find channel");
301 return -ENODATA;
302 }
303
304 *num = channel->subbuf_cnt;
305 *size = channel->subbuf_size;
306
307 return 0;
308 }
309
310 /* Return the smallest power of two that is greater than or equal to v */
311
312 static unsigned int pow2_higher_or_eq(unsigned int v)
313 {
314 int hb = fls(v);
315 int retval = 1<<(hb-1);
316
317 if (v-retval == 0)
318 return retval;
319 else
320 return retval<<1;
321 }
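/* Illustrative values for the helper above: pow2_higher_or_eq(4096) == 4096,
 * pow2_higher_or_eq(5000) == 8192, pow2_higher_or_eq(1) == 1. A size of 0 is
 * not expected here: fls(0) == 0 would make the 1 << (hb - 1) shift
 * undefined, so callers are expected to pass a non-zero size.
 */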
322
323 static int set_subbuf_size(const char *trace_name, const char *ch_name,
324 unsigned int size)
325 {
326 unsigned int power;
327 int retval = 0;
328 struct ust_trace *trace;
329 struct ust_channel *channel;
330
331 DBG("set_subbuf_size");
332
333 power = pow2_higher_or_eq(size);
334 power = max_t(unsigned int, 2u, power);
335 if (power != size) {
336 WARN("using the next power of two for buffer size = %u\n", power);
337 }
338
339 ltt_lock_traces();
340 trace = _ltt_trace_find_setup(trace_name);
341 if (trace == NULL) {
342 ERR("cannot find trace!");
343 retval = -ENODATA;
344 goto unlock_traces;
345 }
346
347 channel = find_channel(ch_name, trace);
348 if (!channel) {
349 ERR("unable to find channel");
350 retval = -ENODATA;
351 goto unlock_traces;
352 }
353
354 channel->subbuf_size = power;
355 DBG("the set_subbuf_size for the requested channel is %u", channel->subbuf_size);
356
357 unlock_traces:
358 ltt_unlock_traces();
359
360 return retval;
361 }
362
363 static int set_subbuf_num(const char *trace_name, const char *ch_name,
364 unsigned int num)
365 {
366 struct ust_trace *trace;
367 struct ust_channel *channel;
368 int retval = 0;
369
370 DBG("set_subbuf_num");
371
372 if (num < 2) {
373 		ERR("subbuffer count should be at least 2");
374 return -EINVAL;
375 }
376
377 ltt_lock_traces();
378 trace = _ltt_trace_find_setup(trace_name);
379 if (trace == NULL) {
380 ERR("cannot find trace!");
381 retval = -ENODATA;
382 goto unlock_traces;
383 }
384
385 channel = find_channel(ch_name, trace);
386 if (!channel) {
387 ERR("unable to find channel");
388 retval = -ENODATA;
389 goto unlock_traces;
390 }
391
392 channel->subbuf_cnt = num;
393 DBG("the set_subbuf_cnt for the requested channel is %zd", channel->subbuf_cnt);
394
395 unlock_traces:
396 ltt_unlock_traces();
397 return retval;
398 }
399
400 static int get_subbuffer(const char *trace_name, const char *ch_name,
401 int ch_cpu, long *consumed_old)
402 {
403 int retval = 0;
404 struct ust_trace *trace;
405 struct ust_channel *channel;
406 struct ust_buffer *buf;
407
408 DBG("get_subbuf");
409
410 *consumed_old = 0;
411
412 ltt_lock_traces();
413 trace = _ltt_trace_find(trace_name);
414
415 if (!trace) {
416 DBG("Cannot find trace. It was likely destroyed by the user.");
417 retval = -ENODATA;
418 goto unlock_traces;
419 }
420
421 channel = find_channel(ch_name, trace);
422 if (!channel) {
423 ERR("unable to find channel");
424 retval = -ENODATA;
425 goto unlock_traces;
426 }
427
428 buf = channel->buf[ch_cpu];
429
430 retval = ust_buffers_get_subbuf(buf, consumed_old);
431 if (retval < 0) {
432 WARN("missed buffer?");
433 }
434
435 unlock_traces:
436 ltt_unlock_traces();
437
438 return retval;
439 }
440
441
442 static int notify_buffer_mapped(const char *trace_name,
443 const char *ch_name,
444 int ch_cpu)
445 {
446 int retval = 0;
447 struct ust_trace *trace;
448 struct ust_channel *channel;
449 struct ust_buffer *buf;
450
451 DBG("get_buffer_fd");
452
453 ltt_lock_traces();
454 trace = _ltt_trace_find(trace_name);
455
456 if (!trace) {
457 retval = -ENODATA;
458 DBG("Cannot find trace. It was likely destroyed by the user.");
459 goto unlock_traces;
460 }
461
462 channel = find_channel(ch_name, trace);
463 if (!channel) {
464 retval = -ENODATA;
465 ERR("unable to find channel");
466 goto unlock_traces;
467 }
468
469 buf = channel->buf[ch_cpu];
470
471 	/* Reaching this point proves that the daemon has mapped the buffer into
472 	 * its memory. We may now decrement buffers_to_export.
473 */
474 if (uatomic_read(&buf->consumed) == 0) {
475 DBG("decrementing buffers_to_export");
476 STORE_SHARED(buffers_to_export, LOAD_SHARED(buffers_to_export)-1);
477 }
478
479 /* The buffer has been exported, ergo, we can add it to the
480 * list of open buffers
481 */
482 list_add(&buf->open_buffers_list, &open_buffers_list);
483
484 unlock_traces:
485 ltt_unlock_traces();
486
487 return retval;
488 }
489
490 static int put_subbuffer(const char *trace_name, const char *ch_name,
491 int ch_cpu, long consumed_old)
492 {
493 int retval = 0;
494 struct ust_trace *trace;
495 struct ust_channel *channel;
496 struct ust_buffer *buf;
497
498 DBG("put_subbuf");
499
500 ltt_lock_traces();
501 trace = _ltt_trace_find(trace_name);
502
503 if (!trace) {
504 retval = -ENODATA;
505 DBG("Cannot find trace. It was likely destroyed by the user.");
506 goto unlock_traces;
507 }
508
509 channel = find_channel(ch_name, trace);
510 if (!channel) {
511 retval = -ENODATA;
512 ERR("unable to find channel");
513 goto unlock_traces;
514 }
515
516 buf = channel->buf[ch_cpu];
517
518 retval = ust_buffers_put_subbuf(buf, consumed_old);
519 if (retval < 0) {
520 WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
521 ch_name, ch_cpu);
522 } else {
523 DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
524 ch_name, ch_cpu);
525 }
526
527 unlock_traces:
528 ltt_unlock_traces();
529
530 return retval;
531 }
532
533 static void listener_cleanup(void *ptr)
534 {
535 ustcomm_del_named_sock(listen_sock, 0);
536 }
537
538 static void force_subbuf_switch(void)
539 {
540 struct ust_buffer *buf;
541
542 list_for_each_entry(buf, &open_buffers_list,
543 open_buffers_list) {
544 ltt_force_switch(buf, FORCE_FLUSH);
545 }
546 }
547
548 /* Simple commands are those which need only respond with a return value. */
549 static int process_simple_client_cmd(int command, char *recv_buf)
550 {
551 switch(command) {
552 case SET_SOCK_PATH:
553 {
554 struct ustcomm_sock_path *sock_msg;
555 sock_msg = (struct ustcomm_sock_path *)recv_buf;
556 sock_msg->sock_path =
557 ustcomm_restore_ptr(sock_msg->sock_path,
558 sock_msg->data,
559 sizeof(sock_msg->data));
560 if (!sock_msg->sock_path) {
561
562 return -EINVAL;
563 }
564 return setenv("UST_DAEMON_SOCKET", sock_msg->sock_path, 1);
565 }
566
567 case FORCE_SUBBUF_SWITCH:
568 /* FIXME: return codes? */
569 force_subbuf_switch();
570
571 break;
572
573 default:
574 return -EINVAL;
575 }
576
577 return 0;
578 }
579
580
581 static int process_trace_cmd(int command, char *trace_name)
582 {
583 int result;
584 char trace_type[] = "ustrelay";
585
586 switch(command) {
587 case START:
588 		/* START is a compound operation that sets up the trace, allocates it and starts it */
589 result = ltt_trace_setup(trace_name);
590 if (result < 0) {
591 ERR("ltt_trace_setup failed");
592 return result;
593 }
594
595 result = ltt_trace_set_type(trace_name, trace_type);
596 if (result < 0) {
597 ERR("ltt_trace_set_type failed");
598 return result;
599 }
600
601 result = ltt_trace_alloc(trace_name);
602 if (result < 0) {
603 ERR("ltt_trace_alloc failed");
604 return result;
605 }
606
607 inform_consumer_daemon(trace_name);
608
609 result = ltt_trace_start(trace_name);
610 if (result < 0) {
611 ERR("ltt_trace_start failed");
612 return result;
613 }
614
615 return 0;
616 case SETUP_TRACE:
617 DBG("trace setup");
618
619 result = ltt_trace_setup(trace_name);
620 if (result < 0) {
621 ERR("ltt_trace_setup failed");
622 return result;
623 }
624
625 result = ltt_trace_set_type(trace_name, trace_type);
626 if (result < 0) {
627 ERR("ltt_trace_set_type failed");
628 return result;
629 }
630
631 return 0;
632 case ALLOC_TRACE:
633 DBG("trace alloc");
634
635 result = ltt_trace_alloc(trace_name);
636 if (result < 0) {
637 ERR("ltt_trace_alloc failed");
638 return result;
639 }
640 inform_consumer_daemon(trace_name);
641
642 return 0;
643
644 case CREATE_TRACE:
645 DBG("trace create");
646
647 result = ltt_trace_setup(trace_name);
648 if (result < 0) {
649 ERR("ltt_trace_setup failed");
650 return result;
651 }
652
653 result = ltt_trace_set_type(trace_name, trace_type);
654 if (result < 0) {
655 ERR("ltt_trace_set_type failed");
656 return result;
657 }
658
659 return 0;
660 case START_TRACE:
661 DBG("trace start");
662
663 result = ltt_trace_alloc(trace_name);
664 if (result < 0) {
665 ERR("ltt_trace_alloc failed");
666 return result;
667 }
668 if (!result) {
669 inform_consumer_daemon(trace_name);
670 }
671
672 result = ltt_trace_start(trace_name);
673 if (result < 0) {
674 ERR("ltt_trace_start failed");
675 return result;
676 }
677
678 return 0;
679 case STOP_TRACE:
680 DBG("trace stop");
681
682 result = ltt_trace_stop(trace_name);
683 if (result < 0) {
684 ERR("ltt_trace_stop failed");
685 return result;
686 }
687
688 return 0;
689 case DESTROY_TRACE:
690 DBG("trace destroy");
691
692 result = ltt_trace_destroy(trace_name, 0);
693 if (result < 0) {
694 ERR("ltt_trace_destroy failed");
695 return result;
696 }
697 return 0;
698 }
699
700 return 0;
701 }
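/* Sketch of the trace life cycle driven by the commands handled above (the
 * split variants simply expose the individual steps of START):
 *
 *   SETUP_TRACE / CREATE_TRACE -> ltt_trace_setup() + ltt_trace_set_type()
 *   ALLOC_TRACE                -> ltt_trace_alloc() + inform_consumer_daemon()
 *   START_TRACE                -> ltt_trace_alloc() (informing the daemon if
 *                                 newly allocated) + ltt_trace_start()
 *   STOP_TRACE                 -> ltt_trace_stop()
 *   DESTROY_TRACE              -> ltt_trace_destroy()
 *   START                      -> setup, set_type, alloc, inform, start in one shot
 */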
702
703
704 static void process_channel_cmd(int sock, int command,
705 struct ustcomm_channel_info *ch_inf)
706 {
707 struct ustcomm_header _reply_header;
708 struct ustcomm_header *reply_header = &_reply_header;
709 struct ustcomm_channel_info *reply_msg =
710 (struct ustcomm_channel_info *)send_buffer;
711 int result, offset = 0, num, size;
712
713 memset(reply_header, 0, sizeof(*reply_header));
714
715 switch (command) {
716 case GET_SUBBUF_NUM_SIZE:
717 result = get_subbuf_num_size(ch_inf->trace,
718 ch_inf->channel,
719 &num, &size);
720 if (result < 0) {
721 reply_header->result = result;
722 break;
723 }
724
725 reply_msg->channel = USTCOMM_POISON_PTR;
726 reply_msg->subbuf_num = num;
727 reply_msg->subbuf_size = size;
728
729
730 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
731
732 break;
733 case SET_SUBBUF_NUM:
734 reply_header->result = set_subbuf_num(ch_inf->trace,
735 ch_inf->channel,
736 ch_inf->subbuf_num);
737
738 break;
739 case SET_SUBBUF_SIZE:
740 reply_header->result = set_subbuf_size(ch_inf->trace,
741 ch_inf->channel,
742 ch_inf->subbuf_size);
743
744
745 break;
746 }
747 if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
748 ERR("ustcomm_send failed");
749 }
750 }
751
752 static void process_buffer_cmd(int sock, int command,
753 struct ustcomm_buffer_info *buf_inf)
754 {
755 struct ustcomm_header _reply_header;
756 struct ustcomm_header *reply_header = &_reply_header;
757 struct ustcomm_buffer_info *reply_msg =
758 (struct ustcomm_buffer_info *)send_buffer;
759 int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
760 long consumed_old;
761
762 memset(reply_header, 0, sizeof(*reply_header));
763
764 switch (command) {
765 case GET_BUF_SHMID_PIPE_FD:
766 result = get_buffer_shmid_pipe_fd(buf_inf->trace,
767 buf_inf->channel,
768 buf_inf->ch_cpu,
769 &buf_shmid,
770 &buf_struct_shmid,
771 &buf_pipe_fd);
772 if (result < 0) {
773 reply_header->result = result;
774 break;
775 }
776
777 reply_msg->channel = USTCOMM_POISON_PTR;
778 reply_msg->buf_shmid = buf_shmid;
779 reply_msg->buf_struct_shmid = buf_struct_shmid;
780
781 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
782 reply_header->fd_included = 1;
783
784 if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
785 &buf_pipe_fd) < 0) {
786 ERR("ustcomm_send failed");
787 }
788 return;
789
790 case NOTIFY_BUF_MAPPED:
791 reply_header->result =
792 notify_buffer_mapped(buf_inf->trace,
793 buf_inf->channel,
794 buf_inf->ch_cpu);
795 break;
796 case GET_SUBBUFFER:
797 result = get_subbuffer(buf_inf->trace, buf_inf->channel,
798 buf_inf->ch_cpu, &consumed_old);
799 if (result < 0) {
800 reply_header->result = result;
801 break;
802 }
803
804 reply_msg->channel = USTCOMM_POISON_PTR;
805 reply_msg->consumed_old = consumed_old;
806
807 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
808
809 break;
810 case PUT_SUBBUFFER:
811 result = put_subbuffer(buf_inf->trace, buf_inf->channel,
812 buf_inf->ch_cpu,
813 buf_inf->consumed_old);
814 reply_header->result = result;
815
816 break;
817 }
818
819 if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
820 ERR("ustcomm_send failed");
821 }
822
823 }
824
825 static void process_marker_cmd(int sock, int command,
826 struct ustcomm_marker_info *marker_inf)
827 {
828 struct ustcomm_header _reply_header;
829 struct ustcomm_header *reply_header = &_reply_header;
830 	int result = 0;
831
832 memset(reply_header, 0, sizeof(*reply_header));
833
834 switch(command) {
835 case ENABLE_MARKER:
836
837 result = ltt_marker_connect(marker_inf->channel,
838 marker_inf->marker,
839 "default");
840 if (result < 0) {
841 WARN("could not enable marker; channel=%s,"
842 " name=%s",
843 marker_inf->channel,
844 marker_inf->marker);
845
846 }
847 break;
848 case DISABLE_MARKER:
849 result = ltt_marker_disconnect(marker_inf->channel,
850 marker_inf->marker,
851 "default");
852 if (result < 0) {
853 WARN("could not disable marker; channel=%s,"
854 " name=%s",
855 marker_inf->channel,
856 marker_inf->marker);
857 }
858 break;
859 }
860
861 reply_header->result = result;
862
863 if (ustcomm_send(sock, reply_header, NULL) < 0) {
864 ERR("ustcomm_send failed");
865 }
866
867 }
868 static void process_client_cmd(struct ustcomm_header *recv_header,
869 char *recv_buf, int sock)
870 {
871 int result;
872 struct ustcomm_header _reply_header;
873 struct ustcomm_header *reply_header = &_reply_header;
874 char *send_buf = send_buffer;
875
876 memset(reply_header, 0, sizeof(*reply_header));
877 memset(send_buf, 0, sizeof(send_buffer));
878
879 switch(recv_header->command) {
880 case GET_SUBBUF_NUM_SIZE:
881 case SET_SUBBUF_NUM:
882 case SET_SUBBUF_SIZE:
883 {
884 struct ustcomm_channel_info *ch_inf;
885 ch_inf = (struct ustcomm_channel_info *)recv_buf;
886 result = ustcomm_unpack_channel_info(ch_inf);
887 if (result < 0) {
888 ERR("couldn't unpack channel info");
889 reply_header->result = -EINVAL;
890 goto send_response;
891 }
892 process_channel_cmd(sock, recv_header->command, ch_inf);
893 return;
894 }
895 case GET_BUF_SHMID_PIPE_FD:
896 case NOTIFY_BUF_MAPPED:
897 case GET_SUBBUFFER:
898 case PUT_SUBBUFFER:
899 {
900 struct ustcomm_buffer_info *buf_inf;
901 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
902 result = ustcomm_unpack_buffer_info(buf_inf);
903 if (result < 0) {
904 ERR("couldn't unpack buffer info");
905 reply_header->result = -EINVAL;
906 goto send_response;
907 }
908 process_buffer_cmd(sock, recv_header->command, buf_inf);
909 return;
910 }
911 case ENABLE_MARKER:
912 case DISABLE_MARKER:
913 {
914 struct ustcomm_marker_info *marker_inf;
915 marker_inf = (struct ustcomm_marker_info *)recv_buf;
916 result = ustcomm_unpack_marker_info(marker_inf);
917 if (result < 0) {
918 ERR("couldn't unpack marker info");
919 reply_header->result = -EINVAL;
920 goto send_response;
921 }
922 process_marker_cmd(sock, recv_header->command, marker_inf);
923 return;
924 }
925 case LIST_MARKERS:
926 {
927 char *ptr;
928 size_t size;
929 FILE *fp;
930
931 fp = open_memstream(&ptr, &size);
932 if (fp == NULL) {
933 ERR("opening memstream failed");
934 return;
935 }
936 print_markers(fp);
937 fclose(fp);
938
939 reply_header->size = size;
940
941 result = ustcomm_send(sock, reply_header, ptr);
942
943 free(ptr);
944
945 if (result < 0) {
946 PERROR("failed to send markers list");
947 }
948
949 break;
950 }
951 case LIST_TRACE_EVENTS:
952 {
953 char *ptr;
954 size_t size;
955 FILE *fp;
956
957 fp = open_memstream(&ptr, &size);
958 if (fp == NULL) {
959 ERR("opening memstream failed");
960 return;
961 }
962 print_trace_events(fp);
963 fclose(fp);
964
965 reply_header->size = size;
966
967 result = ustcomm_send(sock, reply_header, ptr);
968
969 free(ptr);
970
971 if (result < 0) {
972 ERR("list_trace_events failed");
973 return;
974 }
975
976 break;
977 }
978 case LOAD_PROBE_LIB:
979 {
980 char *libfile;
981
982 /* FIXME: No functionality at all... */
983 libfile = recv_buf;
984
985 DBG("load_probe_lib loading %s", libfile);
986
987 break;
988 }
989 case GET_PIDUNIQUE:
990 {
991 struct ustcomm_pidunique *pid_msg;
992 pid_msg = (struct ustcomm_pidunique *)send_buf;
993
994 pid_msg->pidunique = pidunique;
995 		reply_header->size = sizeof(*pid_msg);
996
997 goto send_response;
998
999 }
1000 case GET_SOCK_PATH:
1001 {
1002 struct ustcomm_sock_path *sock_msg;
1003 char *sock_path_env;
1004
1005 sock_msg = (struct ustcomm_sock_path *)send_buf;
1006
1007 sock_path_env = getenv("UST_DAEMON_SOCKET");
1008
1009 if (!sock_path_env) {
1010 result = ustcomm_pack_sock_path(reply_header,
1011 sock_msg,
1012 SOCK_DIR "/ustd");
1013
1014 } else {
1015 result = ustcomm_pack_sock_path(reply_header,
1016 sock_msg,
1017 sock_path_env);
1018 }
1019 reply_header->result = result;
1020
1021 goto send_response;
1022 }
1023 case START:
1024 case SETUP_TRACE:
1025 case ALLOC_TRACE:
1026 case CREATE_TRACE:
1027 case START_TRACE:
1028 case STOP_TRACE:
1029 case DESTROY_TRACE:
1030 {
1031 struct ustcomm_trace_info *trace_inf =
1032 (struct ustcomm_trace_info *)recv_buf;
1033
1034 result = ustcomm_unpack_trace_info(trace_inf);
1035 if (result < 0) {
1036 ERR("couldn't unpack trace info");
1037 reply_header->result = -EINVAL;
1038 goto send_response;
1039 }
1040
1041 reply_header->result =
1042 process_trace_cmd(recv_header->command,
1043 trace_inf->trace);
1044 goto send_response;
1045
1046 }
1047 default:
1048 reply_header->result =
1049 process_simple_client_cmd(recv_header->command,
1050 recv_buf);
1051 goto send_response;
1052
1053 }
1054
1055 return;
1056
1057 send_response:
1058 ustcomm_send(sock, reply_header, send_buf);
1059 }
1060
1061 #define MAX_EVENTS 10
1062
1063 void *listener_main(void *p)
1064 {
1065 struct ustcomm_sock *epoll_sock;
1066 struct epoll_event events[MAX_EVENTS];
1067 struct sockaddr addr;
1068 int accept_fd, nfds, result, i, addr_size;
1069
1070 DBG("LISTENER");
1071
1072 pthread_cleanup_push(listener_cleanup, NULL);
1073
1074 for(;;) {
1075 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1076 if (nfds == -1) {
1077 PERROR("listener_main: epoll_wait failed");
1078 continue;
1079 }
1080
1081 for (i = 0; i < nfds; i++) {
1082 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1083 if (epoll_sock == listen_sock) {
1084 addr_size = sizeof(struct sockaddr);
1085 accept_fd = accept(epoll_sock->fd,
1086 &addr,
1087 (socklen_t *)&addr_size);
1088 if (accept_fd == -1) {
1089 PERROR("listener_main: accept failed");
1090 continue;
1091 }
1092 ustcomm_init_sock(accept_fd, epoll_fd,
1093 &ust_socks);
1094 } else {
1095 memset(receive_header, 0,
1096 sizeof(*receive_header));
1097 memset(receive_buffer, 0,
1098 sizeof(receive_buffer));
1099 result = ustcomm_recv(epoll_sock->fd,
1100 receive_header,
1101 receive_buffer);
1102 if (result == 0) {
1103 ustcomm_del_sock(epoll_sock, 0);
1104 } else {
1105 process_client_cmd(receive_header,
1106 receive_buffer,
1107 epoll_sock->fd);
1108 }
1109 }
1110 }
1111 }
1112
1113 pthread_cleanup_pop(1);
1114 }
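/* Note on the loop above: listen_sock is registered in the same epoll set as
 * the per-client sockets, so a single epoll_wait() distinguishes new
 * connections (accept + ustcomm_init_sock) from requests on existing
 * connections (ustcomm_recv + process_client_cmd); a return of 0 from
 * ustcomm_recv is treated as the peer having closed, and the socket is
 * dropped from the set.
 */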
1115
1116 /* These should only be accessed in the parent thread,
1117 * not the listener.
1118 */
1119 static volatile sig_atomic_t have_listener = 0;
1120 static pthread_t listener_thread;
1121
1122 void create_listener(void)
1123 {
1124 int result;
1125 sigset_t sig_all_blocked;
1126 sigset_t orig_parent_mask;
1127
1128 if (have_listener) {
1129 WARN("not creating listener because we already had one");
1130 return;
1131 }
1132
1133 /* A new thread created by pthread_create inherits the signal mask
1134 * from the parent. To avoid any signal being received by the
1135 * listener thread, we block all signals temporarily in the parent,
1136 * while we create the listener thread.
1137 */
1138
1139 sigfillset(&sig_all_blocked);
1140
1141 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1142 if (result) {
1143 PERROR("pthread_sigmask: %s", strerror(result));
1144 }
1145
1146 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1147 	if (result) {
1148 		ERR("pthread_create: %s", strerror(result));
1149 	}
1150
1151 /* Restore original signal mask in parent */
1152 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1153 if (result) {
1154 PERROR("pthread_sigmask: %s", strerror(result));
1155 } else {
1156 have_listener = 1;
1157 }
1158 }
1159
1160 #define AUTOPROBE_DISABLED 0
1161 #define AUTOPROBE_ENABLE_ALL 1
1162 #define AUTOPROBE_ENABLE_REGEX 2
1163 static int autoprobe_method = AUTOPROBE_DISABLED;
1164 static regex_t autoprobe_regex;
1165
1166 static void auto_probe_connect(struct marker *m)
1167 {
1168 int result;
1169
1170 char* concat_name = NULL;
1171 const char *probe_name = "default";
1172
1173 if (autoprobe_method == AUTOPROBE_DISABLED) {
1174 return;
1175 } else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
1176 result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
1177 if (result == -1) {
1178 ERR("auto_probe_connect: asprintf failed (marker %s/%s)",
1179 m->channel, m->name);
1180 return;
1181 }
1182 if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
1183 free(concat_name);
1184 return;
1185 }
1186 free(concat_name);
1187 }
1188
1189 result = ltt_marker_connect(m->channel, m->name, probe_name);
1190 if (result && result != -EEXIST)
1191 ERR("ltt_marker_connect (marker = %s/%s, errno = %d)", m->channel, m->name, -result);
1192
1193 DBG("auto connected marker %s (addr: %p) %s to probe default", m->channel, m, m->name);
1194
1195 }
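/* Example of how this is typically driven from the environment (the shell
 * usage is an illustration; the variable is read in init() below):
 *
 *   UST_AUTOPROBE=1 ./app              connect every marker to the "default" probe
 *   UST_AUTOPROBE='/^ust/foo' ./app    only markers whose "channel/name" matches
 *                                      the POSIX regex following the leading '/'
 */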
1196
1197 static struct ustcomm_sock * init_app_socket(int epoll_fd)
1198 {
1199 char *name;
1200 int result;
1201 struct ustcomm_sock *sock;
1202
1203 result = asprintf(&name, "%s/%d", SOCK_DIR, (int)getpid());
1204 if (result < 0) {
1205 		ERR("failed to allocate socket name string, "
1206 "UST thread bailing");
1207 return NULL;
1208 }
1209
1210 result = ensure_dir_exists(SOCK_DIR);
1211 if (result == -1) {
1212 ERR("Unable to create socket directory %s, UST thread bailing",
1213 SOCK_DIR);
1214 goto free_name;
1215 }
1216
1217 sock = ustcomm_init_named_socket(name, epoll_fd);
1218 if (!sock) {
1219 		ERR("Error initializing named socket (%s). Check that the directory "
1220 "exists and that it is writable. UST thread bailing", name);
1221 goto free_name;
1222 }
1223
1224 free(name);
1225 return sock;
1226
1227 free_name:
1228 free(name);
1229 return NULL;
1230 }
1231
1232 static void __attribute__((constructor)) init()
1233 {
1234 int result;
1235 char* autoprobe_val = NULL;
1236 char* subbuffer_size_val = NULL;
1237 char* subbuffer_count_val = NULL;
1238 unsigned int subbuffer_size;
1239 unsigned int subbuffer_count;
1240 unsigned int power;
1241
1242 	/* Assign the pidunique, to be able to differentiate processes that share
1243 	 * the same pid (e.g. before and after an exec).
1244 */
1245 pidunique = make_pidunique();
1246 processpid = getpid();
1247
1248 DBG("Tracectl constructor");
1249
1250 /* Set up epoll */
1251 epoll_fd = epoll_create(MAX_EVENTS);
1252 if (epoll_fd == -1) {
1253 ERR("epoll_create failed, tracing shutting down");
1254 return;
1255 }
1256
1257 /* Create the socket */
1258 listen_sock = init_app_socket(epoll_fd);
1259 if (!listen_sock) {
1260 ERR("failed to create application socket,"
1261 " tracing shutting down");
1262 return;
1263 }
1264
1265 create_listener();
1266
1267 autoprobe_val = getenv("UST_AUTOPROBE");
1268 if (autoprobe_val) {
1269 struct marker_iter iter;
1270
1271 DBG("Autoprobe enabled.");
1272
1273 /* Ensure markers are initialized */
1274 //init_markers();
1275
1276 /* Ensure marker control is initialized, for the probe */
1277 init_marker_control();
1278
1279 /* first, set the callback that will connect the
1280 * probe on new markers
1281 */
1282 if (autoprobe_val[0] == '/') {
1283 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1284 if (result) {
1285 char regexerr[150];
1286
1287 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1288 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1289 /* don't crash the application just for this */
1290 } else {
1291 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1292 }
1293 } else {
1294 /* just enable all instrumentation */
1295 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1296 }
1297
1298 marker_set_new_marker_cb(auto_probe_connect);
1299
1300 /* Now, connect the probes that were already registered. */
1301 marker_iter_reset(&iter);
1302 marker_iter_start(&iter);
1303
1304 DBG("now iterating on markers already registered");
1305 while (iter.marker) {
1306 DBG("now iterating on marker %s", iter.marker->name);
1307 auto_probe_connect(iter.marker);
1308 marker_iter_next(&iter);
1309 }
1310 }
1311
1312 if (getenv("UST_OVERWRITE")) {
1313 int val = atoi(getenv("UST_OVERWRITE"));
1314 if (val == 0 || val == 1) {
1315 STORE_SHARED(ust_channels_overwrite_by_default, val);
1316 } else {
1317 WARN("invalid value for UST_OVERWRITE");
1318 }
1319 }
1320
1321 if (getenv("UST_AUTOCOLLECT")) {
1322 int val = atoi(getenv("UST_AUTOCOLLECT"));
1323 if (val == 0 || val == 1) {
1324 STORE_SHARED(ust_channels_request_collection_by_default, val);
1325 } else {
1326 WARN("invalid value for UST_AUTOCOLLECT");
1327 }
1328 }
1329
1330 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1331 if (subbuffer_size_val) {
1332 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1333 power = pow2_higher_or_eq(subbuffer_size);
1334 if (power != subbuffer_size)
1335 WARN("using the next power of two for buffer size = %u\n", power);
1336 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1337 }
1338
1339 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1340 if (subbuffer_count_val) {
1341 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1342 if (subbuffer_count < 2)
1343 subbuffer_count = 2;
1344 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1345 }
1346
1347 if (getenv("UST_TRACE")) {
1348 char trace_name[] = "auto";
1349 char trace_type[] = "ustrelay";
1350
1351 DBG("starting early tracing");
1352
1353 /* Ensure marker control is initialized */
1354 init_marker_control();
1355
1356 /* Ensure markers are initialized */
1357 init_markers();
1358
1359 /* Ensure buffers are initialized, for the transport to be available.
1360 * We are about to set a trace type and it will fail without this.
1361 */
1362 init_ustrelay_transport();
1363
1364 /* FIXME: When starting early tracing (here), depending on the
1365 * order of constructors, it is very well possible some marker
1366 * sections are not yet registered. Because of this, some
1367 * channels may not be registered. Yet, we are about to ask the
1368 * daemon to collect the channels. Channels which are not yet
1369 * registered will not be collected.
1370 *
1371 * Currently, in LTTng, there is no way to add a channel after
1372 * trace start. The reason for this is that it induces complex
1373 * concurrency issues on the trace structures, which can only
1374 * be resolved using RCU. This has not been done yet. As a
1375 * workaround, we are forcing the registration of the "ust"
1376 * channel here. This is the only channel (apart from metadata)
1377 * that can be reliably used in early tracing.
1378 *
1379 * Non-early tracing does not have this problem and can use
1380 * arbitrary channel names.
1381 */
1382 ltt_channels_register("ust");
1383
1384 result = ltt_trace_setup(trace_name);
1385 if (result < 0) {
1386 ERR("ltt_trace_setup failed");
1387 return;
1388 }
1389
1390 result = ltt_trace_set_type(trace_name, trace_type);
1391 if (result < 0) {
1392 ERR("ltt_trace_set_type failed");
1393 return;
1394 }
1395
1396 result = ltt_trace_alloc(trace_name);
1397 if (result < 0) {
1398 ERR("ltt_trace_alloc failed");
1399 return;
1400 }
1401
1402 result = ltt_trace_start(trace_name);
1403 if (result < 0) {
1404 ERR("ltt_trace_start failed");
1405 return;
1406 }
1407
1408 /* Do this after the trace is started in order to avoid creating confusion
1409 * if the trace fails to start. */
1410 inform_consumer_daemon(trace_name);
1411 }
1412
1413 return;
1414
1415 	/* FIXME: on error, should tear down what was already set up, in reverse order */
1416
1417 }
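/* Environment variables consulted by the constructor above (a summary of the
 * code, with an illustrative invocation whose values are only examples):
 * UST_AUTOPROBE, UST_OVERWRITE, UST_AUTOCOLLECT, UST_SUBBUF_SIZE,
 * UST_SUBBUF_NUM and UST_TRACE, plus UST_DAEMON_SOCKET read in connect_ustd().
 *
 *   UST_TRACE=1 UST_SUBBUF_SIZE=131072 UST_SUBBUF_NUM=8 ./app
 *
 * would start early tracing of the "auto" trace, with the "ust" channel
 * defaulting to eight 128 KiB sub-buffers.
 */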
1418
1419 /* This is only called if we terminate normally, not with an unhandled signal,
1420 * so we cannot rely on it. However, for now, LTTV requires that the header of
1421 * the last sub-buffer contain a valid end time for the trace. This is done
1422 * automatically only when the trace is properly stopped.
1423 *
1424 * If the traced program crashed, it is always possible to manually add the
1425 * right value in the header, or to open the trace in text mode.
1426 *
1427 * FIXME: Fix LTTV so it doesn't need this.
1428 */
1429
1430 static void destroy_traces(void)
1431 {
1432 int result;
1433
1434 /* if trace running, finish it */
1435
1436 DBG("destructor stopping traces");
1437
1438 result = ltt_trace_stop("auto");
1439 if (result == -1) {
1440 ERR("ltt_trace_stop error");
1441 }
1442
1443 result = ltt_trace_destroy("auto", 0);
1444 if (result == -1) {
1445 ERR("ltt_trace_destroy error");
1446 }
1447 }
1448
1449 static int trace_recording(void)
1450 {
1451 int retval = 0;
1452 struct ust_trace *trace;
1453
1454 ltt_lock_traces();
1455
1456 list_for_each_entry(trace, &ltt_traces.head, list) {
1457 if (trace->active) {
1458 retval = 1;
1459 break;
1460 }
1461 }
1462
1463 ltt_unlock_traces();
1464
1465 return retval;
1466 }
1467
1468 int restarting_usleep(useconds_t usecs)
1469 {
1470 struct timespec tv;
1471 int result;
1472
1473 tv.tv_sec = 0;
1474 tv.tv_nsec = usecs * 1000;
1475
1476 do {
1477 result = nanosleep(&tv, &tv);
1478 } while (result == -1 && errno == EINTR);
1479
1480 return result;
1481 }
1482
1483 static void stop_listener(void)
1484 {
1485 int result;
1486
1487 if (!have_listener)
1488 return;
1489
1490 result = pthread_cancel(listener_thread);
1491 if (result != 0) {
1492 ERR("pthread_cancel: %s", strerror(result));
1493 }
1494 result = pthread_join(listener_thread, NULL);
1495 if (result != 0) {
1496 ERR("pthread_join: %s", strerror(result));
1497 }
1498 }
1499
1500 /* This destructor keeps the process alive for a few seconds in order
1501  * to give ustd time to connect to its buffers. This is necessary
1502 * for programs whose execution is very short. It is also useful in all
1503 * programs when tracing is started close to the end of the program
1504 * execution.
1505 *
1506 * FIXME: For now, this only works for the first trace created in a
1507 * process.
1508 */
1509
1510 static void __attribute__((destructor)) keepalive()
1511 {
1512 if (processpid != getpid()) {
1513 return;
1514 }
1515
1516 if (trace_recording() && LOAD_SHARED(buffers_to_export)) {
1517 int total = 0;
1518 DBG("Keeping process alive for consumer daemon...");
1519 while (LOAD_SHARED(buffers_to_export)) {
1520 const int interv = 200000;
1521 restarting_usleep(interv);
1522 total += interv;
1523
1524 if (total >= 3000000) {
1525 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1526 break;
1527 }
1528 }
1529 DBG("Finally dying...");
1530 }
1531
1532 destroy_traces();
1533
1534 /* Ask the listener to stop and clean up. */
1535 stop_listener();
1536 }
1537
1538 void ust_potential_exec(void)
1539 {
1540 trace_mark(ust, potential_exec, MARK_NOARGS);
1541
1542 	DBG("ust_potential_exec");
1543
1544 keepalive();
1545 }
1546
1547 /* Notify ust that there was a fork. This needs to be called inside
1548 * the new process, anytime a process whose memory is not shared with
1549 * the parent is created. If this function is not called, the events
1550 * of the new process will not be collected.
1551 *
1552 * Signals should be disabled before the fork and reenabled only after
1553 * this call in order to guarantee tracing is not started before ust_fork()
1554 * sanitizes the new process.
1555 */
1556
1557 static void ust_fork(void)
1558 {
1559 struct ust_buffer *buf, *buf_tmp;
1560 struct ustcomm_sock *sock, *sock_tmp;
1561 int result;
1562
1563 /* FIXME: technically, the locks could have been taken before the fork */
1564 DBG("ust: forking");
1565
1566 /* Get the pid of the new process */
1567 processpid = getpid();
1568
1569 /* break lock if necessary */
1570 ltt_unlock_traces();
1571
1572 ltt_trace_stop("auto");
1573 ltt_trace_destroy("auto", 1);
1574 /* Delete all active connections, but leave them in the epoll set */
1575 list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
1576 ustcomm_del_sock(sock, 1);
1577 }
1578
1579 /* Delete all blocked consumers */
1580 list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
1581 open_buffers_list) {
1582 result = close(buf->data_ready_fd_read);
1583 if (result == -1) {
1584 PERROR("close");
1585 }
1586 result = close(buf->data_ready_fd_write);
1587 if (result == -1) {
1588 PERROR("close");
1589 }
1590 list_del(&buf->open_buffers_list);
1591 }
1592
1593 	/* Clean up the listener socket and epoll, keeping the socket file */
1594 ustcomm_del_named_sock(listen_sock, 1);
1595 close(epoll_fd);
1596
1597 /* Re-start the launch sequence */
1598 STORE_SHARED(buffers_to_export, 0);
1599 have_listener = 0;
1600
1601 /* Set up epoll */
1602 epoll_fd = epoll_create(MAX_EVENTS);
1603 if (epoll_fd == -1) {
1604 ERR("epoll_create failed, tracing shutting down");
1605 return;
1606 }
1607
1608 /* Create the socket */
1609 listen_sock = init_app_socket(epoll_fd);
1610 if (!listen_sock) {
1611 ERR("failed to create application socket,"
1612 " tracing shutting down");
1613 return;
1614 }
1615 create_listener();
1616 ltt_trace_setup("auto");
1617 result = ltt_trace_set_type("auto", "ustrelay");
1618 if (result < 0) {
1619 ERR("ltt_trace_set_type failed");
1620 return;
1621 }
1622
1623 ltt_trace_alloc("auto");
1624 ltt_trace_start("auto");
1625 inform_consumer_daemon("auto");
1626 }
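/* In short, ust_fork() above tears the child's tracing state down to zero
 * (client connections, exported buffers, listener socket and epoll set) and
 * then replays the normal start-up path for a fresh "auto" trace, so the
 * child is traced independently of its parent.
 */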
1627
1628 void ust_before_fork(ust_fork_info_t *fork_info)
1629 {
1630 	/* Disable signals. This avoids having the child intervene
1631 	 * before it is properly set up for tracing. It is
1632 * safer to disable all signals, because then we know we are not
1633 * breaking anything by restoring the original mask.
1634 */
1635 sigset_t all_sigs;
1636 int result;
1637
1638 /* FIXME:
1639 - only do this if tracing is active
1640 */
1641
1642 /* Disable signals */
1643 sigfillset(&all_sigs);
1644 result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
1645 if (result == -1) {
1646 PERROR("sigprocmask");
1647 return;
1648 }
1649 }
1650
1651 /* Don't call this function directly in a traced program */
1652 static void ust_after_fork_common(ust_fork_info_t *fork_info)
1653 {
1654 int result;
1655
1656 /* Restore signals */
1657 result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
1658 if (result == -1) {
1659 PERROR("sigprocmask");
1660 return;
1661 }
1662 }
1663
1664 void ust_after_fork_parent(ust_fork_info_t *fork_info)
1665 {
1666 /* Reenable signals */
1667 ust_after_fork_common(fork_info);
1668 }
1669
1670 void ust_after_fork_child(ust_fork_info_t *fork_info)
1671 {
1672 /* First sanitize the child */
1673 ust_fork();
1674
1675 /* Then reenable interrupts */
1676 ust_after_fork_common(fork_info);
1677 }
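/* Hypothetical usage sketch for the three hooks above in a traced program
 * (names other than the ust_* identifiers are made up for the example):
 *
 *   ust_fork_info_t info;
 *
 *   ust_before_fork(&info);
 *   pid_t child = fork();
 *   if (child == 0) {
 *           ust_after_fork_child(&info);
 *           run_child();            // application code, for illustration
 *   } else {
 *           ust_after_fork_parent(&info);
 *   }
 */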
1678