1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <stdint.h>
27 #include <pthread.h>
28 #include <signal.h>
29 #include <sys/epoll.h>
30 #include <sys/time.h>
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <fcntl.h>
34 #include <poll.h>
35 #include <regex.h>
/* Standard headers used below: errno codes, memset/strerror, nanosleep, close/getpid */
#include <errno.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
36 #include <urcu/uatomic_arch.h>
37 #include <urcu/list.h>
38
39 #include <ust/marker.h>
40 #include <ust/tracepoint.h>
41 #include <ust/tracectl.h>
42 #include "tracer.h"
43 #include "usterr.h"
44 #include "ustcomm.h"
45 #include "buffers.h"
46 #include "marker-control.h"
47
48 /* This should only be accessed by the constructor, before the creation
49 * of the listener, and then only by the listener.
50 */
51 s64 pidunique = -1LL;
52
53 /* The process pid is used to detect a non-traceable fork
54 * and allow the non-traceable fork to be ignored
55 * by destructor sequences in libust
56 */
57 static pid_t processpid = 0;
58
59 static struct ustcomm_header _receive_header;
60 static struct ustcomm_header *receive_header = &_receive_header;
61 static char receive_buffer[USTCOMM_BUFFER_SIZE];
62 static char send_buffer[USTCOMM_BUFFER_SIZE];
63
64 static int epoll_fd;
65 static struct ustcomm_sock *listen_sock;
66
67 extern struct chan_info_struct chan_infos[];
68
69 static struct cds_list_head open_buffers_list = CDS_LIST_HEAD_INIT(open_buffers_list);
70
71 static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);
72
73 /* Number of buffers not yet mapped by the consumer daemon. Shared between
 * the listener and the main thread; accessed with CMM_LOAD_SHARED() and
 * CMM_STORE_SHARED() rather than being declared volatile.
 */
74 int buffers_to_export = 0;
75
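/* Build a value that is unique, for practical purposes, to this process
 * image: the seconds of the current time in the high 32 bits, the
 * microseconds in the low 32 bits. This lets tools tell apart two
 * processes that share a pid, e.g. before and after an exec.
 */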
76 static long long make_pidunique(void)
77 {
78 s64 retval;
79 struct timeval tv;
80
81 gettimeofday(&tv, NULL);
82
83 retval = tv.tv_sec;
84 retval <<= 32;
85 retval |= tv.tv_usec;
86
87 return retval;
88 }
89
90 static void print_markers(FILE *fp)
91 {
92 struct marker_iter iter;
93
94 lock_markers();
95 marker_iter_reset(&iter);
96 marker_iter_start(&iter);
97
98 while (iter.marker) {
99 fprintf(fp, "marker: %s/%s %d \"%s\" %p\n",
100 iter.marker->channel,
101 iter.marker->name,
102 (int)imv_read(iter.marker->state),
103 iter.marker->format,
104 iter.marker->location);
105 marker_iter_next(&iter);
106 }
107 unlock_markers();
108 }
109
110 static void print_trace_events(FILE *fp)
111 {
112 struct trace_event_iter iter;
113
114 lock_trace_events();
115 trace_event_iter_reset(&iter);
116 trace_event_iter_start(&iter);
117
118 while (iter.trace_event) {
119 fprintf(fp, "trace_event: %s\n", iter.trace_event->name);
120 trace_event_iter_next(&iter);
121 }
122 unlock_trace_events();
123 }
124
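/* Connect to the consumer daemon (ustd) control socket. The path defaults
 * to SOCK_DIR "/ustd" and can be overridden with the UST_DAEMON_SOCKET
 * environment variable. Returns a connected fd, or a negative error code.
 */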
125 static int connect_ustd(void)
126 {
127 int result, fd;
128 char default_daemon_path[] = SOCK_DIR "/ustd";
129 char *explicit_daemon_path, *daemon_path;
130
131 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
132 if (explicit_daemon_path) {
133 daemon_path = explicit_daemon_path;
134 } else {
135 daemon_path = default_daemon_path;
136 }
137
138 DBG("Connecting to daemon_path %s", daemon_path);
139
140 result = ustcomm_connect_path(daemon_path, &fd);
141 if (result < 0) {
142 WARN("connect_ustd failed, daemon_path: %s",
143 daemon_path);
144 return result;
145 }
146
147 return fd;
148 }
149
150
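/* Ask the consumer daemon, over an already-connected socket, to start
 * consuming one per-cpu buffer (trace/channel_cpu) of this process.
 */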
151 static void request_buffer_consumer(int sock,
152 const char *trace,
153 const char *channel,
154 int cpu)
155 {
156 struct ustcomm_header send_header, recv_header;
157 struct ustcomm_buffer_info buf_inf;
158 int result = 0;
159
160 result = ustcomm_pack_buffer_info(&send_header,
161 &buf_inf,
162 trace,
163 channel,
164 cpu);
165
166 if (result < 0) {
167 ERR("failed to pack buffer info message %s_%d",
168 channel, cpu);
169 return;
170 }
171
172 buf_inf.pid = getpid();
173 send_header.command = CONSUME_BUFFER;
174
175 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
176 &recv_header, NULL);
177 if (result <= 0) {
178 PERROR("request for buffer consumer failed, is the daemon online?");
179 }
180
181 return;
182 }
183
184 /* Ask the daemon to collect a trace called trace_name that is being
185 * produced by this pid.
186 *
187 * The trace must be at least allocated (it may also already be started),
188 * because _ltt_trace_find() is used to look it up.
189 */
190
191 static void inform_consumer_daemon(const char *trace_name)
192 {
193 int sock, i,j;
194 struct ust_trace *trace;
195 const char *ch_name;
196
197 sock = connect_ustd();
198 if (sock < 0) {
199 return;
200 }
201
202 DBG("Connected to ustd");
203
204 ltt_lock_traces();
205
206 trace = _ltt_trace_find(trace_name);
207 if (trace == NULL) {
208 WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
209 goto unlock_traces;
210 }
211
212 for (i=0; i < trace->nr_channels; i++) {
213 if (trace->channels[i].request_collection) {
214 /* iterate on all cpus */
215 for (j=0; j<trace->channels[i].n_cpus; j++) {
216 ch_name = trace->channels[i].channel_name;
217 request_buffer_consumer(sock, trace_name,
218 ch_name, j);
219 CMM_STORE_SHARED(buffers_to_export,
220 CMM_LOAD_SHARED(buffers_to_export)+1);
221 }
222 }
223 }
224
225 unlock_traces:
226 ltt_unlock_traces();
227
228 close(sock);
229 }
230
231 static struct ust_channel *find_channel(const char *ch_name,
232 struct ust_trace *trace)
233 {
234 int i;
235
236 for (i=0; i<trace->nr_channels; i++) {
237 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
238 return &trace->channels[i];
239 }
240 }
241
242 return NULL;
243 }
244
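/* Look up the shm ids of a per-cpu buffer and of its buffer structure, plus
 * the read end of its data-ready pipe, so they can be handed to the
 * consumer daemon.
 */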
245 static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
246 int ch_cpu,
247 int *buf_shmid,
248 int *buf_struct_shmid,
249 int *buf_pipe_fd)
250 {
251 struct ust_trace *trace;
252 struct ust_channel *channel;
253 struct ust_buffer *buf;
254
255 DBG("get_buffer_shmid_pipe_fd");
256
257 ltt_lock_traces();
258 trace = _ltt_trace_find(trace_name);
259 ltt_unlock_traces();
260
261 if (trace == NULL) {
262 ERR("cannot find trace!");
263 return -ENODATA;
264 }
265
266 channel = find_channel(ch_name, trace);
267 if (!channel) {
268 ERR("cannot find channel %s!", ch_name);
269 return -ENODATA;
270 }
271
272 buf = channel->buf[ch_cpu];
273
274 *buf_shmid = buf->shmid;
275 *buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
276 *buf_pipe_fd = buf->data_ready_fd_read;
277
278 return 0;
279 }
280
281 static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
282 int *num, int *size)
283 {
284 struct ust_trace *trace;
285 struct ust_channel *channel;
286
287 DBG("get_subbuf_size");
288
289 ltt_lock_traces();
290 trace = _ltt_trace_find(trace_name);
291 ltt_unlock_traces();
292
293 if (!trace) {
294 ERR("cannot find trace!");
295 return -ENODATA;
296 }
297
298 channel = find_channel(ch_name, trace);
299 if (!channel) {
300 ERR("unable to find channel");
301 return -ENODATA;
302 }
303
304 *num = channel->subbuf_cnt;
305 *size = channel->subbuf_size;
306
307 return 0;
308 }
309
310 /* Return the smallest power of two that is greater than or equal to v */
311
312 static unsigned int pow2_higher_or_eq(unsigned int v)
313 {
314 int hb = fls(v);
315 int retval = 1<<(hb-1);
316
317 if (v-retval == 0)
318 return retval;
319 else
320 return retval<<1;
321 }
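/* For example, with the fls()-based implementation above:
 *   pow2_higher_or_eq(4096) == 4096
 *   pow2_higher_or_eq(5000) == 8192
 * Note that v is expected to be non-zero; fls(0) == 0 would make the shift
 * above undefined.
 */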
322
323 static int set_subbuf_size(const char *trace_name, const char *ch_name,
324 unsigned int size)
325 {
326 unsigned int power;
327 int retval = 0;
328 struct ust_trace *trace;
329 struct ust_channel *channel;
330
331 DBG("set_subbuf_size");
332
333 power = pow2_higher_or_eq(size);
334 power = max_t(unsigned int, 2u, power);
335 if (power != size) {
336 WARN("sub-buffer size %u is not a power of two, using %u instead", size, power);
337 }
338
339 ltt_lock_traces();
340 trace = _ltt_trace_find_setup(trace_name);
341 if (trace == NULL) {
342 ERR("cannot find trace!");
343 retval = -ENODATA;
344 goto unlock_traces;
345 }
346
347 channel = find_channel(ch_name, trace);
348 if (!channel) {
349 ERR("unable to find channel");
350 retval = -ENODATA;
351 goto unlock_traces;
352 }
353
354 channel->subbuf_size = power;
355 DBG("the set_subbuf_size for the requested channel is %u", channel->subbuf_size);
356
357 unlock_traces:
358 ltt_unlock_traces();
359
360 return retval;
361 }
362
363 static int set_subbuf_num(const char *trace_name, const char *ch_name,
364 unsigned int num)
365 {
366 struct ust_trace *trace;
367 struct ust_channel *channel;
368 int retval = 0;
369
370 DBG("set_subbuf_num");
371
372 if (num < 2) {
373 ERR("subbuffer count should be greater than 2");
374 return -EINVAL;
375 }
376
377 ltt_lock_traces();
378 trace = _ltt_trace_find_setup(trace_name);
379 if (trace == NULL) {
380 ERR("cannot find trace!");
381 retval = -ENODATA;
382 goto unlock_traces;
383 }
384
385 channel = find_channel(ch_name, trace);
386 if (!channel) {
387 ERR("unable to find channel");
388 retval = -ENODATA;
389 goto unlock_traces;
390 }
391
392 channel->subbuf_cnt = num;
393 DBG("the set_subbuf_cnt for the requested channel is %zd", channel->subbuf_cnt);
394
395 unlock_traces:
396 ltt_unlock_traces();
397 return retval;
398 }
399
400 static int get_subbuffer(const char *trace_name, const char *ch_name,
401 int ch_cpu, long *consumed_old)
402 {
403 int retval = 0;
404 struct ust_trace *trace;
405 struct ust_channel *channel;
406 struct ust_buffer *buf;
407
408 DBG("get_subbuf");
409
410 *consumed_old = 0;
411
412 ltt_lock_traces();
413 trace = _ltt_trace_find(trace_name);
414
415 if (!trace) {
416 DBG("Cannot find trace. It was likely destroyed by the user.");
417 retval = -ENODATA;
418 goto unlock_traces;
419 }
420
421 channel = find_channel(ch_name, trace);
422 if (!channel) {
423 ERR("unable to find channel");
424 retval = -ENODATA;
425 goto unlock_traces;
426 }
427
428 buf = channel->buf[ch_cpu];
429
430 retval = ust_buffers_get_subbuf(buf, consumed_old);
431 if (retval < 0) {
432 WARN("missed buffer?");
433 }
434
435 unlock_traces:
436 ltt_unlock_traces();
437
438 return retval;
439 }
440
441
442 static int notify_buffer_mapped(const char *trace_name,
443 const char *ch_name,
444 int ch_cpu)
445 {
446 int retval = 0;
447 struct ust_trace *trace;
448 struct ust_channel *channel;
449 struct ust_buffer *buf;
450
451 DBG("get_buffer_fd");
452
453 ltt_lock_traces();
454 trace = _ltt_trace_find(trace_name);
455
456 if (!trace) {
457 retval = -ENODATA;
458 DBG("Cannot find trace. It was likely destroyed by the user.");
459 goto unlock_traces;
460 }
461
462 channel = find_channel(ch_name, trace);
463 if (!channel) {
464 retval = -ENODATA;
465 ERR("unable to find channel");
466 goto unlock_traces;
467 }
468
469 buf = channel->buf[ch_cpu];
470
471 /* Reaching this point proves that the daemon has mapped the buffer in
472 * its memory, so we may now decrement buffers_to_export.
473 */
474 if (uatomic_read(&buf->consumed) == 0) {
475 DBG("decrementing buffers_to_export");
476 CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
477 }
478
479 /* The buffer has been exported, ergo, we can add it to the
480 * list of open buffers
481 */
482 cds_list_add(&buf->open_buffers_list, &open_buffers_list);
483
484 unlock_traces:
485 ltt_unlock_traces();
486
487 return retval;
488 }
489
490 static int put_subbuffer(const char *trace_name, const char *ch_name,
491 int ch_cpu, long consumed_old)
492 {
493 int retval = 0;
494 struct ust_trace *trace;
495 struct ust_channel *channel;
496 struct ust_buffer *buf;
497
498 DBG("put_subbuf");
499
500 ltt_lock_traces();
501 trace = _ltt_trace_find(trace_name);
502
503 if (!trace) {
504 retval = -ENODATA;
505 DBG("Cannot find trace. It was likely destroyed by the user.");
506 goto unlock_traces;
507 }
508
509 channel = find_channel(ch_name, trace);
510 if (!channel) {
511 retval = -ENODATA;
512 ERR("unable to find channel");
513 goto unlock_traces;
514 }
515
516 buf = channel->buf[ch_cpu];
517
518 retval = ust_buffers_put_subbuf(buf, consumed_old);
519 if (retval < 0) {
520 WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
521 ch_name, ch_cpu);
522 } else {
523 DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
524 ch_name, ch_cpu);
525 }
526
527 unlock_traces:
528 ltt_unlock_traces();
529
530 return retval;
531 }
532
533 static void listener_cleanup(void *ptr)
534 {
535 ustcomm_del_named_sock(listen_sock, 0);
536 }
537
538 static void force_subbuf_switch(void)
539 {
540 struct ust_buffer *buf;
541
542 cds_list_for_each_entry(buf, &open_buffers_list,
543 open_buffers_list) {
544 ltt_force_switch(buf, FORCE_FLUSH);
545 }
546 }
547
548 /* Simple commands are those which need only respond with a return value. */
549 static int process_simple_client_cmd(int command, char *recv_buf)
550 {
551 int result;
552
553 switch(command) {
554 case SET_SOCK_PATH:
555 {
556 struct ustcomm_single_field *sock_msg;
557 sock_msg = (struct ustcomm_single_field *)recv_buf;
558 result = ustcomm_unpack_single_field(sock_msg);
559 if (result < 0) {
560 return result;
561 }
562 return setenv("UST_DAEMON_SOCKET", sock_msg->field, 1);
563 }
564
565 case FORCE_SUBBUF_SWITCH:
566 /* FIXME: return codes? */
567 force_subbuf_switch();
568
569 break;
570
571 default:
572 return -EINVAL;
573 }
574
575 return 0;
576 }
577
578
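/* Trace control commands. The full lifecycle of a trace is
 * setup -> set type -> alloc -> start -> stop -> destroy; START bundles
 * the first four steps (and informs the consumer daemon after alloc),
 * while the other commands perform individual steps.
 */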
579 static int process_trace_cmd(int command, char *trace_name)
580 {
581 int result;
582 char trace_type[] = "ustrelay";
583
584 switch(command) {
585 case START:
586 /* START sets up the trace, allocates it, and starts it */
587 result = ltt_trace_setup(trace_name);
588 if (result < 0) {
589 ERR("ltt_trace_setup failed");
590 return result;
591 }
592
593 result = ltt_trace_set_type(trace_name, trace_type);
594 if (result < 0) {
595 ERR("ltt_trace_set_type failed");
596 return result;
597 }
598
599 result = ltt_trace_alloc(trace_name);
600 if (result < 0) {
601 ERR("ltt_trace_alloc failed");
602 return result;
603 }
604
605 inform_consumer_daemon(trace_name);
606
607 result = ltt_trace_start(trace_name);
608 if (result < 0) {
609 ERR("ltt_trace_start failed");
610 return result;
611 }
612
613 return 0;
614 case SETUP_TRACE:
615 DBG("trace setup");
616
617 result = ltt_trace_setup(trace_name);
618 if (result < 0) {
619 ERR("ltt_trace_setup failed");
620 return result;
621 }
622
623 result = ltt_trace_set_type(trace_name, trace_type);
624 if (result < 0) {
625 ERR("ltt_trace_set_type failed");
626 return result;
627 }
628
629 return 0;
630 case ALLOC_TRACE:
631 DBG("trace alloc");
632
633 result = ltt_trace_alloc(trace_name);
634 if (result < 0) {
635 ERR("ltt_trace_alloc failed");
636 return result;
637 }
638 inform_consumer_daemon(trace_name);
639
640 return 0;
641
642 case CREATE_TRACE:
643 DBG("trace create");
644
645 result = ltt_trace_setup(trace_name);
646 if (result < 0) {
647 ERR("ltt_trace_setup failed");
648 return result;
649 }
650
651 result = ltt_trace_set_type(trace_name, trace_type);
652 if (result < 0) {
653 ERR("ltt_trace_set_type failed");
654 return result;
655 }
656
657 return 0;
658 case START_TRACE:
659 DBG("trace start");
660
661 result = ltt_trace_alloc(trace_name);
662 if (result < 0) {
663 ERR("ltt_trace_alloc failed");
664 return result;
665 }
666 if (!result) {
667 inform_consumer_daemon(trace_name);
668 }
669
670 result = ltt_trace_start(trace_name);
671 if (result < 0) {
672 ERR("ltt_trace_start failed");
673 return result;
674 }
675
676 return 0;
677 case STOP_TRACE:
678 DBG("trace stop");
679
680 result = ltt_trace_stop(trace_name);
681 if (result < 0) {
682 ERR("ltt_trace_stop failed");
683 return result;
684 }
685
686 return 0;
687 case DESTROY_TRACE:
688 DBG("trace destroy");
689
690 result = ltt_trace_destroy(trace_name, 0);
691 if (result < 0) {
692 ERR("ltt_trace_destroy failed");
693 return result;
694 }
695 return 0;
696 }
697
698 return 0;
699 }
700
701
702 static void process_channel_cmd(int sock, int command,
703 struct ustcomm_channel_info *ch_inf)
704 {
705 struct ustcomm_header _reply_header;
706 struct ustcomm_header *reply_header = &_reply_header;
707 struct ustcomm_channel_info *reply_msg =
708 (struct ustcomm_channel_info *)send_buffer;
709 int result, offset = 0, num, size;
710
711 memset(reply_header, 0, sizeof(*reply_header));
712
713 switch (command) {
714 case GET_SUBBUF_NUM_SIZE:
715 result = get_subbuf_num_size(ch_inf->trace,
716 ch_inf->channel,
717 &num, &size);
718 if (result < 0) {
719 reply_header->result = result;
720 break;
721 }
722
723 reply_msg->channel = USTCOMM_POISON_PTR;
724 reply_msg->subbuf_num = num;
725 reply_msg->subbuf_size = size;
726
727
728 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
729
730 break;
731 case SET_SUBBUF_NUM:
732 reply_header->result = set_subbuf_num(ch_inf->trace,
733 ch_inf->channel,
734 ch_inf->subbuf_num);
735
736 break;
737 case SET_SUBBUF_SIZE:
738 reply_header->result = set_subbuf_size(ch_inf->trace,
739 ch_inf->channel,
740 ch_inf->subbuf_size);
741
742
743 break;
744 }
745 if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
746 ERR("ustcomm_send failed");
747 }
748 }
749
750 static void process_buffer_cmd(int sock, int command,
751 struct ustcomm_buffer_info *buf_inf)
752 {
753 struct ustcomm_header _reply_header;
754 struct ustcomm_header *reply_header = &_reply_header;
755 struct ustcomm_buffer_info *reply_msg =
756 (struct ustcomm_buffer_info *)send_buffer;
757 int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
758 long consumed_old;
759
760 memset(reply_header, 0, sizeof(*reply_header));
761
762 switch (command) {
763 case GET_BUF_SHMID_PIPE_FD:
764 result = get_buffer_shmid_pipe_fd(buf_inf->trace,
765 buf_inf->channel,
766 buf_inf->ch_cpu,
767 &buf_shmid,
768 &buf_struct_shmid,
769 &buf_pipe_fd);
770 if (result < 0) {
771 reply_header->result = result;
772 break;
773 }
774
775 reply_msg->channel = USTCOMM_POISON_PTR;
776 reply_msg->buf_shmid = buf_shmid;
777 reply_msg->buf_struct_shmid = buf_struct_shmid;
778
779 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
780 reply_header->fd_included = 1;
781
782 if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
783 &buf_pipe_fd) < 0) {
784 ERR("ustcomm_send failed");
785 }
786 return;
787
788 case NOTIFY_BUF_MAPPED:
789 reply_header->result =
790 notify_buffer_mapped(buf_inf->trace,
791 buf_inf->channel,
792 buf_inf->ch_cpu);
793 break;
794 case GET_SUBBUFFER:
795 result = get_subbuffer(buf_inf->trace, buf_inf->channel,
796 buf_inf->ch_cpu, &consumed_old);
797 if (result < 0) {
798 reply_header->result = result;
799 break;
800 }
801
802 reply_msg->channel = USTCOMM_POISON_PTR;
803 reply_msg->consumed_old = consumed_old;
804
805 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
806
807 break;
808 case PUT_SUBBUFFER:
809 result = put_subbuffer(buf_inf->trace, buf_inf->channel,
810 buf_inf->ch_cpu,
811 buf_inf->consumed_old);
812 reply_header->result = result;
813
814 break;
815 }
816
817 if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
818 ERR("ustcomm_send failed");
819 }
820
821 }
822
823 static void process_marker_cmd(int sock, int command,
824 struct ustcomm_marker_info *marker_inf)
825 {
826 struct ustcomm_header _reply_header;
827 struct ustcomm_header *reply_header = &_reply_header;
828 int result = -EINVAL; /* in case no case below matches */
829
830 memset(reply_header, 0, sizeof(*reply_header));
831
832 switch(command) {
833 case ENABLE_MARKER:
834
835 result = ltt_marker_connect(marker_inf->channel,
836 marker_inf->marker,
837 "default");
838 if (result < 0) {
839 WARN("could not enable marker; channel=%s,"
840 " name=%s",
841 marker_inf->channel,
842 marker_inf->marker);
843
844 }
845 break;
846 case DISABLE_MARKER:
847 result = ltt_marker_disconnect(marker_inf->channel,
848 marker_inf->marker,
849 "default");
850 if (result < 0) {
851 WARN("could not disable marker; channel=%s,"
852 " name=%s",
853 marker_inf->channel,
854 marker_inf->marker);
855 }
856 break;
857 }
858
859 reply_header->result = result;
860
861 if (ustcomm_send(sock, reply_header, NULL) < 0) {
862 ERR("ustcomm_send failed");
863 }
864
865 }
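
/* Dispatch one received command. Commands with a structured payload are
 * unpacked and handed to the per-type handlers above, which send their own
 * reply; the remaining commands are answered directly via send_response.
 */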
866 static void process_client_cmd(struct ustcomm_header *recv_header,
867 char *recv_buf, int sock)
868 {
869 int result;
870 struct ustcomm_header _reply_header;
871 struct ustcomm_header *reply_header = &_reply_header;
872 char *send_buf = send_buffer;
873
874 memset(reply_header, 0, sizeof(*reply_header));
875 memset(send_buf, 0, sizeof(send_buffer));
876
877 switch(recv_header->command) {
878 case GET_SUBBUF_NUM_SIZE:
879 case SET_SUBBUF_NUM:
880 case SET_SUBBUF_SIZE:
881 {
882 struct ustcomm_channel_info *ch_inf;
883 ch_inf = (struct ustcomm_channel_info *)recv_buf;
884 result = ustcomm_unpack_channel_info(ch_inf);
885 if (result < 0) {
886 ERR("couldn't unpack channel info");
887 reply_header->result = -EINVAL;
888 goto send_response;
889 }
890 process_channel_cmd(sock, recv_header->command, ch_inf);
891 return;
892 }
893 case GET_BUF_SHMID_PIPE_FD:
894 case NOTIFY_BUF_MAPPED:
895 case GET_SUBBUFFER:
896 case PUT_SUBBUFFER:
897 {
898 struct ustcomm_buffer_info *buf_inf;
899 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
900 result = ustcomm_unpack_buffer_info(buf_inf);
901 if (result < 0) {
902 ERR("couldn't unpack buffer info");
903 reply_header->result = -EINVAL;
904 goto send_response;
905 }
906 process_buffer_cmd(sock, recv_header->command, buf_inf);
907 return;
908 }
909 case ENABLE_MARKER:
910 case DISABLE_MARKER:
911 {
912 struct ustcomm_marker_info *marker_inf;
913 marker_inf = (struct ustcomm_marker_info *)recv_buf;
914 result = ustcomm_unpack_marker_info(marker_inf);
915 if (result < 0) {
916 ERR("couldn't unpack marker info");
917 reply_header->result = -EINVAL;
918 goto send_response;
919 }
920 process_marker_cmd(sock, recv_header->command, marker_inf);
921 return;
922 }
923 case LIST_MARKERS:
924 {
925 char *ptr;
926 size_t size;
927 FILE *fp;
928
929 fp = open_memstream(&ptr, &size);
930 if (fp == NULL) {
931 ERR("opening memstream failed");
932 return;
933 }
934 print_markers(fp);
935 fclose(fp);
936
937 reply_header->size = size;
938
939 result = ustcomm_send(sock, reply_header, ptr);
940
941 free(ptr);
942
943 if (result < 0) {
944 PERROR("failed to send markers list");
945 }
946
947 break;
948 }
949 case LIST_TRACE_EVENTS:
950 {
951 char *ptr;
952 size_t size;
953 FILE *fp;
954
955 fp = open_memstream(&ptr, &size);
956 if (fp == NULL) {
957 ERR("opening memstream failed");
958 return;
959 }
960 print_trace_events(fp);
961 fclose(fp);
962
963 reply_header->size = size;
964
965 result = ustcomm_send(sock, reply_header, ptr);
966
967 free(ptr);
968
969 if (result < 0) {
970 ERR("list_trace_events failed");
971 return;
972 }
973
974 break;
975 }
976 case LOAD_PROBE_LIB:
977 {
978 char *libfile;
979
980 /* FIXME: No functionality at all... */
981 libfile = recv_buf;
982
983 DBG("load_probe_lib loading %s", libfile);
984
985 break;
986 }
987 case GET_PIDUNIQUE:
988 {
989 struct ustcomm_pidunique *pid_msg;
990 pid_msg = (struct ustcomm_pidunique *)send_buf;
991
992 pid_msg->pidunique = pidunique;
993 reply_header->size = sizeof(*pid_msg); /* struct size, not pointer size */
994
995 goto send_response;
996
997 }
998 case GET_SOCK_PATH:
999 {
1000 struct ustcomm_single_field *sock_msg;
1001 char *sock_path_env;
1002
1003 sock_msg = (struct ustcomm_single_field *)send_buf;
1004
1005 sock_path_env = getenv("UST_DAEMON_SOCKET");
1006
1007 if (!sock_path_env) {
1008 result = ustcomm_pack_single_field(reply_header,
1009 sock_msg,
1010 SOCK_DIR "/ustd");
1011
1012 } else {
1013 result = ustcomm_pack_single_field(reply_header,
1014 sock_msg,
1015 sock_path_env);
1016 }
1017 reply_header->result = result;
1018
1019 goto send_response;
1020 }
1021 case START:
1022 case SETUP_TRACE:
1023 case ALLOC_TRACE:
1024 case CREATE_TRACE:
1025 case START_TRACE:
1026 case STOP_TRACE:
1027 case DESTROY_TRACE:
1028 {
1029 struct ustcomm_single_field *trace_inf =
1030 (struct ustcomm_single_field *)recv_buf;
1031
1032 result = ustcomm_unpack_single_field(trace_inf);
1033 if (result < 0) {
1034 ERR("couldn't unpack trace info");
1035 reply_header->result = -EINVAL;
1036 goto send_response;
1037 }
1038
1039 reply_header->result =
1040 process_trace_cmd(recv_header->command,
1041 trace_inf->field);
1042 goto send_response;
1043
1044 }
1045 default:
1046 reply_header->result =
1047 process_simple_client_cmd(recv_header->command,
1048 recv_buf);
1049 goto send_response;
1050
1051 }
1052
1053 return;
1054
1055 send_response:
1056 ustcomm_send(sock, reply_header, send_buf);
1057 }
1058
1059 #define MAX_EVENTS 10
1060
1061 void *listener_main(void *p)
1062 {
1063 struct ustcomm_sock *epoll_sock;
1064 struct epoll_event events[MAX_EVENTS];
1065 struct sockaddr addr;
1066 int accept_fd, nfds, result, i, addr_size;
1067
1068 DBG("LISTENER");
1069
1070 pthread_cleanup_push(listener_cleanup, NULL);
1071
1072 for(;;) {
1073 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1074 if (nfds == -1) {
1075 PERROR("listener_main: epoll_wait failed");
1076 continue;
1077 }
1078
1079 for (i = 0; i < nfds; i++) {
1080 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1081 if (epoll_sock == listen_sock) {
1082 addr_size = sizeof(struct sockaddr);
1083 accept_fd = accept(epoll_sock->fd,
1084 &addr,
1085 (socklen_t *)&addr_size);
1086 if (accept_fd == -1) {
1087 PERROR("listener_main: accept failed");
1088 continue;
1089 }
1090 ustcomm_init_sock(accept_fd, epoll_fd,
1091 &ust_socks);
1092 } else {
1093 memset(receive_header, 0,
1094 sizeof(*receive_header));
1095 memset(receive_buffer, 0,
1096 sizeof(receive_buffer));
1097 result = ustcomm_recv(epoll_sock->fd,
1098 receive_header,
1099 receive_buffer);
1100 if (result == 0) {
1101 ustcomm_del_sock(epoll_sock, 0);
1102 } else {
1103 process_client_cmd(receive_header,
1104 receive_buffer,
1105 epoll_sock->fd);
1106 }
1107 }
1108 }
1109 }
1110
1111 pthread_cleanup_pop(1);
1112 }
1113
1114 /* These should only be accessed in the parent thread,
1115 * not the listener.
1116 */
1117 static volatile sig_atomic_t have_listener = 0;
1118 static pthread_t listener_thread;
1119
1120 void create_listener(void)
1121 {
1122 int result;
1123 sigset_t sig_all_blocked;
1124 sigset_t orig_parent_mask;
1125
1126 if (have_listener) {
1127 WARN("not creating listener because we already had one");
1128 return;
1129 }
1130
1131 /* A new thread created by pthread_create inherits the signal mask
1132 * from the parent. To avoid any signal being received by the
1133 * listener thread, we block all signals temporarily in the parent,
1134 * while we create the listener thread.
1135 */
1136
1137 sigfillset(&sig_all_blocked);
1138
1139 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1140 if (result) {
1141 PERROR("pthread_sigmask: %s", strerror(result));
1142 }
1143
1144 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1145 if (result) {
1146 ERR("pthread_create: %s", strerror(result));
1147 }
1148
1149 /* Restore original signal mask in parent */
1150 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1151 if (result) {
1152 PERROR("pthread_sigmask: %s", strerror(result));
1153 } else {
1154 have_listener = 1;
1155 }
1156 }
1157
1158 #define AUTOPROBE_DISABLED 0
1159 #define AUTOPROBE_ENABLE_ALL 1
1160 #define AUTOPROBE_ENABLE_REGEX 2
1161 static int autoprobe_method = AUTOPROBE_DISABLED;
1162 static regex_t autoprobe_regex;
1163
1164 static void auto_probe_connect(struct marker *m)
1165 {
1166 int result;
1167
1168 char* concat_name = NULL;
1169 const char *probe_name = "default";
1170
1171 if (autoprobe_method == AUTOPROBE_DISABLED) {
1172 return;
1173 } else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
1174 result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
1175 if (result == -1) {
1176 ERR("auto_probe_connect: asprintf failed (marker %s/%s)",
1177 m->channel, m->name);
1178 return;
1179 }
1180 if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
1181 free(concat_name);
1182 return;
1183 }
1184 free(concat_name);
1185 }
1186
1187 result = ltt_marker_connect(m->channel, m->name, probe_name);
1188 if (result && result != -EEXIST)
1189 ERR("ltt_marker_connect (marker = %s/%s, errno = %d)", m->channel, m->name, -result);
1190
1191 DBG("auto connected marker %s (addr: %p) %s to probe default", m->channel, m, m->name);
1192
1193 }
1194
1195 static struct ustcomm_sock * init_app_socket(int epoll_fd)
1196 {
1197 char *name;
1198 int result;
1199 struct ustcomm_sock *sock;
1200
1201 result = asprintf(&name, "%s/%d", SOCK_DIR, (int)getpid());
1202 if (result < 0) {
1203 ERR("string overflow allocating socket name, "
1204 "UST thread bailing");
1205 return NULL;
1206 }
1207
1208 result = ensure_dir_exists(SOCK_DIR);
1209 if (result == -1) {
1210 ERR("Unable to create socket directory %s, UST thread bailing",
1211 SOCK_DIR);
1212 goto free_name;
1213 }
1214
1215 sock = ustcomm_init_named_socket(name, epoll_fd);
1216 if (!sock) {
1217 ERR("Error initializing named socket (%s). Check that directory"
1218 "exists and that it is writable. UST thread bailing", name);
1219 goto free_name;
1220 }
1221
1222 free(name);
1223 return sock;
1224
1225 free_name:
1226 free(name);
1227 return NULL;
1228 }
1229
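/* Environment variables consulted by the constructor below:
 *   UST_AUTOPROBE    - connect the default probe to all markers, or only to
 *                      those matching a regex when the value starts with '/'
 *   UST_OVERWRITE    - 0 or 1, sets the default overwrite (flight recorder) mode
 *   UST_AUTOCOLLECT  - 0 or 1, sets whether channels request collection by default
 *   UST_SUBBUF_SIZE  - default sub-buffer size, rounded up to a power of two
 *   UST_SUBBUF_NUM   - default sub-buffer count, minimum 2
 *   UST_TRACE        - if set, set up and start an "auto" trace at startup
 * (UST_DAEMON_SOCKET is read in connect_ustd() and process_simple_client_cmd().)
 */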
1230 static void __attribute__((constructor)) init(void)
1231 {
1232 int result;
1233 char* autoprobe_val = NULL;
1234 char* subbuffer_size_val = NULL;
1235 char* subbuffer_count_val = NULL;
1236 unsigned int subbuffer_size;
1237 unsigned int subbuffer_count;
1238 unsigned int power;
1239
1240 /* Assign the pidunique, to be able to differentiate processes with the
1241 * same pid (e.g. before and after an exec).
1242 */
1243 pidunique = make_pidunique();
1244 processpid = getpid();
1245
1246 DBG("Tracectl constructor");
1247
1248 /* Set up epoll */
1249 epoll_fd = epoll_create(MAX_EVENTS);
1250 if (epoll_fd == -1) {
1251 ERR("epoll_create failed, tracing shutting down");
1252 return;
1253 }
1254
1255 /* Create the socket */
1256 listen_sock = init_app_socket(epoll_fd);
1257 if (!listen_sock) {
1258 ERR("failed to create application socket,"
1259 " tracing shutting down");
1260 return;
1261 }
1262
1263 create_listener();
1264
1265 autoprobe_val = getenv("UST_AUTOPROBE");
1266 if (autoprobe_val) {
1267 struct marker_iter iter;
1268
1269 DBG("Autoprobe enabled.");
1270
1271 /* Ensure markers are initialized */
1272 //init_markers();
1273
1274 /* Ensure marker control is initialized, for the probe */
1275 init_marker_control();
1276
1277 /* first, set the callback that will connect the
1278 * probe on new markers
1279 */
1280 if (autoprobe_val[0] == '/') {
1281 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1282 if (result) {
1283 char regexerr[150];
1284
1285 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1286 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1287 /* don't crash the application just for this */
1288 } else {
1289 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1290 }
1291 } else {
1292 /* just enable all instrumentation */
1293 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1294 }
1295
1296 marker_set_new_marker_cb(auto_probe_connect);
1297
1298 /* Now, connect the probes that were already registered. */
1299 marker_iter_reset(&iter);
1300 marker_iter_start(&iter);
1301
1302 DBG("now iterating on markers already registered");
1303 while (iter.marker) {
1304 DBG("now iterating on marker %s", iter.marker->name);
1305 auto_probe_connect(iter.marker);
1306 marker_iter_next(&iter);
1307 }
1308 }
1309
1310 if (getenv("UST_OVERWRITE")) {
1311 int val = atoi(getenv("UST_OVERWRITE"));
1312 if (val == 0 || val == 1) {
1313 CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
1314 } else {
1315 WARN("invalid value for UST_OVERWRITE");
1316 }
1317 }
1318
1319 if (getenv("UST_AUTOCOLLECT")) {
1320 int val = atoi(getenv("UST_AUTOCOLLECT"));
1321 if (val == 0 || val == 1) {
1322 CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
1323 } else {
1324 WARN("invalid value for UST_AUTOCOLLECT");
1325 }
1326 }
1327
1328 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1329 if (subbuffer_size_val) {
1330 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1331 power = pow2_higher_or_eq(subbuffer_size);
1332 if (power != subbuffer_size)
1333 WARN("UST_SUBBUF_SIZE %u is not a power of two, using %u instead", subbuffer_size, power);
1334 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1335 }
1336
1337 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1338 if (subbuffer_count_val) {
1339 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1340 if (subbuffer_count < 2)
1341 subbuffer_count = 2;
1342 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1343 }
1344
1345 if (getenv("UST_TRACE")) {
1346 char trace_name[] = "auto";
1347 char trace_type[] = "ustrelay";
1348
1349 DBG("starting early tracing");
1350
1351 /* Ensure marker control is initialized */
1352 init_marker_control();
1353
1354 /* Ensure markers are initialized */
1355 init_markers();
1356
1357 /* Ensure buffers are initialized, for the transport to be available.
1358 * We are about to set a trace type and it will fail without this.
1359 */
1360 init_ustrelay_transport();
1361
1362 /* FIXME: When starting early tracing (here), depending on the
1363 * order of constructors, it is very well possible some marker
1364 * sections are not yet registered. Because of this, some
1365 * channels may not be registered. Yet, we are about to ask the
1366 * daemon to collect the channels. Channels which are not yet
1367 * registered will not be collected.
1368 *
1369 * Currently, in LTTng, there is no way to add a channel after
1370 * trace start. The reason for this is that it induces complex
1371 * concurrency issues on the trace structures, which can only
1372 * be resolved using RCU. This has not been done yet. As a
1373 * workaround, we are forcing the registration of the "ust"
1374 * channel here. This is the only channel (apart from metadata)
1375 * that can be reliably used in early tracing.
1376 *
1377 * Non-early tracing does not have this problem and can use
1378 * arbitrary channel names.
1379 */
1380 ltt_channels_register("ust");
1381
1382 result = ltt_trace_setup(trace_name);
1383 if (result < 0) {
1384 ERR("ltt_trace_setup failed");
1385 return;
1386 }
1387
1388 result = ltt_trace_set_type(trace_name, trace_type);
1389 if (result < 0) {
1390 ERR("ltt_trace_set_type failed");
1391 return;
1392 }
1393
1394 result = ltt_trace_alloc(trace_name);
1395 if (result < 0) {
1396 ERR("ltt_trace_alloc failed");
1397 return;
1398 }
1399
1400 result = ltt_trace_start(trace_name);
1401 if (result < 0) {
1402 ERR("ltt_trace_start failed");
1403 return;
1404 }
1405
1406 /* Do this after the trace is started in order to avoid creating confusion
1407 * if the trace fails to start. */
1408 inform_consumer_daemon(trace_name);
1409 }
1410
1411 return;
1412
1413 /* should decrementally destroy stuff if error */
1414
1415 }
1416
1417 /* This is only called if we terminate normally, not with an unhandled signal,
1418 * so we cannot rely on it. However, for now, LTTV requires that the header of
1419 * the last sub-buffer contain a valid end time for the trace. This is done
1420 * automatically only when the trace is properly stopped.
1421 *
1422 * If the traced program crashed, it is always possible to manually add the
1423 * right value in the header, or to open the trace in text mode.
1424 *
1425 * FIXME: Fix LTTV so it doesn't need this.
1426 */
1427
1428 static void destroy_traces(void)
1429 {
1430 int result;
1431
1432 /* if trace running, finish it */
1433
1434 DBG("destructor stopping traces");
1435
1436 result = ltt_trace_stop("auto");
1437 if (result == -1) {
1438 ERR("ltt_trace_stop error");
1439 }
1440
1441 result = ltt_trace_destroy("auto", 0);
1442 if (result == -1) {
1443 ERR("ltt_trace_destroy error");
1444 }
1445 }
1446
1447 static int trace_recording(void)
1448 {
1449 int retval = 0;
1450 struct ust_trace *trace;
1451
1452 ltt_lock_traces();
1453
1454 cds_list_for_each_entry(trace, &ltt_traces.head, list) {
1455 if (trace->active) {
1456 retval = 1;
1457 break;
1458 }
1459 }
1460
1461 ltt_unlock_traces();
1462
1463 return retval;
1464 }
1465
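/* Sleep for usecs microseconds, restarting the sleep if it is interrupted
 * by a signal.
 */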
1466 int restarting_usleep(useconds_t usecs)
1467 {
1468 struct timespec tv;
1469 int result;
1470
1471 tv.tv_sec = 0;
1472 tv.tv_nsec = usecs * 1000;
1473
1474 do {
1475 result = nanosleep(&tv, &tv);
1476 } while (result == -1 && errno == EINTR);
1477
1478 return result;
1479 }
1480
1481 static void stop_listener(void)
1482 {
1483 int result;
1484
1485 if (!have_listener)
1486 return;
1487
1488 result = pthread_cancel(listener_thread);
1489 if (result != 0) {
1490 ERR("pthread_cancel: %s", strerror(result));
1491 }
1492 result = pthread_join(listener_thread, NULL);
1493 if (result != 0) {
1494 ERR("pthread_join: %s", strerror(result));
1495 }
1496 }
1497
1498 /* This destructor keeps the process alive for a few seconds in order
1499 * to give ustd time to connect to its buffers. This is necessary
1500 * for programs whose execution is very short. It is also useful in all
1501 * programs when tracing is started close to the end of the program
1502 * execution.
1503 *
1504 * FIXME: For now, this only works for the first trace created in a
1505 * process.
1506 */
1507
1508 static void __attribute__((destructor)) keepalive()
1509 {
1510 if (processpid != getpid()) {
1511 return;
1512 }
1513
1514 if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
1515 int total = 0;
1516 DBG("Keeping process alive for consumer daemon...");
1517 while (CMM_LOAD_SHARED(buffers_to_export)) {
1518 const int interv = 200000;
1519 restarting_usleep(interv);
1520 total += interv;
1521
1522 if (total >= 3000000) {
1523 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1524 break;
1525 }
1526 }
1527 DBG("Finally dying...");
1528 }
1529
1530 destroy_traces();
1531
1532 /* Ask the listener to stop and clean up. */
1533 stop_listener();
1534 }
1535
1536 void ust_potential_exec(void)
1537 {
1538 trace_mark(ust, potential_exec, MARK_NOARGS);
1539
1540 DBG("test");
1541
1542 keepalive();
1543 }
1544
1545 /* Notify ust that there was a fork. This needs to be called inside
1546 * the new process, anytime a process whose memory is not shared with
1547 * the parent is created. If this function is not called, the events
1548 * of the new process will not be collected.
1549 *
1550 * Signals should be disabled before the fork and reenabled only after
1551 * this call in order to guarantee tracing is not started before ust_fork()
1552 * sanitizes the new process.
1553 */
1554
1555 static void ust_fork(void)
1556 {
1557 struct ust_buffer *buf, *buf_tmp;
1558 struct ustcomm_sock *sock, *sock_tmp;
1559 int result;
1560
1561 /* FIXME: technically, the locks could have been taken before the fork */
1562 DBG("ust: forking");
1563
1564 /* Get the pid of the new process */
1565 processpid = getpid();
1566
1567 /* break lock if necessary */
1568 ltt_unlock_traces();
1569
1570 ltt_trace_stop("auto");
1571 ltt_trace_destroy("auto", 1);
1572 /* Delete all active connections, but leave them in the epoll set */
1573 cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
1574 ustcomm_del_sock(sock, 1);
1575 }
1576
1577 /* Delete all blocked consumers */
1578 cds_list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
1579 open_buffers_list) {
1580 result = close(buf->data_ready_fd_read);
1581 if (result == -1) {
1582 PERROR("close");
1583 }
1584 result = close(buf->data_ready_fd_write);
1585 if (result == -1) {
1586 PERROR("close");
1587 }
1588 cds_list_del(&buf->open_buffers_list);
1589 }
1590
1591 /* Clean up the listener socket and epoll, keeping the socket file */
1592 ustcomm_del_named_sock(listen_sock, 1);
1593 close(epoll_fd);
1594
1595 /* Re-start the launch sequence */
1596 CMM_STORE_SHARED(buffers_to_export, 0);
1597 have_listener = 0;
1598
1599 /* Set up epoll */
1600 epoll_fd = epoll_create(MAX_EVENTS);
1601 if (epoll_fd == -1) {
1602 ERR("epoll_create failed, tracing shutting down");
1603 return;
1604 }
1605
1606 /* Create the socket */
1607 listen_sock = init_app_socket(epoll_fd);
1608 if (!listen_sock) {
1609 ERR("failed to create application socket,"
1610 " tracing shutting down");
1611 return;
1612 }
1613 create_listener();
1614 ltt_trace_setup("auto");
1615 result = ltt_trace_set_type("auto", "ustrelay");
1616 if (result < 0) {
1617 ERR("ltt_trace_set_type failed");
1618 return;
1619 }
1620
1621 ltt_trace_alloc("auto");
1622 ltt_trace_start("auto");
1623 inform_consumer_daemon("auto");
1624 }
1625
1626 void ust_before_fork(ust_fork_info_t *fork_info)
1627 {
1628 /* Disable signals. This is to avoid that the child
1629 * intervenes before it is properly setup for tracing. It is
1630 * safer to disable all signals, because then we know we are not
1631 * breaking anything by restoring the original mask.
1632 */
1633 sigset_t all_sigs;
1634 int result;
1635
1636 /* FIXME:
1637 - only do this if tracing is active
1638 */
1639
1640 /* Disable signals */
1641 sigfillset(&all_sigs);
1642 result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
1643 if (result == -1) {
1644 PERROR("sigprocmask");
1645 return;
1646 }
1647 }
1648
1649 /* Don't call this function directly in a traced program */
1650 static void ust_after_fork_common(ust_fork_info_t *fork_info)
1651 {
1652 int result;
1653
1654 /* Restore signals */
1655 result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
1656 if (result == -1) {
1657 PERROR("sigprocmask");
1658 return;
1659 }
1660 }
1661
1662 void ust_after_fork_parent(ust_fork_info_t *fork_info)
1663 {
1664 /* Reenable signals */
1665 ust_after_fork_common(fork_info);
1666 }
1667
1668 void ust_after_fork_child(ust_fork_info_t *fork_info)
1669 {
1670 /* First sanitize the child */
1671 ust_fork();
1672
1673 /* Then reenable interrupts */
1674 ust_after_fork_common(fork_info);
1675 }
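
/* The three functions above are the public fork hooks. The sketch below
 * (compiled out with #if 0) illustrates how an application, or a fork()
 * wrapper, might be expected to call them; traced_fork() is a hypothetical
 * helper, not part of libust.
 */
#if 0
#include <unistd.h>
#include <ust/tracectl.h>

pid_t traced_fork(void)
{
	ust_fork_info_t fork_info;
	pid_t pid;

	/* Block all signals and remember the original mask before forking. */
	ust_before_fork(&fork_info);

	pid = fork();
	if (pid == 0) {
		/* Child: tear down inherited tracing state and re-create the
		 * listener, then restore the signal mask.
		 */
		ust_after_fork_child(&fork_info);
	} else {
		/* Parent (or failed fork): just restore the signal mask. */
		ust_after_fork_parent(&fork_info);
	}

	return pid;
}
#endif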
1676