1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <stdint.h>
27 #include <pthread.h>
28 #include <signal.h>
29 #include <sys/epoll.h>
30 #include <sys/time.h>
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <fcntl.h>
34 #include <poll.h>
35 #include <regex.h>
36 #include <urcu/uatomic_arch.h>
37 #include <urcu/list.h>
38
39 #include <ust/marker.h>
40 #include <ust/tracepoint.h>
41 #include <ust/tracectl.h>
42 #include "tracer.h"
43 #include "usterr.h"
44 #include "ustcomm.h"
45 #include "buffers.h"
46 #include "marker-control.h"
47
48 /* This should only be accessed by the constructor, before the creation
49 * of the listener, and then only by the listener.
50 */
51 s64 pidunique = -1LL;
52
53 static struct ustcomm_header _receive_header;
54 static struct ustcomm_header *receive_header = &_receive_header;
55 static char receive_buffer[USTCOMM_BUFFER_SIZE];
56 static char send_buffer[USTCOMM_BUFFER_SIZE];
57
58 static int epoll_fd;
59 static struct ustcomm_sock *listen_sock;
60
61 extern struct chan_info_struct chan_infos[];
62
63 static struct list_head open_buffers_list = LIST_HEAD_INIT(open_buffers_list);
64
65 static struct list_head ust_socks = LIST_HEAD_INIT(ust_socks);
66
67 /* Shared between the listener and the main thread; always accessed through LOAD_SHARED/STORE_SHARED */
68 int buffers_to_export = 0;
69
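/* Build a 64-bit "pidunique" from gettimeofday(): seconds in the upper 32
 * bits, microseconds in the lower 32 bits. This lets tools tell apart
 * processes that share the same pid (for example before and after an exec).
 */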
70 static long long make_pidunique(void)
71 {
72 s64 retval;
73 struct timeval tv;
74
75 gettimeofday(&tv, NULL);
76
77 retval = tv.tv_sec;
78 retval <<= 32;
79 retval |= tv.tv_usec;
80
81 return retval;
82 }
83
84 static void print_markers(FILE *fp)
85 {
86 struct marker_iter iter;
87
88 lock_markers();
89 marker_iter_reset(&iter);
90 marker_iter_start(&iter);
91
92 while (iter.marker) {
93 fprintf(fp, "marker: %s/%s %d \"%s\" %p\n",
94 iter.marker->channel,
95 iter.marker->name,
96 (int)imv_read(iter.marker->state),
97 iter.marker->format,
98 iter.marker->location);
99 marker_iter_next(&iter);
100 }
101 unlock_markers();
102 }
103
104 static void print_trace_events(FILE *fp)
105 {
106 struct trace_event_iter iter;
107
108 lock_trace_events();
109 trace_event_iter_reset(&iter);
110 trace_event_iter_start(&iter);
111
112 while (iter.trace_event) {
113 fprintf(fp, "trace_event: %s\n", iter.trace_event->name);
114 trace_event_iter_next(&iter);
115 }
116 unlock_trace_events();
117 }
118
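/* Connect to the ustd consumer daemon. The socket path comes from the
 * UST_DAEMON_SOCKET environment variable if set, otherwise SOCK_DIR "/ustd".
 * Returns a connected file descriptor, or a negative value on error.
 */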
119 static int connect_ustd(void)
120 {
121 int result, fd;
122 char default_daemon_path[] = SOCK_DIR "/ustd";
123 char *explicit_daemon_path, *daemon_path;
124
125 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
126 if (explicit_daemon_path) {
127 daemon_path = explicit_daemon_path;
128 } else {
129 daemon_path = default_daemon_path;
130 }
131
132 DBG("Connecting to daemon_path %s", daemon_path);
133
134 result = ustcomm_connect_path(daemon_path, &fd);
135 if (result < 0) {
136 WARN("connect_ustd failed, daemon_path: %s",
137 daemon_path);
138 return result;
139 }
140
141 return fd;
142 }
143
144
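/* Send a CONSUME_BUFFER request to the daemon over sock, asking it to
 * collect the buffer "<channel>_<cpu>" produced by this pid.
 */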
145 static void request_buffer_consumer(int sock,
146 const char *channel,
147 int cpu)
148 {
149 struct ustcomm_header send_header, recv_header;
150 struct ustcomm_buffer_info buf_inf;
151 int result = 0;
152
153 result = ustcomm_pack_buffer_info(&send_header,
154 &buf_inf,
155 channel,
156 cpu);
157
158 if (result < 0) {
159 ERR("failed to pack buffer info message %s_%d",
160 channel, cpu);
161 return;
162 }
163
164 buf_inf.pid = getpid();
165 send_header.command = CONSUME_BUFFER;
166
167 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
168 &recv_header, NULL);
169 if (result <= 0) {
170 PERROR("request for buffer consumer failed, is the daemon online?");
171 }
172
173 return;
174 }
175
176 /* Ask the daemon to collect a trace called trace_name that is being
177 * produced by this pid.
178 *
179 * The trace must be at least allocated (it may also already be started),
180 * because _ltt_trace_find() is used to look it up.
181 */
182
183 static void inform_consumer_daemon(const char *trace_name)
184 {
185 int sock, i,j;
186 struct ust_trace *trace;
187 const char *ch_name;
188
189 sock = connect_ustd();
190 if (sock < 0) {
191 return;
192 }
193
194 DBG("Connected to ustd");
195
196 ltt_lock_traces();
197
198 trace = _ltt_trace_find(trace_name);
199 if (trace == NULL) {
200 WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
201 goto unlock_traces;
202 }
203
204 for (i=0; i < trace->nr_channels; i++) {
205 if (trace->channels[i].request_collection) {
206 /* iterate on all cpus */
207 for (j=0; j<trace->channels[i].n_cpus; j++) {
208 ch_name = trace->channels[i].channel_name;
209 request_buffer_consumer(sock, ch_name, j);
210 STORE_SHARED(buffers_to_export,
211 LOAD_SHARED(buffers_to_export)+1);
212 }
213 }
214 }
215
216 unlock_traces:
217 ltt_unlock_traces();
218
219 close(sock);
220 }
221
222 static struct ust_channel *find_channel(const char *ch_name,
223 struct ust_trace *trace)
224 {
225 int i;
226
227 for (i=0; i<trace->nr_channels; i++) {
228 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
229 return &trace->channels[i];
230 }
231 }
232
233 return NULL;
234 }
235
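/* Look up the buffer for (trace, channel, cpu) and return its shm ids and
 * the read end of its data-ready pipe, so that the daemon can map the
 * buffer and wait for data.
 */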
236 static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
237 int ch_cpu,
238 int *buf_shmid,
239 int *buf_struct_shmid,
240 int *buf_pipe_fd)
241 {
242 struct ust_trace *trace;
243 struct ust_channel *channel;
244 struct ust_buffer *buf;
245
246 DBG("get_buffer_shmid_pipe_fd");
247
248 ltt_lock_traces();
249 trace = _ltt_trace_find(trace_name);
250 ltt_unlock_traces();
251
252 if (trace == NULL) {
253 ERR("cannot find trace!");
254 return -ENODATA;
255 }
256
257 channel = find_channel(ch_name, trace);
258 if (!channel) {
259 ERR("cannot find channel %s!", ch_name);
260 return -ENODATA;
261 }
262
263 buf = channel->buf[ch_cpu];
264
265 *buf_shmid = buf->shmid;
266 *buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
267 *buf_pipe_fd = buf->data_ready_fd_read;
268
269 return 0;
270 }
271
272 static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
273 int *num, int *size)
274 {
275 struct ust_trace *trace;
276 struct ust_channel *channel;
277
278 DBG("get_subbuf_num_size");
279
280 ltt_lock_traces();
281 trace = _ltt_trace_find(trace_name);
282 ltt_unlock_traces();
283
284 if (!trace) {
285 ERR("cannot find trace!");
286 return -ENODATA;
287 }
288
289 channel = find_channel(ch_name, trace);
290 if (!channel) {
291 ERR("unable to find channel");
292 return -ENODATA;
293 }
294
295 *num = channel->subbuf_cnt;
296 *size = channel->subbuf_size;
297
298 return 0;
299 }
300
301 /* Return the smallest power of two that is greater than or equal to v */
302
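/* For example, pow2_higher_or_eq(4) == 4 and pow2_higher_or_eq(5) == 8.
 * Callers are expected to pass v >= 1; v == 0 would lead to an invalid shift.
 */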
303 static unsigned int pow2_higher_or_eq(unsigned int v)
304 {
305 int hb = fls(v);
306 int retval = 1<<(hb-1);
307
308 if (v-retval == 0)
309 return retval;
310 else
311 return retval<<1;
312 }
313
314 static int set_subbuf_size(const char *trace_name, const char *ch_name,
315 unsigned int size)
316 {
317 unsigned int power;
318 int retval = 0;
319 struct ust_trace *trace;
320 struct ust_channel *channel;
321
322 DBG("set_subbuf_size");
323
324 power = pow2_higher_or_eq(size);
325 power = max_t(unsigned int, 2u, power);
326 if (power != size) {
327 WARN("using the next power of two for buffer size = %u\n", power);
328 }
329
330 ltt_lock_traces();
331 trace = _ltt_trace_find_setup(trace_name);
332 if (trace == NULL) {
333 ERR("cannot find trace!");
334 retval = -ENODATA;
335 goto unlock_traces;
336 }
337
338 channel = find_channel(ch_name, trace);
339 if (!channel) {
340 ERR("unable to find channel");
341 retval = -ENODATA;
342 goto unlock_traces;
343 }
344
345 channel->subbuf_size = power;
346 DBG("the set_subbuf_size for the requested channel is %u", channel->subbuf_size);
347
348 unlock_traces:
349 ltt_unlock_traces();
350
351 return retval;
352 }
353
354 static int set_subbuf_num(const char *trace_name, const char *ch_name,
355 unsigned int num)
356 {
357 struct ust_trace *trace;
358 struct ust_channel *channel;
359 int retval = 0;
360
361 DBG("set_subbuf_num");
362
363 if (num < 2) {
364 ERR("subbuffer count should be at least 2");
365 return -EINVAL;
366 }
367
368 ltt_lock_traces();
369 trace = _ltt_trace_find_setup(trace_name);
370 if (trace == NULL) {
371 ERR("cannot find trace!");
372 retval = -ENODATA;
373 goto unlock_traces;
374 }
375
376 channel = find_channel(ch_name, trace);
377 if (!channel) {
378 ERR("unable to find channel");
379 retval = -ENODATA;
380 goto unlock_traces;
381 }
382
383 channel->subbuf_cnt = num;
384 DBG("the set_subbuf_cnt for the requested channel is %zd", channel->subbuf_cnt);
385
386 unlock_traces:
387 ltt_unlock_traces();
388 return retval;
389 }
390
391 static int get_subbuffer(const char *trace_name, const char *ch_name,
392 int ch_cpu, long *consumed_old)
393 {
394 int retval = 0;
395 struct ust_trace *trace;
396 struct ust_channel *channel;
397 struct ust_buffer *buf;
398
399 DBG("get_subbuf");
400
401 *consumed_old = 0;
402
403 ltt_lock_traces();
404 trace = _ltt_trace_find(trace_name);
405
406 if (!trace) {
407 DBG("Cannot find trace. It was likely destroyed by the user.");
408 retval = -ENODATA;
409 goto unlock_traces;
410 }
411
412 channel = find_channel(ch_name, trace);
413 if (!channel) {
414 ERR("unable to find channel");
415 retval = -ENODATA;
416 goto unlock_traces;
417 }
418
419 buf = channel->buf[ch_cpu];
420
421 retval = ust_buffers_get_subbuf(buf, consumed_old);
422 if (retval < 0) {
423 WARN("missed buffer?");
424 }
425
426 unlock_traces:
427 ltt_unlock_traces();
428
429 return retval;
430 }
431
432
433 static int notify_buffer_mapped(const char *trace_name,
434 const char *ch_name,
435 int ch_cpu)
436 {
437 int retval = 0;
438 struct ust_trace *trace;
439 struct ust_channel *channel;
440 struct ust_buffer *buf;
441
442 DBG("notify_buffer_mapped");
443
444 ltt_lock_traces();
445 trace = _ltt_trace_find(trace_name);
446
447 if (!trace) {
448 retval = -ENODATA;
449 DBG("Cannot find trace. It was likely destroyed by the user.");
450 goto unlock_traces;
451 }
452
453 channel = find_channel(ch_name, trace);
454 if (!channel) {
455 retval = -ENODATA;
456 ERR("unable to find channel");
457 goto unlock_traces;
458 }
459
460 buf = channel->buf[ch_cpu];
461
462 /* Reaching this point proves that the daemon has mapped the buffer into
463 * its memory, so we may now decrement buffers_to_export.
464 */
465 if (uatomic_read(&buf->consumed) == 0) {
466 DBG("decrementing buffers_to_export");
467 STORE_SHARED(buffers_to_export, LOAD_SHARED(buffers_to_export)-1);
468 }
469
470 /* The buffer has been exported, so we can add it to the
471 * list of open buffers.
472 */
473 list_add(&buf->open_buffers_list, &open_buffers_list);
474
475 unlock_traces:
476 ltt_unlock_traces();
477
478 return retval;
479 }
480
481 static int put_subbuffer(const char *trace_name, const char *ch_name,
482 int ch_cpu, long consumed_old)
483 {
484 int retval = 0;
485 struct ust_trace *trace;
486 struct ust_channel *channel;
487 struct ust_buffer *buf;
488
489 DBG("put_subbuf");
490
491 ltt_lock_traces();
492 trace = _ltt_trace_find(trace_name);
493
494 if (!trace) {
495 retval = -ENODATA;
496 DBG("Cannot find trace. It was likely destroyed by the user.");
497 goto unlock_traces;
498 }
499
500 channel = find_channel(ch_name, trace);
501 if (!channel) {
502 retval = -ENODATA;
503 ERR("unable to find channel");
504 goto unlock_traces;
505 }
506
507 buf = channel->buf[ch_cpu];
508
509 retval = ust_buffers_put_subbuf(buf, consumed_old);
510 if (retval < 0) {
511 WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
512 ch_name, ch_cpu);
513 } else {
514 DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
515 ch_name, ch_cpu);
516 }
517
518 unlock_traces:
519 ltt_unlock_traces();
520
521 return retval;
522 }
523
524 static void listener_cleanup(void *ptr)
525 {
526 ustcomm_del_named_sock(listen_sock, 0);
527 }
528
529 static void force_subbuf_switch(void)
530 {
531 struct ust_buffer *buf;
532
533 list_for_each_entry(buf, &open_buffers_list,
534 open_buffers_list) {
535 ltt_force_switch(buf, FORCE_FLUSH);
536 }
537 }
538
539 /* Simple commands are those which need only respond with a return value. */
540 static int process_simple_client_cmd(int command, char *recv_buf)
541 {
542 int result;
543 char trace_type[] = "ustrelay";
544 char trace_name[] = "auto";
545
546 switch(command) {
547 case SET_SOCK_PATH:
548 {
549 struct ustcomm_sock_path *sock_msg;
550 sock_msg = (struct ustcomm_sock_path *)recv_buf;
551 sock_msg->sock_path =
552 ustcomm_restore_ptr(sock_msg->sock_path,
553 sock_msg->data,
554 sizeof(sock_msg->data));
555 if (!sock_msg->sock_path) {
556
557 return -EINVAL;
558 }
559 return setenv("UST_DAEMON_SOCKET", sock_msg->sock_path, 1);
560 }
561 case START:
562 /* start is an operation that sets up the trace, allocates it and starts it */
563 result = ltt_trace_setup(trace_name);
564 if (result < 0) {
565 ERR("ltt_trace_setup failed");
566 return result;
567 }
568
569 result = ltt_trace_set_type(trace_name, trace_type);
570 if (result < 0) {
571 ERR("ltt_trace_set_type failed");
572 return result;
573 }
574
575 result = ltt_trace_alloc(trace_name);
576 if (result < 0) {
577 ERR("ltt_trace_alloc failed");
578 return result;
579 }
580
581 inform_consumer_daemon(trace_name);
582
583 result = ltt_trace_start(trace_name);
584 if (result < 0) {
585 ERR("ltt_trace_start failed");
586 return result;
587 }
588
589 return 0;
590 case SETUP_TRACE:
591 DBG("trace setup");
592
593 result = ltt_trace_setup(trace_name);
594 if (result < 0) {
595 ERR("ltt_trace_setup failed");
596 return result;
597 }
598
599 result = ltt_trace_set_type(trace_name, trace_type);
600 if (result < 0) {
601 ERR("ltt_trace_set_type failed");
602 return result;
603 }
604
605 return 0;
606 case ALLOC_TRACE:
607 DBG("trace alloc");
608
609 result = ltt_trace_alloc(trace_name);
610 if (result < 0) {
611 ERR("ltt_trace_alloc failed");
612 return result;
613 }
614 inform_consumer_daemon(trace_name);
615
616 return 0;
617
618 case CREATE_TRACE:
619 DBG("trace create");
620
621 result = ltt_trace_setup(trace_name);
622 if (result < 0) {
623 ERR("ltt_trace_setup failed");
624 return result;
625 }
626
627 result = ltt_trace_set_type(trace_name, trace_type);
628 if (result < 0) {
629 ERR("ltt_trace_set_type failed");
630 return result;
631 }
632
633 return 0;
634 case START_TRACE:
635 DBG("trace start");
636
637 result = ltt_trace_alloc(trace_name);
638 if (result < 0) {
639 ERR("ltt_trace_alloc failed");
640 return result;
641 }
642 if (!result) {
643 inform_consumer_daemon(trace_name);
644 }
645
646 result = ltt_trace_start(trace_name);
647 if (result < 0) {
648 ERR("ltt_trace_start failed");
649 return result;
650 }
651
652 return 0;
653 case STOP_TRACE:
654 DBG("trace stop");
655
656 result = ltt_trace_stop(trace_name);
657 if (result < 0) {
658 ERR("ltt_trace_stop failed");
659 return result;
660 }
661
662 return 0;
663 case DESTROY_TRACE:
664 DBG("trace destroy");
665
666 result = ltt_trace_destroy(trace_name, 0);
667 if (result < 0) {
668 ERR("ltt_trace_destroy failed");
669 return result;
670 }
671 return 0;
672 case FORCE_SUBBUF_SWITCH:
673 /* FIXME: return codes? */
674 force_subbuf_switch();
675
676 break;
677
678 default:
679 return -EINVAL;
680 }
681
682 return 0;
683 }
684
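/* Handle channel-level commands (get/set subbuffer count and size) and send
 * the reply on sock directly.
 */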
685 static void process_channel_cmd(int sock, int command,
686 struct ustcomm_channel_info *ch_inf)
687 {
688 struct ustcomm_header _reply_header;
689 struct ustcomm_header *reply_header = &_reply_header;
690 struct ustcomm_channel_info *reply_msg =
691 (struct ustcomm_channel_info *)send_buffer;
692 char trace_name[] = "auto";
693 int result, offset = 0, num, size;
694
695 memset(reply_header, 0, sizeof(*reply_header));
696
697 switch (command) {
698 case GET_SUBBUF_NUM_SIZE:
699 result = get_subbuf_num_size(trace_name,
700 ch_inf->channel,
701 &num, &size);
702 if (result < 0) {
703 reply_header->result = result;
704 break;
705 }
706
707 reply_msg->channel = USTCOMM_POISON_PTR;
708 reply_msg->subbuf_num = num;
709 reply_msg->subbuf_size = size;
710
711
712 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
713
714 break;
715 case SET_SUBBUF_NUM:
716 reply_header->result = set_subbuf_num(trace_name,
717 ch_inf->channel,
718 ch_inf->subbuf_num);
719
720 break;
721 case SET_SUBBUF_SIZE:
722 reply_header->result = set_subbuf_size(trace_name,
723 ch_inf->channel,
724 ch_inf->subbuf_size);
725
726
727 break;
728 }
729 if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
730 ERR("ustcomm_send failed");
731 }
732 }
733
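/* Handle per-buffer commands (shm id/pipe fd export, map notification,
 * get/put subbuffer) and send the reply on sock directly.
 */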
734 static void process_buffer_cmd(int sock, int command,
735 struct ustcomm_buffer_info *buf_inf)
736 {
737 struct ustcomm_header _reply_header;
738 struct ustcomm_header *reply_header = &_reply_header;
739 struct ustcomm_buffer_info *reply_msg =
740 (struct ustcomm_buffer_info *)send_buffer;
741 char trace_name[] = "auto";
742 int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
743 long consumed_old;
744
745 memset(reply_header, 0, sizeof(*reply_header));
746
747 switch (command) {
748 case GET_BUF_SHMID_PIPE_FD:
749 result = get_buffer_shmid_pipe_fd(trace_name, buf_inf->channel,
750 buf_inf->ch_cpu,
751 &buf_shmid,
752 &buf_struct_shmid,
753 &buf_pipe_fd);
754 if (result < 0) {
755 reply_header->result = result;
756 break;
757 }
758
759 reply_msg->channel = USTCOMM_POISON_PTR;
760 reply_msg->buf_shmid = buf_shmid;
761 reply_msg->buf_struct_shmid = buf_struct_shmid;
762
763 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
764 reply_header->fd_included = 1;
765
766 if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
767 &buf_pipe_fd) < 0) {
768 ERR("ustcomm_send failed");
769 }
770 return;
771
772 case NOTIFY_BUF_MAPPED:
773 reply_header->result =
774 notify_buffer_mapped(trace_name,
775 buf_inf->channel,
776 buf_inf->ch_cpu);
777 break;
778 case GET_SUBBUFFER:
779 result = get_subbuffer(trace_name, buf_inf->channel,
780 buf_inf->ch_cpu, &consumed_old);
781 if (result < 0) {
782 reply_header->result = result;
783 break;
784 }
785
786 reply_msg->channel = USTCOMM_POISON_PTR;
787 reply_msg->consumed_old = consumed_old;
788
789 reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
790
791 break;
792 case PUT_SUBBUFFER:
793 result = put_subbuffer(trace_name, buf_inf->channel,
794 buf_inf->ch_cpu,
795 buf_inf->consumed_old);
796 reply_header->result = result;
797
798 break;
799 }
800
801 if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
802 ERR("ustcomm_send failed");
803 }
804
805 }
806
807 static void process_marker_cmd(int sock, int command,
808 struct ustcomm_marker_info *marker_inf)
809 {
810 struct ustcomm_header _reply_header;
811 struct ustcomm_header *reply_header = &_reply_header;
812 int result = 0;
813
814 memset(reply_header, 0, sizeof(*reply_header));
815
816 switch(command) {
817 case ENABLE_MARKER:
818
819 result = ltt_marker_connect(marker_inf->channel,
820 marker_inf->marker,
821 "default");
822 if (result < 0) {
823 WARN("could not enable marker; channel=%s,"
824 " name=%s",
825 marker_inf->channel,
826 marker_inf->marker);
827
828 }
829 break;
830 case DISABLE_MARKER:
831 result = ltt_marker_disconnect(marker_inf->channel,
832 marker_inf->marker,
833 "default");
834 if (result < 0) {
835 WARN("could not disable marker; channel=%s,"
836 " name=%s",
837 marker_inf->channel,
838 marker_inf->marker);
839 }
840 break;
841 }
842
843 reply_header->result = result;
844
845 if (ustcomm_send(sock, reply_header, NULL) < 0) {
846 ERR("ustcomm_send failed");
847 }
848
849 }
850 static void process_client_cmd(struct ustcomm_header *recv_header,
851 char *recv_buf, int sock)
852 {
853 int result;
854 struct ustcomm_header _reply_header;
855 struct ustcomm_header *reply_header = &_reply_header;
856 char *send_buf = send_buffer;
857
858 memset(reply_header, 0, sizeof(*reply_header));
859 memset(send_buf, 0, sizeof(send_buffer));
860
861 switch(recv_header->command) {
862 case GET_SUBBUF_NUM_SIZE:
863 case SET_SUBBUF_NUM:
864 case SET_SUBBUF_SIZE:
865 {
866 struct ustcomm_channel_info *ch_inf;
867 ch_inf = (struct ustcomm_channel_info *)recv_buf;
868 result = ustcomm_unpack_channel_info(ch_inf);
869 if (result < 0) {
870 ERR("couldn't unpack channel info");
871 reply_header->result = -EINVAL;
872 goto send_response;
873 }
874 process_channel_cmd(sock, recv_header->command, ch_inf);
875 return;
876 }
877 case GET_BUF_SHMID_PIPE_FD:
878 case NOTIFY_BUF_MAPPED:
879 case GET_SUBBUFFER:
880 case PUT_SUBBUFFER:
881 {
882 struct ustcomm_buffer_info *buf_inf;
883 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
884 result = ustcomm_unpack_buffer_info(buf_inf);
885 if (result < 0) {
886 ERR("couldn't unpack buffer info");
887 reply_header->result = -EINVAL;
888 goto send_response;
889 }
890 process_buffer_cmd(sock, recv_header->command, buf_inf);
891 return;
892 }
893 case ENABLE_MARKER:
894 case DISABLE_MARKER:
895 {
896 struct ustcomm_marker_info *marker_inf;
897 marker_inf = (struct ustcomm_marker_info *)recv_buf;
898 result = ustcomm_unpack_marker_info(marker_inf);
899 if (result < 0) {
900 ERR("couldn't unpack marker info");
901 reply_header->result = -EINVAL;
902 goto send_response;
903 }
904 process_marker_cmd(sock, recv_header->command, marker_inf);
905 return;
906 }
907 case LIST_MARKERS:
908 {
909 char *ptr;
910 size_t size;
911 FILE *fp;
912
913 fp = open_memstream(&ptr, &size);
914 if (fp == NULL) {
915 ERR("opening memstream failed");
916 return;
917 }
918 print_markers(fp);
919 fclose(fp);
920
921 reply_header->size = size;
922
923 result = ustcomm_send(sock, reply_header, ptr);
924
925 free(ptr);
926
927 if (result < 0) {
928 PERROR("failed to send markers list");
929 }
930
931 break;
932 }
933 case LIST_TRACE_EVENTS:
934 {
935 char *ptr;
936 size_t size;
937 FILE *fp;
938
939 fp = open_memstream(&ptr, &size);
940 if (fp == NULL) {
941 ERR("opening memstream failed");
942 return;
943 }
944 print_trace_events(fp);
945 fclose(fp);
946
947 reply_header->size = size;
948
949 result = ustcomm_send(sock, reply_header, ptr);
950
951 free(ptr);
952
953 if (result < 0) {
954 ERR("list_trace_events failed");
955 return;
956 }
957
958 break;
959 }
960 case LOAD_PROBE_LIB:
961 {
962 char *libfile;
963
964 /* FIXME: No functionality at all... */
965 libfile = recv_buf;
966
967 DBG("load_probe_lib loading %s", libfile);
968
969 break;
970 }
971 case GET_PIDUNIQUE:
972 {
973 struct ustcomm_pidunique *pid_msg;
974 pid_msg = (struct ustcomm_pidunique *)send_buf;
975
976 pid_msg->pidunique = pidunique;
977 reply_header->size = sizeof(*pid_msg);
978
979 goto send_response;
980
981 }
982 case GET_SOCK_PATH:
983 {
984 struct ustcomm_sock_path *sock_msg;
985 char *sock_path_env;
986
987 sock_msg = (struct ustcomm_sock_path *)send_buf;
988
989 sock_path_env = getenv("UST_DAEMON_SOCKET");
990
991 if (!sock_path_env) {
992 result = ustcomm_pack_sock_path(reply_header,
993 sock_msg,
994 SOCK_DIR "/ustd");
995
996 } else {
997 result = ustcomm_pack_sock_path(reply_header,
998 sock_msg,
999 sock_path_env);
1000 }
1001 reply_header->result = result;
1002
1003 goto send_response;
1004 }
1005 default:
1006 reply_header->result =
1007 process_simple_client_cmd(recv_header->command,
1008 recv_buf);
1009 goto send_response;
1010
1011 }
1012
1013 return;
1014
1015 send_response:
1016 ustcomm_send(sock, reply_header, send_buf);
1017 }
1018
1019 #define MAX_EVENTS 10
1020
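/* Entry point of the listener thread: wait on the epoll set, accept new
 * connections arriving on listen_sock, and dispatch every received message
 * to process_client_cmd(). A socket on which ustcomm_recv() returns 0
 * (peer closed) is removed from the set.
 */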
1021 void *listener_main(void *p)
1022 {
1023 struct ustcomm_sock *epoll_sock;
1024 struct epoll_event events[MAX_EVENTS];
1025 struct sockaddr addr;
1026 int accept_fd, nfds, result, i, addr_size;
1027
1028 DBG("LISTENER");
1029
1030 pthread_cleanup_push(listener_cleanup, NULL);
1031
1032 for(;;) {
1033 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1034 if (nfds == -1) {
1035 PERROR("listener_main: epoll_wait failed");
1036 continue;
1037 }
1038
1039 for (i = 0; i < nfds; i++) {
1040 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1041 if (epoll_sock == listen_sock) {
1042 addr_size = sizeof(struct sockaddr);
1043 accept_fd = accept(epoll_sock->fd,
1044 &addr,
1045 (socklen_t *)&addr_size);
1046 if (accept_fd == -1) {
1047 PERROR("listener_main: accept failed");
1048 continue;
1049 }
1050 ustcomm_init_sock(accept_fd, epoll_fd,
1051 &ust_socks);
1052 } else {
1053 memset(receive_header, 0,
1054 sizeof(*receive_header));
1055 memset(receive_buffer, 0,
1056 sizeof(receive_buffer));
1057 result = ustcomm_recv(epoll_sock->fd,
1058 receive_header,
1059 receive_buffer);
1060 if (result == 0) {
1061 ustcomm_del_sock(epoll_sock, 0);
1062 } else {
1063 process_client_cmd(receive_header,
1064 receive_buffer,
1065 epoll_sock->fd);
1066 }
1067 }
1068 }
1069 }
1070
1071 pthread_cleanup_pop(1);
1072 }
1073
1074 /* These should only be accessed in the parent thread,
1075 * not the listener.
1076 */
1077 static volatile sig_atomic_t have_listener = 0;
1078 static pthread_t listener_thread;
1079
1080 void create_listener(void)
1081 {
1082 int result;
1083 sigset_t sig_all_blocked;
1084 sigset_t orig_parent_mask;
1085
1086 if (have_listener) {
1087 WARN("not creating listener because we already had one");
1088 return;
1089 }
1090
1091 /* A new thread created by pthread_create inherits the signal mask
1092 * from the parent. To avoid any signal being received by the
1093 * listener thread, we block all signals temporarily in the parent,
1094 * while we create the listener thread.
1095 */
1096
1097 sigfillset(&sig_all_blocked);
1098
1099 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1100 if (result) {
1101 ERR("pthread_sigmask: %s", strerror(result));
1102 }
1103
1104 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1105 if (result) {
1106 ERR("pthread_create: %s", strerror(result));
1107 }
1108
1109 /* Restore original signal mask in parent */
1110 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1111 if (result) {
1112 ERR("pthread_sigmask: %s", strerror(result));
1113 } else {
1114 have_listener = 1;
1115 }
1116 }
1117
1118 #define AUTOPROBE_DISABLED 0
1119 #define AUTOPROBE_ENABLE_ALL 1
1120 #define AUTOPROBE_ENABLE_REGEX 2
1121 static int autoprobe_method = AUTOPROBE_DISABLED;
1122 static regex_t autoprobe_regex;
1123
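/* Called for each marker when UST_AUTOPROBE is set: connect the "default"
 * probe to marker m. If UST_AUTOPROBE starts with '/', the rest of the value
 * is a regex that must match "channel/marker_name"; any other value enables
 * all markers.
 */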
1124 static void auto_probe_connect(struct marker *m)
1125 {
1126 int result;
1127
1128 char* concat_name = NULL;
1129 const char *probe_name = "default";
1130
1131 if (autoprobe_method == AUTOPROBE_DISABLED) {
1132 return;
1133 } else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
1134 result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
1135 if (result == -1) {
1136 ERR("auto_probe_connect: asprintf failed (marker %s/%s)",
1137 m->channel, m->name);
1138 return;
1139 }
1140 if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
1141 free(concat_name);
1142 return;
1143 }
1144 free(concat_name);
1145 }
1146
1147 result = ltt_marker_connect(m->channel, m->name, probe_name);
1148 if (result && result != -EEXIST)
1149 ERR("ltt_marker_connect (marker = %s/%s, errno = %d)", m->channel, m->name, -result);
1150
1151 DBG("auto connected marker %s/%s (addr: %p) to probe default", m->channel, m->name, m);
1152
1153 }
1154
1155 static struct ustcomm_sock * init_app_socket(int epoll_fd)
1156 {
1157 char *name;
1158 int result;
1159 struct ustcomm_sock *sock;
1160
1161 result = asprintf(&name, "%s/%d", SOCK_DIR, (int)getpid());
1162 if (result < 0) {
1163 ERR("failed to allocate socket name string, "
1164 "UST thread bailing");
1165 return NULL;
1166 }
1167
1168 result = ensure_dir_exists(SOCK_DIR);
1169 if (result == -1) {
1170 ERR("Unable to create socket directory %s, UST thread bailing",
1171 SOCK_DIR);
1172 goto free_name;
1173 }
1174
1175 sock = ustcomm_init_named_socket(name, epoll_fd);
1176 if (!sock) {
1177 ERR("Error initializing named socket (%s). Check that directory"
1178 "exists and that it is writable. UST thread bailing", name);
1179 goto free_name;
1180 }
1181
1182 free(name);
1183 return sock;
1184
1185 free_name:
1186 free(name);
1187 return NULL;
1188 }
1189
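/* Library constructor: generate the pidunique, create the epoll set and the
 * per-process listener socket, start the listener thread, then apply the
 * environment overrides (UST_AUTOPROBE, UST_OVERWRITE, UST_AUTOCOLLECT,
 * UST_SUBBUF_SIZE, UST_SUBBUF_NUM) and, if UST_TRACE is set, start early
 * tracing of the "auto" trace.
 */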
1190 static void __attribute__((constructor)) init(void)
1191 {
1192 int result;
1193 char* autoprobe_val = NULL;
1194 char* subbuffer_size_val = NULL;
1195 char* subbuffer_count_val = NULL;
1196 unsigned int subbuffer_size;
1197 unsigned int subbuffer_count;
1198 unsigned int power;
1199
1200 /* Assign the pidunique, to be able to differentiate between processes that
1201 * share the same pid (e.g. before and after an exec).
1202 */
1203 pidunique = make_pidunique();
1204
1205 DBG("Tracectl constructor");
1206
1207 /* Set up epoll */
1208 epoll_fd = epoll_create(MAX_EVENTS);
1209 if (epoll_fd == -1) {
1210 ERR("epoll_create failed, tracing shutting down");
1211 return;
1212 }
1213
1214 /* Create the socket */
1215 listen_sock = init_app_socket(epoll_fd);
1216 if (!listen_sock) {
1217 ERR("failed to create application socket,"
1218 " tracing shutting down");
1219 return;
1220 }
1221
1222 create_listener();
1223
1224 autoprobe_val = getenv("UST_AUTOPROBE");
1225 if (autoprobe_val) {
1226 struct marker_iter iter;
1227
1228 DBG("Autoprobe enabled.");
1229
1230 /* Ensure markers are initialized */
1231 //init_markers();
1232
1233 /* Ensure marker control is initialized, for the probe */
1234 init_marker_control();
1235
1236 /* first, set the callback that will connect the
1237 * probe on new markers
1238 */
1239 if (autoprobe_val[0] == '/') {
1240 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1241 if (result) {
1242 char regexerr[150];
1243
1244 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1245 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1246 /* don't crash the application just for this */
1247 } else {
1248 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1249 }
1250 } else {
1251 /* just enable all instrumentation */
1252 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1253 }
1254
1255 marker_set_new_marker_cb(auto_probe_connect);
1256
1257 /* Now, connect the probes that were already registered. */
1258 marker_iter_reset(&iter);
1259 marker_iter_start(&iter);
1260
1261 DBG("now iterating on markers already registered");
1262 while (iter.marker) {
1263 DBG("now iterating on marker %s", iter.marker->name);
1264 auto_probe_connect(iter.marker);
1265 marker_iter_next(&iter);
1266 }
1267 }
1268
1269 if (getenv("UST_OVERWRITE")) {
1270 int val = atoi(getenv("UST_OVERWRITE"));
1271 if (val == 0 || val == 1) {
1272 STORE_SHARED(ust_channels_overwrite_by_default, val);
1273 } else {
1274 WARN("invalid value for UST_OVERWRITE");
1275 }
1276 }
1277
1278 if (getenv("UST_AUTOCOLLECT")) {
1279 int val = atoi(getenv("UST_AUTOCOLLECT"));
1280 if (val == 0 || val == 1) {
1281 STORE_SHARED(ust_channels_request_collection_by_default, val);
1282 } else {
1283 WARN("invalid value for UST_AUTOCOLLECT");
1284 }
1285 }
1286
1287 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1288 if (subbuffer_size_val) {
1289 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1290 power = pow2_higher_or_eq(subbuffer_size);
1291 if (power != subbuffer_size)
1292 WARN("using the next power of two for buffer size = %u\n", power);
1293 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1294 }
1295
1296 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1297 if (subbuffer_count_val) {
1298 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1299 if (subbuffer_count < 2)
1300 subbuffer_count = 2;
1301 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1302 }
1303
1304 if (getenv("UST_TRACE")) {
1305 char trace_name[] = "auto";
1306 char trace_type[] = "ustrelay";
1307
1308 DBG("starting early tracing");
1309
1310 /* Ensure marker control is initialized */
1311 init_marker_control();
1312
1313 /* Ensure markers are initialized */
1314 init_markers();
1315
1316 /* Ensure buffers are initialized, for the transport to be available.
1317 * We are about to set a trace type and it will fail without this.
1318 */
1319 init_ustrelay_transport();
1320
1321 /* FIXME: When starting early tracing (here), depending on the
1322 * order of constructors, it is very well possible some marker
1323 * sections are not yet registered. Because of this, some
1324 * channels may not be registered. Yet, we are about to ask the
1325 * daemon to collect the channels. Channels which are not yet
1326 * registered will not be collected.
1327 *
1328 * Currently, in LTTng, there is no way to add a channel after
1329 * trace start. The reason for this is that it induces complex
1330 * concurrency issues on the trace structures, which can only
1331 * be resolved using RCU. This has not been done yet. As a
1332 * workaround, we are forcing the registration of the "ust"
1333 * channel here. This is the only channel (apart from metadata)
1334 * that can be reliably used in early tracing.
1335 *
1336 * Non-early tracing does not have this problem and can use
1337 * arbitrary channel names.
1338 */
1339 ltt_channels_register("ust");
1340
1341 result = ltt_trace_setup(trace_name);
1342 if (result < 0) {
1343 ERR("ltt_trace_setup failed");
1344 return;
1345 }
1346
1347 result = ltt_trace_set_type(trace_name, trace_type);
1348 if (result < 0) {
1349 ERR("ltt_trace_set_type failed");
1350 return;
1351 }
1352
1353 result = ltt_trace_alloc(trace_name);
1354 if (result < 0) {
1355 ERR("ltt_trace_alloc failed");
1356 return;
1357 }
1358
1359 result = ltt_trace_start(trace_name);
1360 if (result < 0) {
1361 ERR("ltt_trace_start failed");
1362 return;
1363 }
1364
1365 /* Do this after the trace is started in order to avoid creating confusion
1366 * if the trace fails to start. */
1367 inform_consumer_daemon(trace_name);
1368 }
1369
1370 return;
1371
1372 /* FIXME: on error, should tear down whatever was already set up */
1373
1374 }
1375
1376 /* This is only called if we terminate normally, not with an unhandled signal,
1377 * so we cannot rely on it. However, for now, LTTV requires that the header of
1378 * the last sub-buffer contain a valid end time for the trace. This is done
1379 * automatically only when the trace is properly stopped.
1380 *
1381 * If the traced program crashed, it is always possible to manually add the
1382 * right value in the header, or to open the trace in text mode.
1383 *
1384 * FIXME: Fix LTTV so it doesn't need this.
1385 */
1386
1387 static void destroy_traces(void)
1388 {
1389 int result;
1390
1391 /* if trace running, finish it */
1392
1393 DBG("destructor stopping traces");
1394
1395 result = ltt_trace_stop("auto");
1396 if (result == -1) {
1397 ERR("ltt_trace_stop error");
1398 }
1399
1400 result = ltt_trace_destroy("auto", 0);
1401 if (result == -1) {
1402 ERR("ltt_trace_destroy error");
1403 }
1404 }
1405
1406 static int trace_recording(void)
1407 {
1408 int retval = 0;
1409 struct ust_trace *trace;
1410
1411 ltt_lock_traces();
1412
1413 list_for_each_entry(trace, &ltt_traces.head, list) {
1414 if (trace->active) {
1415 retval = 1;
1416 break;
1417 }
1418 }
1419
1420 ltt_unlock_traces();
1421
1422 return retval;
1423 }
1424
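/* Sleep for usecs microseconds, restarting the nanosleep() if it is
 * interrupted by a signal. Only delays of less than one second are
 * supported, since tv_sec is always 0.
 */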
1425 int restarting_usleep(useconds_t usecs)
1426 {
1427 struct timespec tv;
1428 int result;
1429
1430 tv.tv_sec = 0;
1431 tv.tv_nsec = usecs * 1000;
1432
1433 do {
1434 result = nanosleep(&tv, &tv);
1435 } while (result == -1 && errno == EINTR);
1436
1437 return result;
1438 }
1439
1440 static void stop_listener(void)
1441 {
1442 int result;
1443
1444 if (!have_listener)
1445 return;
1446
1447 result = pthread_cancel(listener_thread);
1448 if (result != 0) {
1449 ERR("pthread_cancel: %s", strerror(result));
1450 }
1451 result = pthread_join(listener_thread, NULL);
1452 if (result != 0) {
1453 ERR("pthread_join: %s", strerror(result));
1454 }
1455 }
1456
1457 /* This destructor keeps the process alive for a few seconds in order
1458 * to give ustd time to connect to its buffers. This is necessary
1459 * for programs whose execution is very short. It is also useful in all
1460 * programs when tracing is started close to the end of the program
1461 * execution.
1462 *
1463 * FIXME: For now, this only works for the first trace created in a
1464 * process.
1465 */
1466
1467 static void __attribute__((destructor)) keepalive(void)
1468 {
1469 if (trace_recording() && LOAD_SHARED(buffers_to_export)) {
1470 int total = 0;
1471 DBG("Keeping process alive for consumer daemon...");
1472 while (LOAD_SHARED(buffers_to_export)) {
1473 const int interv = 200000;
1474 restarting_usleep(interv);
1475 total += interv;
1476
1477 if (total >= 3000000) {
1478 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1479 break;
1480 }
1481 }
1482 DBG("Finally dying...");
1483 }
1484
1485 destroy_traces();
1486
1487 /* Ask the listener to stop and clean up. */
1488 stop_listener();
1489 }
1490
1491 void ust_potential_exec(void)
1492 {
1493 trace_mark(ust, potential_exec, MARK_NOARGS);
1494
1495 DBG("test");
1496
1497 keepalive();
1498 }
1499
1500 /* Notify ust that there was a fork. This needs to be called inside
1501 * the new process, anytime a process whose memory is not shared with
1502 * the parent is created. If this function is not called, the events
1503 * of the new process will not be collected.
1504 *
1505 * Signals should be disabled before the fork and reenabled only after
1506 * this call in order to guarantee tracing is not started before ust_fork()
1507 * sanitizes the new process.
1508 */
1509
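/* A minimal usage sketch (not part of this file): an application that calls
 * fork() directly would wrap it with the hooks defined below, assuming they
 * are exposed through the public ust headers:
 *
 *	ust_fork_info_t fork_info;
 *	pid_t pid;
 *
 *	ust_before_fork(&fork_info);
 *	pid = fork();
 *	if (pid == 0)
 *		ust_after_fork_child(&fork_info);
 *	else
 *		ust_after_fork_parent(&fork_info);
 */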
1510 static void ust_fork(void)
1511 {
1512 struct ust_buffer *buf, *buf_tmp;
1513 struct ustcomm_sock *sock, *sock_tmp;
1514 int result;
1515
1516 /* FIXME: technically, the locks could have been taken before the fork */
1517 DBG("ust: forking");
1518
1519 /* break lock if necessary */
1520 ltt_unlock_traces();
1521
1522 ltt_trace_stop("auto");
1523 ltt_trace_destroy("auto", 1);
1524 /* Delete all active connections, but leave them in the epoll set */
1525 list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
1526 ustcomm_del_sock(sock, 1);
1527 }
1528
1529 /* Delete all blocked consumers */
1530 list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
1531 open_buffers_list) {
1532 result = close(buf->data_ready_fd_read);
1533 if (result == -1) {
1534 PERROR("close");
1535 }
1536 result = close(buf->data_ready_fd_write);
1537 if (result == -1) {
1538 PERROR("close");
1539 }
1540 list_del(&buf->open_buffers_list);
1541 }
1542
1543 /* Clean up the listener socket and epoll, keeping the socket file */
1544 ustcomm_del_named_sock(listen_sock, 1);
1545 close(epoll_fd);
1546
1547 /* Re-start the launch sequence */
1548 STORE_SHARED(buffers_to_export, 0);
1549 have_listener = 0;
1550
1551 /* Set up epoll */
1552 epoll_fd = epoll_create(MAX_EVENTS);
1553 if (epoll_fd == -1) {
1554 ERR("epoll_create failed, tracing shutting down");
1555 return;
1556 }
1557
1558 /* Create the socket */
1559 listen_sock = init_app_socket(epoll_fd);
1560 if (!listen_sock) {
1561 ERR("failed to create application socket,"
1562 " tracing shutting down");
1563 return;
1564 }
1565 create_listener();
1566 ltt_trace_setup("auto");
1567 result = ltt_trace_set_type("auto", "ustrelay");
1568 if (result < 0) {
1569 ERR("ltt_trace_set_type failed");
1570 return;
1571 }
1572
1573 ltt_trace_alloc("auto");
1574 ltt_trace_start("auto");
1575 inform_consumer_daemon("auto");
1576 }
1577
1578 void ust_before_fork(ust_fork_info_t *fork_info)
1579 {
1580 /* Disable signals. This prevents the child from acting on a signal
1581 * before it is properly set up for tracing. It is
1582 * safer to disable all signals, because then we know we are not
1583 * breaking anything by restoring the original mask.
1584 */
1585 sigset_t all_sigs;
1586 int result;
1587
1588 /* FIXME:
1589 - only do this if tracing is active
1590 */
1591
1592 /* Disable signals */
1593 sigfillset(&all_sigs);
1594 result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
1595 if (result == -1) {
1596 PERROR("sigprocmask");
1597 return;
1598 }
1599 }
1600
1601 /* Don't call this function directly in a traced program */
1602 static void ust_after_fork_common(ust_fork_info_t *fork_info)
1603 {
1604 int result;
1605
1606 /* Restore signals */
1607 result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
1608 if (result == -1) {
1609 PERROR("sigprocmask");
1610 return;
1611 }
1612 }
1613
1614 void ust_after_fork_parent(ust_fork_info_t *fork_info)
1615 {
1616 /* Reenable signals */
1617 ust_after_fork_common(fork_info);
1618 }
1619
1620 void ust_after_fork_child(ust_fork_info_t *fork_info)
1621 {
1622 /* First sanitize the child */
1623 ust_fork();
1624
1625 /* Then reenable interrupts */
1626 ust_after_fork_common(fork_info);
1627 }
1628