*/
s64 pidunique = -1LL;
+/* The process pid is used to detect a fork that cannot be traced,
+ * so that such a fork can be ignored by the destructor
+ * sequences in libust.
+ */
+static pid_t processpid = 0;
+
static struct ustcomm_header _receive_header;
static struct ustcomm_header *receive_header = &_receive_header;
static char receive_buffer[USTCOMM_BUFFER_SIZE];
extern struct chan_info_struct chan_infos[];
-static struct list_head open_buffers_list = LIST_HEAD_INIT(open_buffers_list);
+static struct cds_list_head open_buffers_list = CDS_LIST_HEAD_INIT(open_buffers_list);
-static struct list_head ust_socks = LIST_HEAD_INIT(ust_socks);
+static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);
/* volatile because shared between the listener and the main thread */
int buffers_to_export = 0;
static void request_buffer_consumer(int sock,
- const char *channel,
- int cpu)
+ const char *trace,
+ const char *channel,
+ int cpu)
{
struct ustcomm_header send_header, recv_header;
struct ustcomm_buffer_info buf_inf;
result = ustcomm_pack_buffer_info(&send_header,
&buf_inf,
+ trace,
channel,
cpu);
/* iterate on all cpus */
for (j=0; j<trace->channels[i].n_cpus; j++) {
ch_name = trace->channels[i].channel_name;
- request_buffer_consumer(sock, ch_name, j);
- STORE_SHARED(buffers_to_export,
- LOAD_SHARED(buffers_to_export)+1);
+ request_buffer_consumer(sock, trace_name,
+ ch_name, j);
+ CMM_STORE_SHARED(buffers_to_export,
+ CMM_LOAD_SHARED(buffers_to_export)+1);
}
}
}
*/
if (uatomic_read(&buf->consumed) == 0) {
DBG("decrementing buffers_to_export");
- STORE_SHARED(buffers_to_export, LOAD_SHARED(buffers_to_export)-1);
+ CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
}
/* The buffer has been exported, ergo, we can add it to the
* list of open buffers
*/
- list_add(&buf->open_buffers_list, &open_buffers_list);
+ cds_list_add(&buf->open_buffers_list, &open_buffers_list);
unlock_traces:
ltt_unlock_traces();
{
struct ust_buffer *buf;
- list_for_each_entry(buf, &open_buffers_list,
+ cds_list_for_each_entry(buf, &open_buffers_list,
open_buffers_list) {
ltt_force_switch(buf, FORCE_FLUSH);
}
/* Simple commands are those which need only respond with a return value. */
static int process_simple_client_cmd(int command, char *recv_buf)
{
- int result;
- char trace_type[] = "ustrelay";
- char trace_name[] = "auto";
-
switch(command) {
case SET_SOCK_PATH:
{
}
return setenv("UST_DAEMON_SOCKET", sock_msg->sock_path, 1);
}
+
+ case FORCE_SUBBUF_SWITCH:
+ /* FIXME: return codes? */
+ force_subbuf_switch();
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int process_trace_cmd(int command, char *trace_name)
+{
+ int result;
+ char trace_type[] = "ustrelay";
+
+ switch(command) {
case START:
		/* start is an operation that sets up the trace, allocates it and starts it */
result = ltt_trace_setup(trace_name);
return result;
}
return 0;
- case FORCE_SUBBUF_SWITCH:
- /* FIXME: return codes? */
- force_subbuf_switch();
-
- break;
-
- default:
- return -EINVAL;
}
return 0;
}
+
static void process_channel_cmd(int sock, int command,
struct ustcomm_channel_info *ch_inf)
{
struct ustcomm_header *reply_header = &_reply_header;
struct ustcomm_channel_info *reply_msg =
(struct ustcomm_channel_info *)send_buffer;
- char trace_name[] = "auto";
int result, offset = 0, num, size;
memset(reply_header, 0, sizeof(*reply_header));
switch (command) {
case GET_SUBBUF_NUM_SIZE:
- result = get_subbuf_num_size(trace_name,
+ result = get_subbuf_num_size(ch_inf->trace,
ch_inf->channel,
&num, &size);
if (result < 0) {
break;
case SET_SUBBUF_NUM:
- reply_header->result = set_subbuf_num(trace_name,
+ reply_header->result = set_subbuf_num(ch_inf->trace,
ch_inf->channel,
ch_inf->subbuf_num);
break;
case SET_SUBBUF_SIZE:
- reply_header->result = set_subbuf_size(trace_name,
+ reply_header->result = set_subbuf_size(ch_inf->trace,
ch_inf->channel,
ch_inf->subbuf_size);
struct ustcomm_header *reply_header = &_reply_header;
struct ustcomm_buffer_info *reply_msg =
(struct ustcomm_buffer_info *)send_buffer;
- char trace_name[] = "auto";
int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
long consumed_old;
switch (command) {
case GET_BUF_SHMID_PIPE_FD:
- result = get_buffer_shmid_pipe_fd(trace_name, buf_inf->channel,
+ result = get_buffer_shmid_pipe_fd(buf_inf->trace,
+ buf_inf->channel,
buf_inf->ch_cpu,
&buf_shmid,
&buf_struct_shmid,
case NOTIFY_BUF_MAPPED:
reply_header->result =
- notify_buffer_mapped(trace_name,
+ notify_buffer_mapped(buf_inf->trace,
buf_inf->channel,
buf_inf->ch_cpu);
break;
case GET_SUBBUFFER:
- result = get_subbuffer(trace_name, buf_inf->channel,
+ result = get_subbuffer(buf_inf->trace, buf_inf->channel,
buf_inf->ch_cpu, &consumed_old);
if (result < 0) {
reply_header->result = result;
break;
case PUT_SUBBUFFER:
- result = put_subbuffer(trace_name, buf_inf->channel,
+ result = put_subbuffer(buf_inf->trace, buf_inf->channel,
buf_inf->ch_cpu,
buf_inf->consumed_old);
reply_header->result = result;
goto send_response;
}
+ case START:
+ case SETUP_TRACE:
+ case ALLOC_TRACE:
+ case CREATE_TRACE:
+ case START_TRACE:
+ case STOP_TRACE:
+ case DESTROY_TRACE:
+ {
+ struct ustcomm_trace_info *trace_inf =
+ (struct ustcomm_trace_info *)recv_buf;
+
+ result = ustcomm_unpack_trace_info(trace_inf);
+ if (result < 0) {
+ ERR("couldn't unpack trace info");
+ reply_header->result = -EINVAL;
+ goto send_response;
+ }
+
+ reply_header->result =
+ process_trace_cmd(recv_header->command,
+ trace_inf->trace);
+ goto send_response;
+
+ }
default:
reply_header->result =
process_simple_client_cmd(recv_header->command,
* pid, (before and after an exec).
*/
pidunique = make_pidunique();
+ processpid = getpid();
DBG("Tracectl constructor");
if (getenv("UST_OVERWRITE")) {
int val = atoi(getenv("UST_OVERWRITE"));
if (val == 0 || val == 1) {
- STORE_SHARED(ust_channels_overwrite_by_default, val);
+ CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
} else {
WARN("invalid value for UST_OVERWRITE");
}
if (getenv("UST_AUTOCOLLECT")) {
int val = atoi(getenv("UST_AUTOCOLLECT"));
if (val == 0 || val == 1) {
- STORE_SHARED(ust_channels_request_collection_by_default, val);
+ CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
} else {
WARN("invalid value for UST_AUTOCOLLECT");
}
ltt_lock_traces();
- list_for_each_entry(trace, <t_traces.head, list) {
+ cds_list_for_each_entry(trace, <t_traces.head, list) {
if (trace->active) {
retval = 1;
break;
static void __attribute__((destructor)) keepalive()
{
- if (trace_recording() && LOAD_SHARED(buffers_to_export)) {
+ if (processpid != getpid()) {
+ return;
+ }
+
+ if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
int total = 0;
DBG("Keeping process alive for consumer daemon...");
- while (LOAD_SHARED(buffers_to_export)) {
+ while (CMM_LOAD_SHARED(buffers_to_export)) {
const int interv = 200000;
restarting_usleep(interv);
total += interv;
/* FIXME: technically, the locks could have been taken before the fork */
DBG("ust: forking");
+ /* Get the pid of the new process */
+ processpid = getpid();
+
/* break lock if necessary */
ltt_unlock_traces();
ltt_trace_stop("auto");
ltt_trace_destroy("auto", 1);
/* Delete all active connections, but leave them in the epoll set */
- list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
+ cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
ustcomm_del_sock(sock, 1);
}
/* Delete all blocked consumers */
- list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
+ cds_list_for_each_entry_safe(buf, buf_tmp, &open_buffers_list,
open_buffers_list) {
result = close(buf->data_ready_fd_read);
if (result == -1) {
if (result == -1) {
PERROR("close");
}
- list_del(&buf->open_buffers_list);
+ cds_list_del(&buf->open_buffers_list);
}
	/* Clean up the listener socket and epoll, keeping the socket file */
close(epoll_fd);
/* Re-start the launch sequence */
- STORE_SHARED(buffers_to_export, 0);
+ CMM_STORE_SHARED(buffers_to_export, 0);
have_listener = 0;
/* Set up epoll */