#include <getopt.h>
#include <grp.h>
#include <limits.h>
+#include <poll.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
+#include <sys/mount.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include "session.h"
#include "traceable-app.h"
#include "lttng-kconsumerd.h"
-#include "libustctl.h"
+#include "utils.h"
/*
* TODO:
static int is_root; /* Set to 1 if the daemon is running as root */
static pid_t ppid; /* Parent PID for --sig-parent option */
static pid_t kconsumerd_pid;
+static struct pollfd *kernel_pollfd;
static char apps_unix_sock_path[PATH_MAX]; /* Global application Unix socket path */
static char client_unix_sock_path[PATH_MAX]; /* Global client Unix socket path */
static int kconsumerd_err_sock;
static int kconsumerd_cmd_sock;
static int kernel_tracer_fd;
+static int kernel_poll_pipe[2];
+
+/*
+ * Quit pipe for all threads. This permits a single cancellation point
+ * for all threads when receiving an event on the pipe.
+ */
+static int thread_quit_pipe[2];
/* Pthread, Mutexes and Semaphores */
static pthread_t kconsumerd_thread;
static pthread_t apps_thread;
static pthread_t client_thread;
+static pthread_t kernel_thread;
static sem_t kconsumerd_sem;
static pthread_mutex_t kconsumerd_pid_mutex; /* Mutex to control kconsumerd pid assignation */
+/*
+ * Pointer initialized before thread creation.
+ *
+ * This points to the tracing session list containing the session count and a
+ * mutex lock. The lock MUST be taken if you iterate over the list. The lock
+ * MUST NOT be taken if you call a public function in session.c.
+ *
+ * The lock is nested inside the structure: session_list_ptr->lock.
+ */
+static struct ltt_session_list *session_list_ptr;
+
+/*
+ * Init quit pipe.
+ *
+ * The quit pipe is the single cancellation point shared by all threads:
+ * closing it makes every poll() on thread_quit_pipe[0] report POLLNVAL.
+ *
+ * Return -1 on error or 0 if all pipes are created.
+ */
+static int init_thread_quit_pipe(void)
+{
+	int ret;
+
+	/* O_CLOEXEC so the pipe fds do not leak into exec'd children
+	 * (pipe2 is Linux-specific and needs _GNU_SOURCE). */
+	ret = pipe2(thread_quit_pipe, O_CLOEXEC);
+	if (ret < 0) {
+		perror("thread quit pipe");
+		goto error;
+	}
+
+	/* Success deliberately falls through: ret is 0 here. */
+error:
+	return ret;
+}
+
/*
* teardown_kernel_session
*
- * Complete teardown of a kernel session. This free all data structure
- * related to a kernel session and update counter.
+ * Complete teardown of a kernel session. This free all data structure related
+ * to a kernel session and update counter.
*/
static void teardown_kernel_session(struct ltt_session *session)
{
trace_destroy_kernel_session(session->kernel_session);
/* Extra precaution */
session->kernel_session = NULL;
- /* Decrement session count */
- session->kern_session_count--;
}
}
/*
- * cleanup
- *
- * Cleanup the daemon on exit
+ * Cleanup the daemon
*/
static void cleanup()
{
DBG("Cleaning up");
/* <fun> */
- MSG("\n%c[%d;%dm*** assert failed *** ==> %c[%dm", 27,1,31,27,0);
- MSG("%c[%d;%dmMatthew, BEET driven development works!%c[%dm",27,1,33,27,0);
+ MSG("\n%c[%d;%dm*** assert failed *** ==> %c[%dm%c[%d;%dm"
+ "Matthew, BEET driven development works!%c[%dm",
+ 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
/* </fun> */
/* Stopping all threads */
DBG("Terminating all threads");
- pthread_cancel(client_thread);
- pthread_cancel(apps_thread);
- if (kconsumerd_pid != 0) {
- pthread_cancel(kconsumerd_thread);
- }
-
- DBG("Unlinking all unix socket");
- unlink(client_unix_sock_path);
- unlink(apps_unix_sock_path);
- unlink(kconsumerd_err_unix_sock_path);
+ close(thread_quit_pipe[0]);
+ close(thread_quit_pipe[1]);
DBG("Removing %s directory", LTTNG_RUNDIR);
ret = asprintf(&cmd, "rm -rf " LTTNG_RUNDIR);
}
DBG("Cleaning up all session");
- /* Cleanup ALL session */
- cds_list_for_each_entry(sess, <t_session_list.head, list) {
- teardown_kernel_session(sess);
- // TODO complete session cleanup (including UST)
+
+ /* Destroy session list mutex */
+ if (session_list_ptr != NULL) {
+ pthread_mutex_destroy(&session_list_ptr->lock);
+
+ /* Cleanup ALL session */
+ cds_list_for_each_entry(sess, &session_list_ptr->head, list) {
+ teardown_kernel_session(sess);
+ // TODO complete session cleanup (including UST)
+ }
}
+ pthread_mutex_destroy(&kconsumerd_pid_mutex);
+
DBG("Closing kernel fd");
close(kernel_tracer_fd);
}
*
* Free memory of a command context structure.
*/
-static void clean_command_ctx(struct command_ctx *cmd_ctx)
+static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
- DBG("Clean command context structure %p", cmd_ctx);
- if (cmd_ctx) {
- if (cmd_ctx->llm) {
- free(cmd_ctx->llm);
+ DBG("Clean command context structure");
+ if (*cmd_ctx) {
+ if ((*cmd_ctx)->llm) {
+ free((*cmd_ctx)->llm);
}
- if (cmd_ctx->lsm) {
- free(cmd_ctx->lsm);
+ if ((*cmd_ctx)->lsm) {
+ free((*cmd_ctx)->lsm);
}
- free(cmd_ctx);
- cmd_ctx = NULL;
+ free(*cmd_ctx);
+ *cmd_ctx = NULL;
}
}
/*
- * send_kconsumerd_fds
+ * send_kconsumerd_channel_fds
*
- * Send all stream fds of the kernel session to the consumer.
+ * Send all stream fds of kernel channel to the consumer.
*/
-static int send_kconsumerd_fds(int sock, struct ltt_kernel_session *session)
+static int send_kconsumerd_channel_fds(int sock, struct ltt_kernel_channel *channel)
{
int ret;
size_t nb_fd;
struct ltt_kernel_stream *stream;
- struct ltt_kernel_channel *chan;
struct lttcomm_kconsumerd_header lkh;
struct lttcomm_kconsumerd_msg lkm;
- nb_fd = session->stream_count_global;
+ DBG("Sending fds of channel %s to kernel consumer", channel->channel->name);
+
+ nb_fd = channel->stream_count;
/* Setup header */
- lkh.payload_size = (nb_fd + 1) * sizeof(struct lttcomm_kconsumerd_msg);
+ lkh.payload_size = nb_fd * sizeof(struct lttcomm_kconsumerd_msg);
lkh.cmd_type = ADD_STREAM;
DBG("Sending kconsumerd header");
goto error;
}
- DBG("Sending metadata stream fd");
-
- /* Send metadata stream fd first */
- lkm.fd = session->metadata_stream_fd;
- lkm.state = ACTIVE_FD;
- lkm.max_sb_size = session->metadata->conf->attr.subbuf_size;
- strncpy(lkm.path_name, session->metadata->pathname, PATH_MAX);
-
- ret = lttcomm_send_fds_unix_sock(sock, &lkm, &lkm.fd, 1, sizeof(lkm));
- if (ret < 0) {
- perror("send kconsumerd fd");
- goto error;
- }
-
- cds_list_for_each_entry(chan, &session->channel_list.head, list) {
- cds_list_for_each_entry(stream, &chan->stream_list.head, list) {
+ cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
+ if (stream->fd != 0) {
lkm.fd = stream->fd;
lkm.state = stream->state;
- lkm.max_sb_size = chan->channel->attr.subbuf_size;
+ lkm.max_sb_size = channel->channel->attr.subbuf_size;
strncpy(lkm.path_name, stream->pathname, PATH_MAX);
DBG("Sending fd %d to kconsumerd", lkm.fd);
}
}
- DBG("Kconsumerd fds sent");
+ DBG("Kconsumerd channel fds sent");
return 0;
}
/*
- * create_trace_dir
+ * send_kconsumerd_fds
*
- * Create the trace output directory.
+ * Send all stream fds of the kernel session to the consumer.
*/
-static int create_trace_dir(struct ltt_kernel_session *session)
+static int send_kconsumerd_fds(int sock, struct ltt_kernel_session *session)
{
int ret;
struct ltt_kernel_channel *chan;
+ struct lttcomm_kconsumerd_header lkh;
+ struct lttcomm_kconsumerd_msg lkm;
+
+ /* Setup header */
+ lkh.payload_size = sizeof(struct lttcomm_kconsumerd_msg);
+ lkh.cmd_type = ADD_STREAM;
+
+ DBG("Sending kconsumerd header for metadata");
+
+ ret = lttcomm_send_unix_sock(sock, &lkh, sizeof(struct lttcomm_kconsumerd_header));
+ if (ret < 0) {
+ perror("send kconsumerd header");
+ goto error;
+ }
+
+ DBG("Sending metadata stream fd");
+
+ if (session->metadata_stream_fd != 0) {
+ /* Send metadata stream fd first */
+ lkm.fd = session->metadata_stream_fd;
+ lkm.state = ACTIVE_FD;
+ lkm.max_sb_size = session->metadata->conf->attr.subbuf_size;
+ strncpy(lkm.path_name, session->metadata->pathname, PATH_MAX);
+
+ ret = lttcomm_send_fds_unix_sock(sock, &lkm, &lkm.fd, 1, sizeof(lkm));
+ if (ret < 0) {
+ perror("send kconsumerd fd");
+ goto error;
+ }
+ }
- /* Create all channel directories */
cds_list_for_each_entry(chan, &session->channel_list.head, list) {
- DBG("Creating trace directory at %s", chan->pathname);
- // TODO: recursive create dir
- ret = mkdir(chan->pathname, S_IRWXU | S_IRWXG );
+ ret = send_kconsumerd_channel_fds(sock, chan);
if (ret < 0) {
- if (ret != EEXIST) {
- perror("mkdir trace path");
- ret = -errno;
- goto error;
- }
+ goto error;
}
}
+ DBG("Kconsumerd fds (metadata and channel streams) sent");
+
return 0;
error:
return ret;
}
+#ifdef DISABLED
/*
* ust_connect_app
*
return sock;
}
+#endif /* DISABLED */
/*
* notify_apps
return ret;
}
+/*
+ * update_kernel_pollfd
+ *
+ * Update the kernel pollfd set of all channel fd available over
+ * all tracing session. Add the wakeup pipe at the end of the set.
+ *
+ * Must only be called from thread_manage_kernel. Returns the number of
+ * entries in kernel_pollfd (channels + 2 pipe fds) or -1 on error.
+ */
+static int update_kernel_pollfd(void)
+{
+	unsigned int i = 0;
+	/*
+	 * The wakeup pipe and the quit pipe are needed so the number of fds starts
+	 * at 2 for those pipes.
+	 */
+	unsigned int nb_fd = 2;
+	struct pollfd *new_pollfd;
+	struct ltt_session *session;
+	struct ltt_kernel_channel *channel;
+
+	DBG("Updating kernel_pollfd");
+
+	/* Get the number of channel of all kernel session */
+	lock_session_list();
+	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+		lock_session(session);
+		if (session->kernel_session == NULL) {
+			unlock_session(session);
+			continue;
+		}
+		nb_fd += session->kernel_session->channel_count;
+		unlock_session(session);
+	}
+
+	DBG("Resizing kernel_pollfd to size %d", nb_fd);
+
+	/* Use a temporary pointer so the old array is not leaked if realloc fails */
+	new_pollfd = realloc(kernel_pollfd, nb_fd * sizeof(struct pollfd));
+	if (new_pollfd == NULL) {
+		perror("realloc kernel_pollfd");
+		goto error;
+	}
+	kernel_pollfd = new_pollfd;
+
+	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+		lock_session(session);
+		if (session->kernel_session == NULL) {
+			unlock_session(session);
+			continue;
+		}
+		if (i >= nb_fd) {
+			ERR("Too many channels for kernel_pollfd size");
+			unlock_session(session);
+			break;
+		}
+		cds_list_for_each_entry(channel, &session->kernel_session->channel_list.head, list) {
+			kernel_pollfd[i].fd = channel->fd;
+			kernel_pollfd[i].events = POLLIN | POLLRDNORM;
+			i++;
+		}
+		unlock_session(session);
+	}
+	unlock_session_list();
+
+	/* Adding wake up pipe */
+	kernel_pollfd[nb_fd - 2].fd = kernel_poll_pipe[0];
+	kernel_pollfd[nb_fd - 2].events = POLLIN;
+
+	/* Adding the quit pipe */
+	kernel_pollfd[nb_fd - 1].fd = thread_quit_pipe[0];
+	/*
+	 * events must not be left uninitialized (realloc'd memory): zero it.
+	 * POLLNVAL/POLLERR/POLLHUP are reported regardless of events, which is
+	 * all thread_manage_kernel checks on this entry.
+	 */
+	kernel_pollfd[nb_fd - 1].events = 0;
+
+	return nb_fd;
+
+error:
+	unlock_session_list();
+	return -1;
+}
+
+/*
+ * update_kernel_stream
+ *
+ * Find the channel fd from 'fd' over all tracing session. When found, check
+ * for new channel stream and send those stream fds to the kernel consumer.
+ *
+ * Useful for CPU hotplug feature.
+ *
+ * Returns 0 on success (or if the fd is not found), negative value on error.
+ */
+static int update_kernel_stream(int fd)
+{
+	int ret = 0;
+	struct ltt_session *session;
+	struct ltt_kernel_channel *channel;
+
+	DBG("Updating kernel streams for channel fd %d", fd);
+
+	lock_session_list();
+	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+		lock_session(session);
+		if (session->kernel_session == NULL) {
+			unlock_session(session);
+			continue;
+		}
+		cds_list_for_each_entry(channel, &session->kernel_session->channel_list.head, list) {
+			if (channel->fd == fd) {
+				DBG("Channel found, updating kernel streams");
+				ret = kernel_open_channel_stream(channel);
+				if (ret < 0) {
+					goto unlock;
+				}
+				/*
+				 * Have we already sent fds to the consumer? If yes, it means that
+				 * tracing is started so it is safe to send our updated stream fds.
+				 */
+				if (session->kernel_session->kconsumer_fds_sent == 1) {
+					ret = send_kconsumerd_channel_fds(kconsumerd_cmd_sock, channel);
+				}
+				/* A channel fd belongs to exactly one session; stop searching */
+				goto unlock;
+			}
+		}
+		unlock_session(session);
+	}
+	/*
+	 * Not found: every visited session was already unlocked in the loop.
+	 * Do NOT touch 'session' here, the iterator left it pointing at the
+	 * list head container, which is not a valid session.
+	 */
+	unlock_session_list();
+	return ret;
+
+unlock:
+	/* 'session' is still locked when jumping here */
+	unlock_session(session);
+	unlock_session_list();
+	return ret;
+}
+
+/*
+ * thread_manage_kernel
+ *
+ * This thread manage event coming from the kernel.
+ *
+ * Features supported in this thread:
+ *    -) CPU Hotplug
+ *
+ * Polls every kernel channel fd plus two control fds: the wake up pipe
+ * (second to last entry, triggers a pollfd rebuild) and the thread quit
+ * pipe (last entry, terminates the thread when closed).
+ */
+static void *thread_manage_kernel(void *data)
+{
+	int ret, i, nb_fd = 0;
+	char tmp;
+	int update_poll_flag = 1;
+
+	DBG("Thread manage kernel started");
+
+	while (1) {
+		/* Rebuild the pollfd array when a channel was added or removed */
+		if (update_poll_flag == 1) {
+			nb_fd = update_kernel_pollfd();
+			if (nb_fd < 0) {
+				goto error;
+			}
+			update_poll_flag = 0;
+		}
+
+		DBG("Polling on %d fds", nb_fd);
+
+		/* Poll infinite value of time */
+		ret = poll(kernel_pollfd, nb_fd, -1);
+		if (ret < 0) {
+			perror("poll kernel thread");
+			goto error;
+		} else if (ret == 0) {
+			/* Should not happen since timeout is infinite */
+			continue;
+		}
+
+		/* Thread quit pipe has been closed. Killing thread. */
+		if (kernel_pollfd[nb_fd - 1].revents == POLLNVAL) {
+			goto error;
+		}
+
+		DBG("Kernel poll event triggered");
+
+		/*
+		 * Check if the wake up pipe was triggered. If so, the kernel_pollfd
+		 * must be updated.
+		 */
+		switch (kernel_pollfd[nb_fd - 2].revents) {
+		case POLLIN:
+			/* NOTE(review): read() result is assigned but never checked —
+			 * a failed or short read is silently ignored; confirm intended. */
+			ret = read(kernel_poll_pipe[0], &tmp, 1);
+			update_poll_flag = 1;
+			continue;
+		case POLLERR:
+			goto error;
+		default:
+			break;
+		}
+
+		for (i = 0; i < nb_fd; i++) {
+			switch (kernel_pollfd[i].revents) {
+			/*
+			 * New CPU detected by the kernel. Adding kernel stream to kernel
+			 * session and updating the kernel consumer
+			 */
+			case POLLIN | POLLRDNORM:
+				ret = update_kernel_stream(kernel_pollfd[i].fd);
+				if (ret < 0) {
+					continue;
+				}
+				break;
+			}
+		}
+	}
+
+error:
+	DBG("Kernel thread dying");
+	if (kernel_pollfd) {
+		free(kernel_pollfd);
+	}
+
+	close(kernel_poll_pipe[0]);
+	close(kernel_poll_pipe[1]);
+	return NULL;
+}
+
/*
* thread_manage_kconsumerd
*
*/
static void *thread_manage_kconsumerd(void *data)
{
- int sock, ret;
+ int sock = 0, ret;
enum lttcomm_return_code code;
+ struct pollfd pollfd[2];
DBG("[thread] Manage kconsumerd started");
goto error;
}
+ /* First fd is always the quit pipe */
+ pollfd[0].fd = thread_quit_pipe[0];
+
+ /* Apps socket */
+ pollfd[1].fd = kconsumerd_err_sock;
+ pollfd[1].events = POLLIN;
+
+	/* Infinite blocking call, waiting for transmission */
+ ret = poll(pollfd, 2, -1);
+ if (ret < 0) {
+ perror("poll kconsumerd thread");
+ goto error;
+ }
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (pollfd[0].revents == POLLNVAL) {
+ goto error;
+ } else if (pollfd[1].revents == POLLERR) {
+ ERR("Kconsumerd err socket poll error");
+ goto error;
+ }
+
sock = lttcomm_accept_unix_sock(kconsumerd_err_sock);
if (sock < 0) {
goto error;
ERR("Kconsumerd return code : %s", lttcomm_get_readable_code(-code));
error:
- kconsumerd_pid = 0;
DBG("Kconsumerd thread dying");
+ if (kconsumerd_err_sock) {
+ close(kconsumerd_err_sock);
+ }
+ if (kconsumerd_cmd_sock) {
+ close(kconsumerd_cmd_sock);
+ }
+ if (sock) {
+ close(sock);
+ }
+
+ unlink(kconsumerd_err_unix_sock_path);
+ unlink(kconsumerd_cmd_unix_sock_path);
+
+ kconsumerd_pid = 0;
return NULL;
}
*/
static void *thread_manage_apps(void *data)
{
- int sock, ret;
+ int sock = 0, ret;
+ struct pollfd pollfd[2];
/* TODO: Something more elegant is needed but fine for now */
/* FIXME: change all types to either uint8_t, uint32_t, uint64_t
goto error;
}
+ /* First fd is always the quit pipe */
+ pollfd[0].fd = thread_quit_pipe[0];
+
+ /* Apps socket */
+ pollfd[1].fd = apps_sock;
+ pollfd[1].events = POLLIN;
+
/* Notify all applications to register */
notify_apps(default_global_apps_pipe);
while (1) {
DBG("Accepting application registration");
- /* Blocking call, waiting for transmission */
+
+	/* Infinite blocking call, waiting for transmission */
+ ret = poll(pollfd, 2, -1);
+ if (ret < 0) {
+ perror("poll apps thread");
+ goto error;
+ }
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (pollfd[0].revents == POLLNVAL) {
+ goto error;
+ } else if (pollfd[1].revents == POLLERR) {
+ ERR("Apps socket poll error");
+ goto error;
+ }
+
sock = lttcomm_accept_unix_sock(apps_sock);
if (sock < 0) {
goto error;
}
- /* Basic recv here to handle the very simple data
+ /*
+ * Basic recv here to handle the very simple data
* that the libust send to register (reg_msg).
*/
ret = recv(sock, ®_msg, sizeof(reg_msg), 0);
}
error:
+ DBG("Apps thread dying");
+ if (apps_sock) {
+ close(apps_sock);
+ }
+ if (sock) {
+ close(sock);
+ }
+ unlink(apps_unix_sock_path);
return NULL;
}
/*
* Exec kconsumerd.
*/
- execlp("kconsumerd", "kconsumerd", "--verbose", NULL);
+ execlp("ltt-kconsumerd", "ltt-kconsumerd", "--verbose", NULL);
if (errno != 0) {
perror("kernel start consumer exec");
}
pthread_mutex_lock(&kconsumerd_pid_mutex);
if (kconsumerd_pid != 0) {
+ pthread_mutex_unlock(&kconsumerd_pid_mutex);
goto end;
}
}
end:
- pthread_mutex_unlock(&kconsumerd_pid_mutex);
return 0;
error:
return ret;
}
+/*
+ * modprobe_kernel_modules
+ *
+ * Load every module listed in kernel_modules_list via /sbin/modprobe.
+ * Module load failures are logged but do not abort the loop (best effort).
+ *
+ * Returns a negative value on snprintf/system failure, otherwise the
+ * result of the last command issued.
+ */
+static int modprobe_kernel_modules(void)
+{
+	int ret = 0, i = 0;
+	char modprobe[256];
+
+	while (kernel_modules_list[i] != NULL) {
+		ret = snprintf(modprobe, sizeof(modprobe), "/sbin/modprobe %s",
+				kernel_modules_list[i]);
+		if (ret < 0) {
+			perror("snprintf modprobe");
+			goto error;
+		}
+		ret = system(modprobe);
+		if (ret < 0) {
+			ERR("Unable to load module %s", kernel_modules_list[i]);
+		} else {
+			/* Only claim success when system() did not fail */
+			DBG("Modprobe successfully %s", kernel_modules_list[i]);
+		}
+		i++;
+	}
+
+error:
+	return ret;
+}
+
+/*
+ * mount_debugfs
+ *
+ * Create 'path' (including missing parents) and mount a debugfs
+ * instance on it.
+ *
+ * Returns 0 on success, a negative value on error.
+ */
+static int mount_debugfs(char *path)
+{
+	int ret;
+	char *type = "debugfs";
+
+	/* Make sure the mount point exists (rwx for user and group) */
+	ret = mkdir_recursive(path, S_IRWXU | S_IRWXG);
+	if (ret < 0) {
+		goto error;
+	}
+
+	/* mount(2): source and fstype are both "debugfs"; no flags, no data */
+	ret = mount(type, path, type, 0, NULL);
+	if (ret < 0) {
+		perror("mount debugfs");
+		goto error;
+	}
+
+	DBG("Mounted debugfs successfully at %s", path);
+
+	/* Success also falls through here with ret == 0 */
+error:
+	return ret;
+}
+
/*
* init_kernel_tracer
*
*/
static void init_kernel_tracer(void)
{
- /* Set the global kernel tracer fd */
- kernel_tracer_fd = open(DEFAULT_KERNEL_TRACER_PATH, O_RDWR);
+ int ret;
+ char *proc_mounts = "/proc/mounts";
+ char line[256];
+ char *debugfs_path = NULL, *lttng_path;
+ FILE *fp;
+
+ /* Detect debugfs */
+ fp = fopen(proc_mounts, "r");
+ if (fp == NULL) {
+ ERR("Unable to probe %s", proc_mounts);
+ goto error;
+ }
+
+ while (fgets(line, sizeof(line), fp) != NULL) {
+ if (strstr(line, "debugfs") != NULL) {
+ /* Remove first string */
+ strtok(line, " ");
+ /* Dup string here so we can reuse line later on */
+ debugfs_path = strdup(strtok(NULL, " "));
+ DBG("Got debugfs path : %s", debugfs_path);
+ break;
+ }
+ }
+
+ fclose(fp);
+
+ /* Mount debugfs if needded */
+ if (debugfs_path == NULL) {
+ ret = asprintf(&debugfs_path, "/mnt/debugfs");
+ if (ret < 0) {
+ perror("asprintf debugfs path");
+ goto error;
+ }
+ ret = mount_debugfs(debugfs_path);
+ if (ret < 0) {
+ goto error;
+ }
+ }
+
+ /* Modprobe lttng kernel modules */
+ ret = modprobe_kernel_modules();
+ if (ret < 0) {
+ goto error;
+ }
+
+ /* Setup lttng kernel path */
+ ret = asprintf(<tng_path, "%s/lttng", debugfs_path);
+ if (ret < 0) {
+ perror("asprintf lttng path");
+ goto error;
+ }
+
+ /* Open debugfs lttng */
+ kernel_tracer_fd = open(lttng_path, O_RDWR);
if (kernel_tracer_fd < 0) {
- WARN("No kernel tracer available");
- kernel_tracer_fd = 0;
+ DBG("Failed to open %s", lttng_path);
+ goto error;
}
+ free(lttng_path);
+ free(debugfs_path);
DBG("Kernel tracer fd %d", kernel_tracer_fd);
+ return;
+
+error:
+ if (lttng_path) {
+ free(lttng_path);
+ }
+ if (debugfs_path) {
+ free(debugfs_path);
+ }
+ WARN("No kernel tracer available");
+ kernel_tracer_fd = 0;
+ return;
}
/*
{
int ret;
- /* Create trace directory */
- ret = create_trace_dir(session);
- if (ret < 0) {
- if (ret == -EEXIST) {
- ret = LTTCOMM_KERN_DIR_EXIST;
- } else {
- ret = LTTCOMM_KERN_DIR_FAIL;
- goto error;
- }
- }
-
if (session->kconsumer_fds_sent == 0) {
ret = send_kconsumerd_fds(kconsumerd_cmd_sock, session);
if (ret < 0) {
return ret;
}
+/*
+ * Notify kernel thread to update its pollfd.
+ *
+ * Writes one byte into kernel_poll_pipe; thread_manage_kernel polls the
+ * read end and rebuilds its pollfd array when the byte arrives.
+ *
+ * Returns the write() result: 1 on success, negative on error (errno set).
+ */
+static int notify_kernel_pollfd(void)
+{
+	int ret;
+
+	/* Inform kernel thread of the new kernel channel */
+	ret = write(kernel_poll_pipe[1], "!", 1);
+	if (ret < 0) {
+		perror("write kernel poll pipe");
+	}
+
+	return ret;
+}
+
/*
* init_default_channel
*
chan->attr.num_subbuf = DEFAULT_CHANNEL_SUBBUF_NUM;
chan->attr.switch_timer_interval = DEFAULT_CHANNEL_SWITCH_TIMER;
chan->attr.read_timer_interval = DEFAULT_CHANNEL_READ_TIMER;
+ chan->attr.output = DEFAULT_KERNEL_CHANNEL_OUTPUT;
error:
return chan;
static int create_kernel_session(struct ltt_session *session)
{
int ret;
- struct lttng_channel *chan;
DBG("Creating kernel session");
goto error;
}
- chan = init_default_channel();
- if (chan == NULL) {
- ret = LTTCOMM_FATAL;
- goto error;
+ ret = mkdir_recursive(session->path, S_IRWXU | S_IRWXG );
+ if (ret < 0) {
+ if (ret != EEXIST) {
+ ERR("Trace directory creation error");
+ goto error;
+ }
}
- DBG("Creating default kernel channel %s", DEFAULT_CHANNEL_NAME);
+error:
+ return ret;
+}
+
+/*
+ * Using the session list, filled a lttng_session array to send back to the
+ * client for session listing.
+ *
+ * The session list lock MUST be acquired before calling this function. Use
+ * lock_session_list() and unlock_session_list().
+ */
+static void list_lttng_sessions(struct lttng_session *sessions)
+{
+ int i = 0;
+ struct ltt_session *session;
- ret = kernel_create_channel(session->kernel_session, chan);
- if (ret < 0) {
- ret = LTTCOMM_KERN_CHAN_FAIL;
- goto error;
+ DBG("Getting all available session");
+ /*
+ * Iterate over session list and append data after the control struct in
+ * the buffer.
+ */
+ cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+ strncpy(sessions[i].path, session->path, PATH_MAX);
+ strncpy(sessions[i].name, session->name, NAME_MAX);
+ i++;
}
-
-error:
- return ret;
}
/*
ret = LTTCOMM_SELECT_SESS;
}
goto error;
+ } else {
+ /* Acquire lock for the session */
+ lock_session(cmd_ctx->session);
}
break;
}
* Check kernel command for kernel session.
*/
switch (cmd_ctx->lsm->cmd_type) {
- case LTTNG_KERNEL_CREATE_CHANNEL:
+ case LTTNG_KERNEL_ADD_CONTEXT:
case LTTNG_KERNEL_DISABLE_ALL_EVENT:
case LTTNG_KERNEL_DISABLE_CHANNEL:
case LTTNG_KERNEL_DISABLE_EVENT:
}
}
+#ifdef DISABLED
/* Connect to ust apps if available pid */
if (cmd_ctx->lsm->pid > 0) {
/* Connect to app using ustctl API */
goto error;
}
}
+#endif /* DISABLED */
/* Process by command type */
switch (cmd_ctx->lsm->cmd_type) {
- case LTTNG_KERNEL_CREATE_CHANNEL:
+ case LTTNG_KERNEL_ADD_CONTEXT:
{
+ int found = 0, no_event = 0;
+ struct ltt_kernel_channel *chan;
+ struct ltt_kernel_event *event;
+ struct lttng_kernel_context ctx;
+
/* Setup lttng message with no payload */
ret = setup_lttng_msg(cmd_ctx, 0);
if (ret < 0) {
goto setup_error;
}
- /* Kernel tracer */
- DBG("Creating kernel channel");
+ /* Check if event name is given */
+ if (strlen(cmd_ctx->lsm->u.context.event_name) == 0) {
+ no_event = 1;
+ }
+
+ /* Create Kernel context */
+ ctx.ctx = cmd_ctx->lsm->u.context.ctx.ctx;
+ ctx.u.perf_counter.type = cmd_ctx->lsm->u.context.ctx.u.perf_counter.type;
+ ctx.u.perf_counter.config = cmd_ctx->lsm->u.context.ctx.u.perf_counter.config;
+ strncpy(ctx.u.perf_counter.name,
+ cmd_ctx->lsm->u.context.ctx.u.perf_counter.name,
+ sizeof(ctx.u.perf_counter.name));
+
+ if (strlen(cmd_ctx->lsm->u.context.channel_name) == 0) {
+ /* Go over all channels */
+ DBG("Adding context to all channels");
+ cds_list_for_each_entry(chan,
+ &cmd_ctx->session->kernel_session->channel_list.head, list) {
+ if (no_event) {
+ ret = kernel_add_channel_context(chan, &ctx);
+ if (ret < 0) {
+ continue;
+ }
+ } else {
+ event = get_kernel_event_by_name(cmd_ctx->lsm->u.context.event_name, chan);
+ if (event != NULL) {
+ ret = kernel_add_event_context(event, &ctx);
+ if (ret < 0) {
+ ret = LTTCOMM_KERN_CONTEXT_FAIL;
+ goto error;
+ }
+ found = 1;
+ break;
+ }
+ }
+ }
+ } else {
+ chan = get_kernel_channel_by_name(cmd_ctx->lsm->u.context.channel_name,
+ cmd_ctx->session->kernel_session);
+ if (chan == NULL) {
+ ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
+ goto error;
+ }
+
+ if (no_event) {
+ ret = kernel_add_channel_context(chan, &ctx);
+ if (ret < 0) {
+ ret = LTTCOMM_KERN_CONTEXT_FAIL;
+ goto error;
+ }
+ } else {
+ event = get_kernel_event_by_name(cmd_ctx->lsm->u.context.event_name, chan);
+ if (event != NULL) {
+ ret = kernel_add_event_context(event, &ctx);
+ if (ret < 0) {
+ ret = LTTCOMM_KERN_CONTEXT_FAIL;
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (!found && !no_event) {
+ ret = LTTCOMM_NO_EVENT;
+ goto error;
+ }
+
+ ret = LTTCOMM_OK;
+ break;
+ }
+ case LTTNG_KERNEL_DISABLE_CHANNEL:
+ {
+ struct ltt_kernel_channel *chan;
- ret = kernel_create_channel(cmd_ctx->session->kernel_session,
- &cmd_ctx->lsm->u.channel.chan);
+ /* Setup lttng message with no payload */
+ ret = setup_lttng_msg(cmd_ctx, 0);
if (ret < 0) {
- ret = LTTCOMM_KERN_CHAN_FAIL;
+ goto setup_error;
+ }
+
+ chan = get_kernel_channel_by_name(cmd_ctx->lsm->u.disable.channel_name,
+ cmd_ctx->session->kernel_session);
+ if (chan == NULL) {
+ ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
goto error;
+ } else if (chan->enabled == 1) {
+ ret = kernel_disable_channel(chan);
+ if (ret < 0) {
+ if (ret != EEXIST) {
+ ret = LTTCOMM_KERN_CHAN_DISABLE_FAIL;
+ }
+ goto error;
+ }
}
+ kernel_wait_quiescent(kernel_tracer_fd);
ret = LTTCOMM_OK;
break;
}
ret = LTTCOMM_OK;
break;
}
- case LTTNG_KERNEL_ENABLE_EVENT:
+ case LTTNG_KERNEL_DISABLE_ALL_EVENT:
{
struct ltt_kernel_channel *chan;
struct ltt_kernel_event *ev;
goto setup_error;
}
- chan = get_kernel_channel_by_name(cmd_ctx->lsm->u.enable.channel_name,
+ DBG("Disabling all enabled kernel events");
+
+ chan = get_kernel_channel_by_name(cmd_ctx->lsm->u.disable.channel_name,
cmd_ctx->session->kernel_session);
if (chan == NULL) {
ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
goto error;
}
- ev = get_kernel_event_by_name(cmd_ctx->lsm->u.enable.event.name, chan);
+ /* For each event in the kernel session */
+ cds_list_for_each_entry(ev, &chan->events_list.head, list) {
+ DBG("Disabling kernel event %s for channel %s.",
+ ev->event->name, cmd_ctx->lsm->u.disable.channel_name);
+ ret = kernel_disable_event(ev);
+ if (ret < 0) {
+ continue;
+ }
+ }
+
+ /* Quiescent wait after event disable */
+ kernel_wait_quiescent(kernel_tracer_fd);
+ ret = LTTCOMM_OK;
+ break;
+ }
+ case LTTNG_KERNEL_ENABLE_CHANNEL:
+ {
+ struct ltt_kernel_channel *chan;
+
+ /* Setup lttng message with no payload */
+ ret = setup_lttng_msg(cmd_ctx, 0);
+ if (ret < 0) {
+ goto setup_error;
+ }
+
+ chan = get_kernel_channel_by_name(cmd_ctx->lsm->u.enable.channel_name,
+ cmd_ctx->session->kernel_session);
+ if (chan == NULL) {
+ /* Channel not found, creating it */
+ DBG("Creating kernel channel");
+
+ ret = kernel_create_channel(cmd_ctx->session->kernel_session,
+ &cmd_ctx->lsm->u.channel.chan, cmd_ctx->session->path);
+ if (ret < 0) {
+ ret = LTTCOMM_KERN_CHAN_FAIL;
+ goto error;
+ }
+
+ /* Notify kernel thread that there is a new channel */
+ ret = notify_kernel_pollfd();
+ if (ret < 0) {
+ ret = LTTCOMM_FATAL;
+ goto error;
+ }
+ } else if (chan->enabled == 0) {
+ ret = kernel_enable_channel(chan);
+ if (ret < 0) {
+ if (ret != EEXIST) {
+ ret = LTTCOMM_KERN_CHAN_ENABLE_FAIL;
+ }
+ goto error;
+ }
+ }
+
+ kernel_wait_quiescent(kernel_tracer_fd);
+ ret = LTTCOMM_OK;
+ break;
+ }
+ case LTTNG_KERNEL_ENABLE_EVENT:
+ {
+ char *channel_name;
+ struct ltt_kernel_channel *kchan;
+ struct ltt_kernel_event *ev;
+ struct lttng_channel *chan;
+
+ /* Setup lttng message with no payload */
+ ret = setup_lttng_msg(cmd_ctx, 0);
+ if (ret < 0) {
+ goto setup_error;
+ }
+
+ channel_name = cmd_ctx->lsm->u.enable.channel_name;
+
+ do {
+ kchan = get_kernel_channel_by_name(channel_name,
+ cmd_ctx->session->kernel_session);
+ if (kchan == NULL) {
+ DBG("Creating default channel");
+
+ chan = init_default_channel();
+ if (chan == NULL) {
+ ret = LTTCOMM_FATAL;
+ goto error;
+ }
+
+ ret = kernel_create_channel(cmd_ctx->session->kernel_session,
+ chan, cmd_ctx->session->path);
+ if (ret < 0) {
+ ret = LTTCOMM_KERN_CHAN_FAIL;
+ goto error;
+ }
+ }
+ } while (kchan == NULL);
+
+ ev = get_kernel_event_by_name(cmd_ctx->lsm->u.enable.event.name, kchan);
if (ev == NULL) {
DBG("Creating kernel event %s for channel %s.",
- cmd_ctx->lsm->u.enable.event.name, cmd_ctx->lsm->u.enable.channel_name);
- ret = kernel_create_event(&cmd_ctx->lsm->u.enable.event, chan);
+ cmd_ctx->lsm->u.enable.event.name, channel_name);
+ ret = kernel_create_event(&cmd_ctx->lsm->u.enable.event, kchan);
} else {
DBG("Enabling kernel event %s for channel %s.",
- cmd_ctx->lsm->u.enable.event.name, cmd_ctx->lsm->u.enable.channel_name);
+ cmd_ctx->lsm->u.enable.event.name, channel_name);
ret = kernel_enable_event(ev);
+ if (ret == -EEXIST) {
+ ret = LTTCOMM_KERN_EVENT_EXIST;
+ goto error;
+ }
}
if (ret < 0) {
}
case LTTNG_KERNEL_ENABLE_ALL_EVENT:
{
- int pos, size, found;
- char *event_list, *event, *ptr;
- struct ltt_kernel_channel *chan;
- struct lttng_event ev;
+ int pos, size;
+ char *event_list, *event, *ptr, *channel_name;
+ struct ltt_kernel_channel *kchan;
+ struct ltt_kernel_event *ev;
+ struct lttng_event ev_attr;
+ struct lttng_channel *chan;
/* Setup lttng message with no payload */
ret = setup_lttng_msg(cmd_ctx, 0);
DBG("Enabling all kernel event");
- size = kernel_list_events(kernel_tracer_fd, &event_list);
- if (size < 0) {
- ret = LTTCOMM_KERN_LIST_FAIL;
- goto error;
- }
+ channel_name = cmd_ctx->lsm->u.enable.channel_name;
- /* Get channel by name and create event for that channel */
- cds_list_for_each_entry(chan, &cmd_ctx->session->kernel_session->channel_list.head, list) {
- if (strcmp(cmd_ctx->lsm->u.enable.channel_name, chan->channel->name) == 0) {
- found = 1;
- break;
+ do {
+ kchan = get_kernel_channel_by_name(channel_name,
+ cmd_ctx->session->kernel_session);
+ if (kchan == NULL) {
+ DBG("Creating default channel");
+
+ chan = init_default_channel();
+ if (chan == NULL) {
+ ret = LTTCOMM_FATAL;
+ goto error;
+ }
+
+ ret = kernel_create_channel(cmd_ctx->session->kernel_session,
+ &cmd_ctx->lsm->u.channel.chan, cmd_ctx->session->path);
+ if (ret < 0) {
+ ret = LTTCOMM_KERN_CHAN_FAIL;
+ goto error;
+ }
+ }
+ } while (kchan == NULL);
+
+ /* For each event in the kernel session */
+ cds_list_for_each_entry(ev, &kchan->events_list.head, list) {
+ DBG("Enabling kernel event %s for channel %s.",
+ ev->event->name, channel_name);
+ ret = kernel_enable_event(ev);
+ if (ret < 0) {
+ continue;
}
}
- if (!found) {
- ret = LTTCOMM_KERN_CHAN_NOT_FOUND;
+ size = kernel_list_events(kernel_tracer_fd, &event_list);
+ if (size < 0) {
+ ret = LTTCOMM_KERN_LIST_FAIL;
goto error;
}
ptr = event_list;
while ((size = sscanf(ptr, "event { name = %m[^;]; };%n\n", &event, &pos)) == 1) {
- strncpy(ev.name, event, LTTNG_SYM_NAME_LEN);
- /* Default event type for enable all */
- ev.type = LTTNG_EVENT_TRACEPOINTS;
- /* Enable each single tracepoint event */
- ret = kernel_create_event(&ev, chan);
- if (ret < 0) {
- ret = LTTCOMM_KERN_ENABLE_FAIL;
- goto error;
+ ev = get_kernel_event_by_name(event, kchan);
+ if (ev == NULL) {
+ strncpy(ev_attr.name, event, LTTNG_SYM_NAME_LEN);
+ /* Default event type for enable all */
+ ev_attr.type = LTTNG_EVENT_TRACEPOINT;
+ /* Enable each single tracepoint event */
+ ret = kernel_create_event(&ev_attr, kchan);
+ if (ret < 0) {
+ /* Ignore error here and continue */
+ }
}
+
/* Move pointer to the next line */
ptr += pos + 1;
free(event);
if (cmd_ctx->session->kernel_session != NULL) {
if (cmd_ctx->session->kernel_session->metadata == NULL) {
DBG("Open kernel metadata");
- ret = kernel_open_metadata(cmd_ctx->session->kernel_session);
+ ret = kernel_open_metadata(cmd_ctx->session->kernel_session,
+ cmd_ctx->session->path);
if (ret < 0) {
ret = LTTCOMM_KERN_META_FAIL;
goto error;
goto error;
}
+ /*
+ * Must notify the kernel thread here to update it's pollfd in order to
+ * remove the channel(s)' fd just destroyed.
+ */
+ ret = notify_kernel_pollfd();
+ if (ret < 0) {
+ ret = LTTCOMM_FATAL;
+ goto error;
+ }
+
ret = LTTCOMM_OK;
break;
}
*/
case LTTNG_LIST_SESSIONS:
{
- unsigned int session_count;
+ lock_session_list();
- session_count = get_session_count();
- if (session_count == 0) {
+ if (session_list_ptr->count == 0) {
ret = LTTCOMM_NO_SESSION;
goto error;
}
- ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * session_count);
+ ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) *
+ session_list_ptr->count);
if (ret < 0) {
goto setup_error;
}
- get_lttng_session((struct lttng_session *)(cmd_ctx->llm->payload));
+ /* Filled the session array */
+ list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload));
+
+ unlock_session_list();
ret = LTTCOMM_OK;
break;
/* Set return code */
cmd_ctx->llm->ret_code = ret;
+ if (cmd_ctx->session) {
+ unlock_session(cmd_ctx->session);
+ }
+
return ret;
error:
cmd_ctx->llm->ret_code = ret;
setup_error:
+ if (cmd_ctx->session) {
+ unlock_session(cmd_ctx->session);
+ }
return ret;
}
*/
static void *thread_manage_clients(void *data)
{
- int sock, ret;
- struct command_ctx *cmd_ctx;
+ int sock = 0, ret;
+ struct command_ctx *cmd_ctx = NULL;
+ struct pollfd pollfd[2];
DBG("[thread] Manage client started");
goto error;
}
+ /* First fd is always the quit pipe */
+ pollfd[0].fd = thread_quit_pipe[0];
+
+ /* Apps socket */
+ pollfd[1].fd = client_sock;
+ pollfd[1].events = POLLIN;
+
/* Notify parent pid that we are ready
* to accept command for client side.
*/
}
while (1) {
- /* Blocking call, waiting for transmission */
DBG("Accepting client command ...");
+
+ /* Infinite blocking call, waiting for transmission */
+ ret = poll(pollfd, 2, -1);
+ if (ret < 0) {
+ perror("poll client thread");
+ goto error;
+ }
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (pollfd[0].revents == POLLNVAL) {
+ goto error;
+ } else if (pollfd[1].revents == POLLERR) {
+ ERR("Client socket poll error");
+ goto error;
+ }
+
sock = lttcomm_accept_unix_sock(client_sock);
if (sock < 0) {
goto error;
/* TODO: Inform client somehow of the fatal error. At this point,
* ret < 0 means that a malloc failed (ENOMEM). */
/* Error detected but still accept command */
- clean_command_ctx(cmd_ctx);
+ clean_command_ctx(&cmd_ctx);
continue;
}
ERR("Failed to send data back to client");
}
- clean_command_ctx(cmd_ctx);
+ clean_command_ctx(&cmd_ctx);
/* End of transmission */
close(sock);
}
error:
+ DBG("Client thread dying");
+ if (client_sock) {
+ close(client_sock);
+ }
+ if (sock) {
+ close(sock);
+ }
+
+ unlink(client_unix_sock_path);
+
+ clean_command_ctx(&cmd_ctx);
return NULL;
}
return ret;
}
-/*
- * get_home_dir
- *
- * Return pointer to home directory path using
- * the env variable HOME.
- *
- * Default : /tmp
- */
-static const char *get_home_dir(void)
-{
- const char *home_path;
-
- if ((home_path = (const char *) getenv("HOME")) == NULL) {
- home_path = default_home_dir;
- }
-
- return home_path;
-}
-
/*
* set_permissions
*
* Set the tracing group gid onto the client socket.
*
* Race window between mkdir and chown is OK because we are going from
- * less permissive (root.root) to more permissive (root.tracing).
+ * more permissive (root.root) to less permissive (root.tracing).
*/
static int set_permissions(void)
{
(grp = getgrnam(default_tracing_group));
if (grp == NULL) {
- ERR("Missing tracing group. Aborting execution.\n");
- ret = -1;
+ if (is_root) {
+ WARN("No tracing group detected");
+ ret = 0;
+ } else {
+ ERR("Missing tracing group. Aborting execution.");
+ ret = -1;
+ }
goto end;
}
return ret;
}
+/*
+ * create_kernel_poll_pipe
+ *
+ * Create the O_CLOEXEC pipe used to wake up the kernel management thread.
+ * Returns 0 on success, -1 on error with errno set (see pipe2(2)).
+ */
+static int create_kernel_poll_pipe(void)
+{
+ return pipe2(kernel_poll_pipe, O_CLOEXEC);
+}
+
/*
* create_lttng_rundir
*
int ret;
struct rlimit lim;
+ /* The kernel does not allow an infinite limit for open files */
lim.rlim_cur = 65535;
lim.rlim_max = 65535;
{
int ret = 0;
void *status;
+ const char *home_path;
+
+ /* Create thread quit pipe */
+ if (init_thread_quit_pipe() < 0) {
+ goto exit;
+ }
/* Parse arguments */
progname = argv[0];
if ((ret = parse_args(argc, argv) < 0)) {
- goto error;
+ goto exit;
}
/* Daemonize */
ret = daemon(0, 0);
if (ret < 0) {
perror("daemon");
- goto error;
+ goto exit;
}
}
/* Check if daemon is UID = 0 */
is_root = !getuid();
- /* Set all sockets path */
if (is_root) {
ret = create_lttng_rundir();
if (ret < 0) {
- goto error;
+ goto exit;
}
if (strlen(apps_unix_sock_path) == 0) {
snprintf(client_unix_sock_path, PATH_MAX,
DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
}
-
- ret = set_kconsumerd_sockets();
- if (ret < 0) {
- goto error;
+ } else {
+ home_path = get_home_dir();
+ if (home_path == NULL) {
+ /* TODO: Add --socket PATH option */
+ ERR("Can't get HOME directory for sockets creation.");
+ goto exit;
}
- /* Setup kernel tracer */
- init_kernel_tracer();
-
- /* Set ulimit for open files */
- set_ulimit();
- } else {
if (strlen(apps_unix_sock_path) == 0) {
snprintf(apps_unix_sock_path, PATH_MAX,
- DEFAULT_HOME_APPS_UNIX_SOCK, get_home_dir());
+ DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
}
/* Set the cli tool unix socket path */
if (strlen(client_unix_sock_path) == 0) {
snprintf(client_unix_sock_path, PATH_MAX,
- DEFAULT_HOME_CLIENT_UNIX_SOCK, get_home_dir());
+ DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
}
}
DBG("Client socket path %s", client_unix_sock_path);
DBG("Application socket path %s", apps_unix_sock_path);
- /* See if daemon already exist. If any of the two
- * socket needed by the daemon are present, this test fails
+ /*
+ * See if the daemon already exists. If either of the two sockets needed by the
+ * daemon are present, this test fails. However, if the daemon is killed
+ * with a SIGKILL, those unix sockets must be unlinked by hand.
*/
if ((ret = check_existing_daemon()) == 0) {
ERR("Already running daemon.\n");
- /* We do not goto error because we must not
- * cleanup() because a daemon is already running.
+ /*
+ * We do not goto error because we must not cleanup() because a daemon
+ * is already running.
*/
- exit(EXIT_FAILURE);
+ goto exit;
+ }
+
+ /* After this point, we can safely call cleanup() so goto error is used */
+
+ /*
+ * These actions must be executed as root. We do that *after* setting up
+ * the sockets path because we MUST make the check for another daemon using
+ * those paths *before* trying to set the kernel consumer sockets and init
+ * kernel tracer.
+ */
+ if (is_root) {
+ ret = set_kconsumerd_sockets();
+ if (ret < 0) {
+ goto error;
+ }
+
+ /* Setup kernel tracer */
+ init_kernel_tracer();
+
+ /* Set ulimit for open files */
+ set_ulimit();
}
if (set_signal_handler() < 0) {
ppid = getppid();
}
+ /* Setup the kernel pipe for waking up the kernel thread */
+ if (create_kernel_poll_pipe() < 0) {
+ goto error;
+ }
+
+ /*
+ * Get session list pointer. This pointer MUST NOT be free()'d.
+ * This list is statically declared in session.c
+ */
+ session_list_ptr = get_session_list();
+
while (1) {
/* Create thread to manage the client socket */
ret = pthread_create(&client_thread, NULL, thread_manage_clients, (void *) NULL);
goto error;
}
+ /* Create kernel thread to manage kernel event */
+ ret = pthread_create(&kernel_thread, NULL, thread_manage_kernel, (void *) NULL);
+ if (ret != 0) {
+ perror("pthread_create");
+ goto error;
+ }
+
ret = pthread_join(client_thread, &status);
if (ret != 0) {
perror("pthread_join");
error:
cleanup();
+
+exit:
exit(EXIT_FAILURE);
}