#include "testpoint.h"
#include "ust-thread.h"
#include "jul-thread.h"
+#include "save.h"
+#include "load-session-thread.h"
#define CONSUMERD_FILE "lttng-consumerd"
static char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
-static int opt_daemon;
+static int opt_daemon, opt_background;
static int opt_no_kernel;
-static int is_root; /* Set to 1 if the daemon is running as root */
+static char *opt_load_session_path;
static pid_t ppid; /* Parent PID for --sig-parent option */
static pid_t child_ppid; /* Internal parent PID use with daemonize. */
static char *rundir;
{ "consumerd64-path", 1, 0, 't' },
{ "consumerd64-libdir", 1, 0, 'T' },
{ "daemonize", 0, 0, 'd' },
+ { "background", 0, 0, 'b' },
{ "sig-parent", 0, 0, 'S' },
{ "help", 0, 0, 'h' },
{ "group", 1, 0, 'g' },
{ "pidfile", 1, 0, 'p' },
{ "jul-tcp-port", 1, 0, 'J' },
{ "config", 1, 0, 'f' },
+ { "load", 1, 0, 'l' },
{ NULL, 0, 0, 0 }
};
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
static pthread_t jul_reg_thread;
+static pthread_t load_session_thread;
/*
* UST registration command queue. This queue is tied with a futex and uses a N
* wakers / 1 waiter implemented and detailed in futex.c/.h
*
- * The thread_manage_apps and thread_dispatch_ust_registration interact with
- * this queue and the wait/wake scheme.
+ * The thread_registration_apps and thread_dispatch_ust_registration use this
+ * queue along with the wait/wake scheme. The thread_manage_apps later receives
+ * the new application sockets and monitors them for any I/O error or clean
+ * close that triggers an unregistration of the application.
*/
static struct ust_cmd_queue ust_cmd_queue;
/* JUL TCP port for registration. Used by the JUL thread. */
unsigned int jul_tcp_port = DEFAULT_JUL_TCP_PORT;
+/* Am I root or not. */
+int is_root; /* Set to 1 if the daemon is running as root */
+
const char * const config_section_name = "sessiond";
+/* Data required by the load session thread to operate. */
+struct load_session_thread_data *load_info;
+
/*
* Whether sessiond is ready for commands/health check requests.
* NR_LTTNG_SESSIOND_READY must match the number of calls to
- * lttng_sessiond_notify_ready().
+ * sessiond_notify_ready().
*/
-#define NR_LTTNG_SESSIOND_READY 2
+#define NR_LTTNG_SESSIOND_READY 3
int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
/* Notify parents that we are ready for cmd and health check */
-static
-void lttng_sessiond_notify_ready(void)
+LTTNG_HIDDEN
+void sessiond_notify_ready(void)
{
	if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
/*
* Notify the parent of the fork() process that we are
* ready.
*/
- if (opt_daemon) {
+ if (opt_daemon || opt_background) {
kill(child_ppid, SIGUSR1);
}
}
free(opt_pidfile);
}
+ if (opt_load_session_path) {
+ free(opt_load_session_path);
+ }
+
+ if (load_info) {
+ load_session_destroy_data(load_info);
+ free(load_info);
+ }
+
/* <fun> */
DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
"Matthew, BEET driven development works!%c[%dm",
}
health_code_update();
-
if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
/* Connect both socket, command and metadata. */
consumer_data->cmd_sock =
}
consumer_data->cmd_sock = -1;
}
- if (*consumer_data->metadata_sock.fd_ptr >= 0) {
+ if (consumer_data->metadata_sock.fd_ptr &&
+ *consumer_data->metadata_sock.fd_ptr >= 0) {
ret = close(*consumer_data->metadata_sock.fd_ptr);
if (ret) {
PERROR("close");
}
}
-
if (sock >= 0) {
ret = close(sock);
if (ret) {
pthread_mutex_unlock(&consumer_data->lock);
/* Cleanup metadata socket mutex. */
- pthread_mutex_destroy(consumer_data->metadata_sock.lock);
- free(consumer_data->metadata_sock.lock);
-
+ if (consumer_data->metadata_sock.lock) {
+ pthread_mutex_destroy(consumer_data->metadata_sock.lock);
+ free(consumer_data->metadata_sock.lock);
+ }
lttng_poll_clean(&events);
error_poll:
if (err) {
health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);
+ if (testpoint(sessiond_thread_app_reg_dispatch)) {
+ goto error_testpoint;
+ }
+
health_code_update();
CDS_INIT_LIST_HEAD(&wait_queue.head);
free(wait_node);
}
+error_testpoint:
DBG("Dispatch thread dying");
if (err) {
health_error();
exit:
error:
- if (err) {
- health_error();
- ERR("Health error occurred in %s", __func__);
- }
-
/* Notify that the registration thread is gone */
notify_ust_apps(0);
error_create_poll:
error_testpoint:
DBG("UST Registration thread cleanup complete");
+ if (err) {
+ health_error();
+ ERR("Health error occurred in %s", __func__);
+ }
health_unregister(health_sessiond);
return NULL;
if (ret != 0) {
errno = ret;
if (ret == ETIMEDOUT) {
+ int pth_ret;
+
/*
* Call has timed out so we kill the kconsumerd_thread and return
* an error.
*/
ERR("Condition timed out. The consumer thread was never ready."
" Killing it");
- ret = pthread_cancel(consumer_data->thread);
- if (ret < 0) {
+ pth_ret = pthread_cancel(consumer_data->thread);
+ if (pth_ret < 0) {
PERROR("pthread_cancel consumer thread");
}
} else {
PERROR("pthread_cond_wait failed consumer thread");
}
+ /* Caller is expecting a negative value on failure. */
+ ret = -1;
goto error;
}
*/
if (opt_verbose_consumer) {
verbosity = "--verbose";
- } else {
+ } else if (lttng_opt_quiet) {
verbosity = "--quiet";
+ } else {
+ verbosity = "";
}
+
switch (consumer_data->type) {
case LTTNG_CONSUMER_KERNEL:
/*
consumer_to_use = consumerd32_bin;
} else {
DBG("Could not find any valid consumerd executable");
+ ret = -EINVAL;
break;
}
DBG("Using kernel consumer at: %s", consumer_to_use);
- execl(consumer_to_use,
+ ret = execl(consumer_to_use,
"lttng-consumerd", verbosity, "-k",
"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
"--consumerd-err-sock", consumer_data->err_unix_sock_path,
if (consumerd64_libdir[0] != '\0') {
free(tmpnew);
}
- if (ret) {
- goto error;
- }
break;
}
case LTTNG_CONSUMER32_UST:
if (consumerd32_libdir[0] != '\0') {
free(tmpnew);
}
- if (ret) {
- goto error;
- }
break;
}
default:
exit(EXIT_FAILURE);
}
if (errno != 0) {
- PERROR("kernel start consumer exec");
+ PERROR("Consumer execl()");
}
+ /* Reaching this point, we got a failure on our execl(). */
exit(EXIT_FAILURE);
} else if (pid > 0) {
ret = pid;
case LTTNG_SNAPSHOT_DEL_OUTPUT:
case LTTNG_SNAPSHOT_LIST_OUTPUT:
case LTTNG_SNAPSHOT_RECORD:
+ case LTTNG_SAVE_SESSION:
need_domain = 0;
break;
default:
case LTTNG_LIST_SESSIONS:
case LTTNG_LIST_TRACEPOINTS:
case LTTNG_LIST_TRACEPOINT_FIELDS:
+ case LTTNG_SAVE_SESSION:
need_tracing_session = 0;
break;
default:
{
struct lttng_event_exclusion *exclusion = NULL;
struct lttng_filter_bytecode *bytecode = NULL;
+ char *filter_expression = NULL;
/* Handle exclusion events and receive it from the client. */
if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
}
}
+ /* Get filter expression from client. */
+ if (cmd_ctx->lsm->u.enable.expression_len > 0) {
+ size_t expression_len =
+ cmd_ctx->lsm->u.enable.expression_len;
+
+ if (expression_len > LTTNG_FILTER_MAX_LEN) {
+ ret = LTTNG_ERR_FILTER_INVAL;
+ free(exclusion);
+ goto error;
+ }
+
+ filter_expression = zmalloc(expression_len);
+ if (!filter_expression) {
+ free(exclusion);
+ ret = LTTNG_ERR_FILTER_NOMEM;
+ goto error;
+ }
+
+ /* Receive var. len. data */
+ DBG("Receiving var len filter's expression from client ...");
+ ret = lttcomm_recv_unix_sock(sock, filter_expression,
+ expression_len);
+ if (ret <= 0) {
+ DBG("Nothing recv() from client car len data... continuing");
+ *sock_error = 1;
+ free(filter_expression);
+ free(exclusion);
+ ret = LTTNG_ERR_FILTER_INVAL;
+ goto error;
+ }
+ }
+
/* Handle filter and get bytecode from client. */
if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;
ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
cmd_ctx->lsm->u.enable.channel_name,
- &cmd_ctx->lsm->u.enable.event, bytecode, exclusion,
+ &cmd_ctx->lsm->u.enable.event,
+ filter_expression, bytecode, exclusion,
kernel_poll_pipe[1]);
break;
}
ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
cmd_ctx->lsm->u.enable.channel_name,
- cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
+ cmd_ctx->lsm->u.enable.event.type, NULL, NULL,
+ kernel_poll_pipe[1]);
break;
}
case LTTNG_LIST_TRACEPOINTS:
struct lttng_event *events;
ssize_t nb_events;
+ session_lock_list();
nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
+ session_unlock_list();
if (nb_events < 0) {
/* Return value is a negative lttng_error_code. */
ret = -nb_events;
struct lttng_event_field *fields;
ssize_t nb_fields;
+ session_lock_list();
nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
&fields);
+ session_unlock_list();
if (nb_fields < 0) {
/* Return value is a negative lttng_error_code. */
ret = -nb_fields;
free(uris);
break;
}
+ case LTTNG_SAVE_SESSION:
+ {
+ ret = cmd_save_sessions(&cmd_ctx->lsm->u.save_session.attr,
+ &cmd_ctx->creds);
+ break;
+ }
default:
ret = LTTNG_ERR_UND;
break;
goto error;
}
- lttng_sessiond_notify_ready();
+ sessiond_notify_ready();
while (1) {
DBG("Health check ready");
rcu_thread_online();
- reply.ret_code = 0;
+ memset(&reply, 0, sizeof(reply));
for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
/*
* health_check_state returns 0 if health is
goto error;
}
- lttng_sessiond_notify_ready();
+ sessiond_notify_ready();
+ ret = sem_post(&load_info->message_thread_ready);
+ if (ret) {
+ PERROR("sem_post message_thread_ready");
+ goto error;
+ }
/* This testpoint is after we signal readiness to the parent. */
if (testpoint(sessiond_thread_manage_clients)) {
fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
+ fprintf(stderr, " -b, --background Start as a daemon, keeping console open.\n");
fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
fprintf(stderr, " -V, --version Show version number.\n");
fprintf(stderr, " -S, --sig-parent Send SIGUSR1 to parent pid to notify readiness.\n");
fprintf(stderr, " --no-kernel Disable kernel tracer\n");
fprintf(stderr, " --jul-tcp-port JUL application registration TCP port\n");
fprintf(stderr, " -f --config Load daemon configuration file\n");
+ fprintf(stderr, " -l --load PATH Load session configuration\n");
}
/*
case 'd':
opt_daemon = 1;
break;
+ case 'b':
+ opt_background = 1;
+ break;
case 'g':
tracing_group_name = strdup(arg);
break;
DBG3("JUL TCP port set to non default: %u", jul_tcp_port);
break;
}
+ case 'l':
+ opt_load_session_path = strdup(arg);
+ if (!opt_load_session_path) {
+ perror("strdup");
+ ret = -ENOMEM;
+ }
+ break;
+ case 'f':
+ /* This is handled in set_options() thus silent break. */
+ break;
default:
/* Unknown option or other error.
* Error is printed by getopt, just return */
return;
}
+/*
+ * Start the load session thread and detach from it so the main thread can
+ * continue. This does not return a value since whatever the outcome, the main
+ * thread will continue.
+ */
+static void start_load_session_thread(void)
+{
+	int ret;
+
+	/* Create session loading thread; it consumes the global load_info. */
+	ret = pthread_create(&load_session_thread, NULL, thread_load_session,
+			load_info);
+	if (ret != 0) {
+		PERROR("pthread_create load_session_thread");
+		goto error_create;
+	}
+
+	/*
+	 * Detach the thread: it is never joined, so its resources are
+	 * reclaimed automatically on termination. A detach failure is only
+	 * logged; the thread keeps running either way.
+	 */
+	ret = pthread_detach(load_session_thread);
+	if (ret != 0) {
+		PERROR("pthread_detach load_session_thread");
+	}
+
+	/* Everything went well so don't cleanup anything. */
+
+error_create:
+	/* The cleanup() function will destroy the load_info data. */
+	return;
+}
+
/*
* main
*/
}
/* Daemonize */
- if (opt_daemon) {
+ if (opt_daemon || opt_background) {
int i;
- ret = lttng_daemonize(&child_ppid, &recv_child_signal, 1);
+ ret = lttng_daemonize(&child_ppid, &recv_child_signal,
+ !opt_background);
if (ret < 0) {
goto error;
}
/* This is to get the TCP timeout value. */
lttcomm_inet_init();
+ if (load_session_init_data(&load_info) < 0) {
+ goto exit;
+ }
+ load_info->path = opt_load_session_path;
+
/*
* Initialize the health check subsystem. This call should set the
* appropriate time values.
PERROR("pthread_create kernel");
goto exit_kernel;
}
+ }
+
+ /* Load possible session(s). */
+ start_load_session_thread();
+ if (is_root && !opt_no_kernel) {
ret = pthread_join(kernel_thread, &status);
if (ret != 0) {
PERROR("pthread_join");