#include <common/utils.h>
#include <lttng/userspace-probe-internal.h>
#include <lttng/event-internal.h>
+#include <lttng/session-internal.h>
+#include <lttng/session-descriptor-internal.h>
#include "client.h"
#include "lttng-sessiond.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "utils.h"
+#include "manage-consumer.h"
static bool is_root;
static struct thread_state {
- pthread_cond_t cond;
- pthread_mutex_t lock;
- bool is_running;
-} thread_state = {
- .cond = PTHREAD_COND_INITIALIZER,
- .lock = PTHREAD_MUTEX_INITIALIZER,
- .is_running = false
-};
-
-void set_thread_state_running(void)
+ sem_t ready;
+ bool running;
+} thread_state;
+
+static void set_thread_status(bool running)
{
- pthread_mutex_lock(&thread_state.lock);
- thread_state.is_running = true;
- pthread_cond_broadcast(&thread_state.cond);
- pthread_mutex_unlock(&thread_state.lock);
+ DBG("Marking client thread's state as %s", running ? "running" : "error");
+ thread_state.running = running;
+ sem_post(&thread_state.ready);
}
-static void wait_thread_state_running(void)
+static bool wait_thread_status(void)
{
- pthread_mutex_lock(&thread_state.lock);
- while (!thread_state.is_running) {
- pthread_cond_wait(&thread_state.cond,
- &thread_state.lock);
+ DBG("Waiting for client thread to be ready");
+ sem_wait(&thread_state.ready);
+ if (thread_state.running) {
+ DBG("Client thread is ready");
+ } else {
+ ERR("Initialization of client thread failed");
}
- pthread_mutex_unlock(&thread_state.lock);
+
+ return thread_state.running;
}
/*
const size_t payload_offset = cmd_header_offset + cmd_header_len;
const size_t total_msg_size = header_len + cmd_header_len + payload_len;
+ free(cmd_ctx->llm);
cmd_ctx->llm = zmalloc(total_msg_size);
if (cmd_ctx->llm == NULL) {
/*
* Start the thread_manage_consumer. This must be done after a lttng-consumerd
- * exec or it will fails.
+ * exec or it will fail.
*/
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	/*
	 * Delegate to the consumer management thread launcher; callers of
	 * this function expect 0 on success and a negative value on failure.
	 */
	if (!launch_consumer_management_thread(consumer_data)) {
		return -1;
	}
	return 0;
}
/*
}
/* Append correct directory to subdir */
- strncat(consumer->subdir, dir_name,
- sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
- DBG3("Copy session consumer subdir %s", consumer->subdir);
-
+ ret = lttng_strncpy(consumer->domain_subdir, dir_name,
+ sizeof(consumer->domain_subdir));
+ if (ret) {
+ ret = LTTNG_ERR_UNK;
+ goto error;
+ }
+ DBG3("Copy session consumer subdir %s", consumer->domain_subdir);
ret = LTTNG_OK;
error:
DBG("Creating kernel session");
- ret = kernel_create_session(session, kernel_tracer_fd);
+ ret = kernel_create_session(session);
if (ret < 0) {
ret = LTTNG_ERR_KERN_SESS_FAIL;
- goto error;
+ goto error_create;
}
/* Code flow safety */
error:
trace_kernel_destroy_session(session->kernel_session);
session->kernel_session = NULL;
+error_create:
return ret;
}
return ret;
}
-/*
- * Join consumer thread
- */
-static int join_consumer_thread(struct consumer_data *consumer_data)
-{
- void *status;
-
- /* Consumer pid must be a real one. */
- if (consumer_data->pid > 0) {
- int ret;
- ret = kill(consumer_data->pid, SIGTERM);
- if (ret) {
- PERROR("Error killing consumer daemon");
- return ret;
- }
- return pthread_join(consumer_data->thread, &status);
- } else {
- return 0;
- }
-}
-
/*
* Version of setup_lttng_msg() without command header.
*/
* Return any error encountered or 0 for success.
*
* "sock" is only used for special-case var. len data.
+ * A command may assume the ownership of the socket, in which case its value
+ * should be set to -1.
*
* Should *NOT* be called with RCU read-side lock held.
*/
-static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
+static int process_client_msg(struct command_ctx *cmd_ctx, int *sock,
int *sock_error)
{
int ret = LTTNG_OK;
*sock_error = 0;
switch (cmd_ctx->lsm->cmd_type) {
- case LTTNG_CREATE_SESSION:
- case LTTNG_CREATE_SESSION_SNAPSHOT:
- case LTTNG_CREATE_SESSION_LIVE:
+ case LTTNG_CREATE_SESSION_EXT:
case LTTNG_DESTROY_SESSION:
case LTTNG_LIST_SESSIONS:
case LTTNG_LIST_DOMAINS:
/* Commands that DO NOT need a session. */
switch (cmd_ctx->lsm->cmd_type) {
- case LTTNG_CREATE_SESSION:
- case LTTNG_CREATE_SESSION_SNAPSHOT:
- case LTTNG_CREATE_SESSION_LIVE:
+ case LTTNG_CREATE_SESSION_EXT:
case LTTNG_LIST_SESSIONS:
case LTTNG_LIST_TRACEPOINTS:
case LTTNG_LIST_SYSCALLS:
goto error;
}
+ /* Kernel tracer check */
+ if (!kernel_tracer_is_initialized()) {
+ /* Basically, load kernel tracer modules */
+ ret = init_kernel_tracer();
+ if (ret != 0) {
+ goto error;
+ }
+ }
+
/* Consumer is in an ERROR state. Report back to client */
if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
ret = LTTNG_ERR_NO_KERNCONSUMERD;
if (need_tracing_session) {
if (cmd_ctx->session->kernel_session == NULL) {
ret = create_kernel_session(cmd_ctx->session);
- if (ret < 0) {
+ if (ret != LTTNG_OK) {
ret = LTTNG_ERR_KERN_SESS_FAIL;
goto error;
}
cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name =
context_name;
- ret = lttcomm_recv_unix_sock(sock, provider_name,
+ ret = lttcomm_recv_unix_sock(*sock, provider_name,
provider_name_len);
if (ret < 0) {
goto error_add_context;
}
- ret = lttcomm_recv_unix_sock(sock, context_name,
+ ret = lttcomm_recv_unix_sock(*sock, context_name,
context_name_len);
if (ret < 0) {
goto error_add_context;
DBG("Discarding disable event command payload of size %zu", count);
while (count) {
- ret = lttcomm_recv_unix_sock(sock, data,
+ ret = lttcomm_recv_unix_sock(*sock, data,
count > sizeof(data) ? sizeof(data) : count);
if (ret < 0) {
goto error;
DBG("Receiving var len exclusion event list from client ...");
exclusion->count = count;
- ret = lttcomm_recv_unix_sock(sock, exclusion->names,
+ ret = lttcomm_recv_unix_sock(*sock, exclusion->names,
count * LTTNG_SYMBOL_NAME_LEN);
if (ret <= 0) {
DBG("Nothing recv() from client var len data... continuing");
/* Receive var. len. data */
DBG("Receiving var len filter's expression from client ...");
- ret = lttcomm_recv_unix_sock(sock, filter_expression,
+ ret = lttcomm_recv_unix_sock(*sock, filter_expression,
expression_len);
if (ret <= 0) {
DBG("Nothing recv() from client var len data... continuing");
/* Receive var. len. data */
DBG("Receiving var len filter's bytecode from client ...");
- ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
+ ret = lttcomm_recv_unix_sock(*sock, bytecode, bytecode_len);
if (ret <= 0) {
DBG("Nothing recv() from client var len data... continuing");
*sock_error = 1;
if (cmd_ctx->lsm->u.enable.userspace_probe_location_len > 0) {
/* Expect a userspace probe description. */
- ret = receive_userspace_probe(cmd_ctx, sock, sock_error, ev);
+ ret = receive_userspace_probe(cmd_ctx, *sock, sock_error, ev);
if (ret) {
free(filter_expression);
free(bytecode);
/* Receive variable len data */
DBG("Receiving %zu URI(s) from client ...", nb_uri);
- ret = lttcomm_recv_unix_sock(sock, uris, len);
+ ret = lttcomm_recv_unix_sock(*sock, uris, len);
if (ret <= 0) {
DBG("No URIs received from client... continuing");
*sock_error = 1;
ret = cmd_stop_trace(cmd_ctx->session);
break;
}
- case LTTNG_CREATE_SESSION:
- {
- size_t nb_uri, len;
- struct lttng_uri *uris = NULL;
-
- nb_uri = cmd_ctx->lsm->u.uri.size;
- len = nb_uri * sizeof(struct lttng_uri);
-
- if (nb_uri > 0) {
- uris = zmalloc(len);
- if (uris == NULL) {
- ret = LTTNG_ERR_FATAL;
- goto error;
- }
-
- /* Receive variable len data */
- DBG("Waiting for %zu URIs from client ...", nb_uri);
- ret = lttcomm_recv_unix_sock(sock, uris, len);
- if (ret <= 0) {
- DBG("No URIs received from client... continuing");
- *sock_error = 1;
- ret = LTTNG_ERR_SESSION_FAIL;
- free(uris);
- goto error;
- }
-
- if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
- DBG("Creating session with ONE network URI is a bad call");
- ret = LTTNG_ERR_SESSION_FAIL;
- free(uris);
- goto error;
- }
- }
-
- ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
- &cmd_ctx->creds, 0);
-
- free(uris);
-
- break;
- }
case LTTNG_DESTROY_SESSION:
{
ret = cmd_destroy_session(cmd_ctx->session,
- notification_thread_handle);
+ notification_thread_handle,
+ sock);
break;
}
case LTTNG_LIST_DOMAINS:
nr_sessions = lttng_sessions_count(
LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
- payload_len = sizeof(struct lttng_session) * nr_sessions;
+
+ payload_len = (sizeof(struct lttng_session) * nr_sessions) +
+ (sizeof(struct lttng_session_extended) * nr_sessions);
sessions_payload = zmalloc(payload_len);
if (!sessions_payload) {
goto setup_error;
}
- cmd_list_lttng_sessions(sessions_payload,
+ cmd_list_lttng_sessions(sessions_payload, nr_sessions,
LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
session_unlock_list();
cmd_ctx->lsm->u.snapshot_record.wait);
break;
}
- case LTTNG_CREATE_SESSION_SNAPSHOT:
+ case LTTNG_CREATE_SESSION_EXT:
{
- size_t nb_uri, len;
- struct lttng_uri *uris = NULL;
-
- nb_uri = cmd_ctx->lsm->u.uri.size;
- len = nb_uri * sizeof(struct lttng_uri);
+ struct lttng_dynamic_buffer payload;
+ struct lttng_session_descriptor *return_descriptor = NULL;
- if (nb_uri > 0) {
- uris = zmalloc(len);
- if (uris == NULL) {
- ret = LTTNG_ERR_FATAL;
- goto error;
- }
-
- /* Receive variable len data */
- DBG("Waiting for %zu URIs from client ...", nb_uri);
- ret = lttcomm_recv_unix_sock(sock, uris, len);
- if (ret <= 0) {
- DBG("No URIs received from client... continuing");
- *sock_error = 1;
- ret = LTTNG_ERR_SESSION_FAIL;
- free(uris);
- goto error;
- }
-
- if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
- DBG("Creating session with ONE network URI is a bad call");
- ret = LTTNG_ERR_SESSION_FAIL;
- free(uris);
- goto error;
- }
+ lttng_dynamic_buffer_init(&payload);
+ ret = cmd_create_session(cmd_ctx, *sock, &return_descriptor);
+ if (ret != LTTNG_OK) {
+ goto error;
}
- ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
- nb_uri, &cmd_ctx->creds);
- free(uris);
- break;
- }
- case LTTNG_CREATE_SESSION_LIVE:
- {
- size_t nb_uri, len;
- struct lttng_uri *uris = NULL;
-
- nb_uri = cmd_ctx->lsm->u.uri.size;
- len = nb_uri * sizeof(struct lttng_uri);
-
- if (nb_uri > 0) {
- uris = zmalloc(len);
- if (uris == NULL) {
- ret = LTTNG_ERR_FATAL;
- goto error;
- }
-
- /* Receive variable len data */
- DBG("Waiting for %zu URIs from client ...", nb_uri);
- ret = lttcomm_recv_unix_sock(sock, uris, len);
- if (ret <= 0) {
- DBG("No URIs received from client... continuing");
- *sock_error = 1;
- ret = LTTNG_ERR_SESSION_FAIL;
- free(uris);
- goto error;
- }
-
- if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
- DBG("Creating session with ONE network URI is a bad call");
- ret = LTTNG_ERR_SESSION_FAIL;
- free(uris);
- goto error;
- }
+ ret = lttng_session_descriptor_serialize(return_descriptor,
+ &payload);
+ if (ret) {
+ ERR("Failed to serialize session descriptor in reply to \"create session\" command");
+ lttng_session_descriptor_destroy(return_descriptor);
+ ret = LTTNG_ERR_NOMEM;
+ goto error;
}
-
- ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
- nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
- free(uris);
+ ret = setup_lttng_msg_no_cmd_header(cmd_ctx, payload.data,
+ payload.size);
+ if (ret) {
+ lttng_session_descriptor_destroy(return_descriptor);
+ ret = LTTNG_ERR_NOMEM;
+ goto error;
+ }
+ lttng_dynamic_buffer_reset(&payload);
+ lttng_session_descriptor_destroy(return_descriptor);
+ ret = LTTNG_OK;
break;
}
case LTTNG_SAVE_SESSION:
}
case LTTNG_REGISTER_TRIGGER:
{
- ret = cmd_register_trigger(cmd_ctx, sock,
+ ret = cmd_register_trigger(cmd_ctx, *sock,
notification_thread_handle);
break;
}
case LTTNG_UNREGISTER_TRIGGER:
{
- ret = cmd_unregister_trigger(cmd_ctx, sock,
+ ret = cmd_unregister_trigger(cmd_ctx, *sock,
notification_thread_handle);
break;
}
goto error;
}
- ret = cmd_rotate_session(cmd_ctx->session, &rotate_return);
+ ret = cmd_rotate_session(cmd_ctx->session, &rotate_return,
+ false);
if (ret < 0) {
ret = -ret;
goto error;
if (cmd_ctx->session) {
session_unlock(cmd_ctx->session);
session_put(cmd_ctx->session);
+ cmd_ctx->session = NULL;
}
if (need_tracing_session) {
session_unlock_list();
lttng_pipe_destroy(quit_pipe);
}
+static void thread_init_cleanup(void *data)
+{
+ set_thread_status(false);
+}
+
/*
 * This thread manages all client requests using the unix client socket for
 * communication.
is_root = (getuid() == 0);
+ pthread_cleanup_push(thread_init_cleanup, NULL);
client_sock = create_client_sock();
if (client_sock < 0) {
goto error_listen;
goto error;
}
+ /* Set state as running. */
+ set_thread_status(true);
+ pthread_cleanup_pop(0);
+
/* This testpoint is after we signal readiness to the parent. */
if (testpoint(sessiond_thread_manage_clients)) {
goto error;
health_code_update();
- /* Set state as running. */
- set_thread_state_running();
-
while (1) {
const struct cmd_completion_handler *cmd_completion_handler;
health_code_update();
- if (!revents) {
- /* No activity for this FD (poll implementation). */
- continue;
- }
-
if (pollfd == thread_quit_pipe_fd) {
err = 0;
goto exit;
 * information for the client. The command context struct contains
 * everything this function may need.
*/
- ret = process_client_msg(cmd_ctx, sock, &sock_error);
+ ret = process_client_msg(cmd_ctx, &sock, &sock_error);
rcu_thread_offline();
if (ret < 0) {
- ret = close(sock);
- if (ret) {
- PERROR("close");
- }
- sock = -1;
+ if (sock >= 0) {
+ ret = close(sock);
+ if (ret) {
+ PERROR("close");
+ }
+ }
+ sock = -1;
/*
* TODO: Inform client somehow of the fatal error. At
* this point, ret < 0 means that a zmalloc failed
health_code_update();
- DBG("Sending response (size: %d, retcode: %s (%d))",
- cmd_ctx->lttng_msg_size,
- lttng_strerror(-cmd_ctx->llm->ret_code),
- cmd_ctx->llm->ret_code);
- ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
- if (ret < 0) {
- ERR("Failed to send data back to client");
- }
+ if (sock >= 0) {
+ DBG("Sending response (size: %d, retcode: %s (%d))",
+ cmd_ctx->lttng_msg_size,
+ lttng_strerror(-cmd_ctx->llm->ret_code),
+ cmd_ctx->llm->ret_code);
+ ret = send_unix_sock(sock, cmd_ctx->llm,
+ cmd_ctx->lttng_msg_size);
+ if (ret < 0) {
+ ERR("Failed to send data back to client");
+ }
- /* End of transmission */
- ret = close(sock);
- if (ret) {
- PERROR("close");
- }
- sock = -1;
+ /* End of transmission */
+ ret = close(sock);
+ if (ret) {
+ PERROR("close");
+ }
+ }
+ sock = -1;
clean_command_ctx(&cmd_ctx);
DBG("Client thread dying");
rcu_unregister_thread();
-
- /*
- * Since we are creating the consumer threads, we own them, so we need
- * to join them before our thread exits.
- */
- ret = join_consumer_thread(&kconsumer_data);
- if (ret) {
- errno = ret;
- PERROR("join_consumer");
- }
-
- ret = join_consumer_thread(&ustconsumer32_data);
- if (ret) {
- errno = ret;
- PERROR("join_consumer ust32");
- }
-
- ret = join_consumer_thread(&ustconsumer64_data);
- if (ret) {
- errno = ret;
- PERROR("join_consumer ust64");
- }
return NULL;
}
struct lttng_thread *launch_client_thread(void)
{
+ bool thread_running;
struct lttng_pipe *client_quit_pipe;
struct lttng_thread *thread;
+ sem_init(&thread_state.ready, 0, 0);
client_quit_pipe = lttng_pipe_open(FD_CLOEXEC);
if (!client_quit_pipe) {
goto error;
* This thread is part of the threads that need to be fully
* initialized before the session daemon is marked as "ready".
*/
- wait_thread_state_running();
-
+ thread_running = wait_thread_status();
+ if (!thread_running) {
+ lttng_thread_put(thread);
+ thread = NULL;
+ }
return thread;
error:
cleanup_client_thread(client_quit_pipe);