+ kernel_wait_quiescent(kernel_tracer_fd);
+ break;
+ }
+ case LTTNG_DOMAIN_UST:
+ {
+ struct ltt_ust_channel *uchan;
+ struct ltt_ust_session *usess = session->ust_session;
+
+ assert(usess);
+
+ /*
+ * If a non-default channel has been created in the
+ * session, explicitely require that -c chan_name needs
+ * to be provided.
+ */
+ if (usess->has_non_default_channel && channel_name[0] == '\0') {
+ ret = LTTNG_ERR_NEED_CHANNEL_NAME;
+ goto error;
+ }
+
+ /* Get channel from global UST domain */
+ uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
+ channel_name);
+ if (uchan == NULL) {
+ /* Create default channel */
+ attr = channel_new_default_attr(LTTNG_DOMAIN_UST,
+ usess->buffer_type);
+ if (attr == NULL) {
+ ret = LTTNG_ERR_FATAL;
+ goto error;
+ }
+ if (lttng_strncpy(attr->name, channel_name,
+ sizeof(attr->name))) {
+ ret = LTTNG_ERR_INVALID;
+ goto error;
+ }
+
+ ret = cmd_enable_channel(session, domain, attr, wpipe);
+ if (ret != LTTNG_OK) {
+ goto error;
+ }
+
+ /* Get the newly created channel reference back */
+ uchan = trace_ust_find_channel_by_name(
+ usess->domain_global.channels, channel_name);
+ assert(uchan);
+ }
+
+ if (uchan->domain != LTTNG_DOMAIN_UST && !internal_event) {
+ /*
+ * Don't allow users to add UST events to channels which
+ * are assigned to a userspace subdomain (JUL, Log4J,
+ * Python, etc.).
+ */
+ ret = LTTNG_ERR_INVALID_CHANNEL_DOMAIN;
+ goto error;
+ }
+
+ if (!internal_event) {
+ /*
+ * Ensure the event name is not reserved for internal
+ * use.
+ */
+ ret = validate_ust_event_name(event->name);
+ if (ret) {
+ WARN("Userspace event name %s failed validation.",
+ event->name);
+ ret = LTTNG_ERR_INVALID_EVENT_NAME;
+ goto error;
+ }
+ }
+
+ /* At this point, the session and channel exist on the tracer */
+ ret = event_ust_enable_tracepoint(usess, uchan, event,
+ filter_expression, filter, exclusion,
+ internal_event);
+ /* We have passed ownership */
+ filter_expression = NULL;
+ filter = NULL;
+ exclusion = NULL;
+ if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
+ goto already_enabled;
+ } else if (ret != LTTNG_OK) {
+ goto error;
+ }
+ break;
+ }
+ case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_JUL:
+ case LTTNG_DOMAIN_PYTHON:
+ {
+ const char *default_event_name, *default_chan_name;
+ struct agent *agt;
+ struct lttng_event uevent;
+ struct lttng_domain tmp_dom;
+ struct ltt_ust_session *usess = session->ust_session;
+
+ assert(usess);
+
+ if (!agent_tracing_is_enabled()) {
+ DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
+ ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
+ goto error;
+ }
+
+ agt = trace_ust_find_agent(usess, domain->type);
+ if (!agt) {
+ agt = agent_create(domain->type);
+ if (!agt) {
+ ret = LTTNG_ERR_NOMEM;
+ goto error;
+ }
+ agent_add(agt, usess->agents);
+ }
+
+ /* Create the default tracepoint. */
+ memset(&uevent, 0, sizeof(uevent));
+ uevent.type = LTTNG_EVENT_TRACEPOINT;
+ uevent.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
+ default_event_name = event_get_default_agent_ust_name(
+ domain->type);
+ if (!default_event_name) {
+ ret = LTTNG_ERR_FATAL;
+ goto error;
+ }
+ strncpy(uevent.name, default_event_name, sizeof(uevent.name));
+ uevent.name[sizeof(uevent.name) - 1] = '\0';
+
+ /*
+ * The domain type is changed because we are about to enable the
+ * default channel and event for the JUL domain that are hardcoded.
+ * This happens in the UST domain.
+ */
+ memcpy(&tmp_dom, domain, sizeof(tmp_dom));
+ tmp_dom.type = LTTNG_DOMAIN_UST;
+
+ switch (domain->type) {
+ case LTTNG_DOMAIN_LOG4J:
+ default_chan_name = DEFAULT_LOG4J_CHANNEL_NAME;
+ break;
+ case LTTNG_DOMAIN_JUL:
+ default_chan_name = DEFAULT_JUL_CHANNEL_NAME;
+ break;
+ case LTTNG_DOMAIN_PYTHON:
+ default_chan_name = DEFAULT_PYTHON_CHANNEL_NAME;
+ break;
+ default:
+ /* The switch/case we are in makes this impossible */
+ assert(0);
+ }
+
+ {
+ char *filter_expression_copy = NULL;
+ struct lttng_filter_bytecode *filter_copy = NULL;
+
+ if (filter) {
+ const size_t filter_size = sizeof(
+ struct lttng_filter_bytecode)
+ + filter->len;
+
+ filter_copy = zmalloc(filter_size);
+ if (!filter_copy) {
+ ret = LTTNG_ERR_NOMEM;
+ goto error;
+ }
+ memcpy(filter_copy, filter, filter_size);
+
+ filter_expression_copy =
+ strdup(filter_expression);
+ if (!filter_expression) {
+ ret = LTTNG_ERR_NOMEM;
+ }
+
+ if (!filter_expression_copy || !filter_copy) {
+ free(filter_expression_copy);
+ free(filter_copy);
+ goto error;
+ }
+ }
+
+ ret = cmd_enable_event_internal(session, &tmp_dom,
+ (char *) default_chan_name,
+ &uevent, filter_expression_copy,
+ filter_copy, NULL, wpipe);
+ }
+
+ if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
+ goto already_enabled;
+ } else if (ret != LTTNG_OK) {
+ goto error;
+ }
+
+ /* The wild card * means that everything should be enabled. */
+ if (strncmp(event->name, "*", 1) == 0 && strlen(event->name) == 1) {
+ ret = event_agent_enable_all(usess, agt, event, filter,
+ filter_expression);
+ } else {
+ ret = event_agent_enable(usess, agt, event, filter,
+ filter_expression);
+ }
+ filter = NULL;
+ filter_expression = NULL;
+ if (ret != LTTNG_OK) {
+ goto error;
+ }
+
+ break;
+ }
+ default:
+ ret = LTTNG_ERR_UND;
+ goto error;
+ }
+
+ ret = LTTNG_OK;
+
+already_enabled:
+error:
+ free(filter_expression);
+ free(filter);
+ free(exclusion);
+ channel_attr_destroy(attr);
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Command LTTNG_ENABLE_EVENT processed by the client thread.
+ * We own filter, exclusion, and filter_expression.
+ */
+int cmd_enable_event(struct ltt_session *session, struct lttng_domain *domain,
+ char *channel_name, struct lttng_event *event,
+ char *filter_expression,
+ struct lttng_filter_bytecode *filter,
+ struct lttng_event_exclusion *exclusion,
+ int wpipe)
+{
+ return _cmd_enable_event(session, domain, channel_name, event,
+ filter_expression, filter, exclusion, wpipe, false);
+}
+
+/*
+ * Enable an event which is internal to LTTng. An internal should
+ * never be made visible to clients and are immune to checks such as
+ * reserved names.
+ */
+static int cmd_enable_event_internal(struct ltt_session *session,
+ struct lttng_domain *domain,
+ char *channel_name, struct lttng_event *event,
+ char *filter_expression,
+ struct lttng_filter_bytecode *filter,
+ struct lttng_event_exclusion *exclusion,
+ int wpipe)
+{
+ return _cmd_enable_event(session, domain, channel_name, event,
+ filter_expression, filter, exclusion, wpipe, true);
+}
+
+/*
+ * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
+ */
+ssize_t cmd_list_tracepoints(enum lttng_domain_type domain,
+ struct lttng_event **events)
+{
+ int ret;
+ ssize_t nb_events = 0;
+
+ switch (domain) {
+ case LTTNG_DOMAIN_KERNEL:
+ nb_events = kernel_list_events(kernel_tracer_fd, events);
+ if (nb_events < 0) {
+ ret = LTTNG_ERR_KERN_LIST_FAIL;
+ goto error;
+ }
+ break;
+ case LTTNG_DOMAIN_UST:
+ nb_events = ust_app_list_events(events);
+ if (nb_events < 0) {
+ ret = LTTNG_ERR_UST_LIST_FAIL;
+ goto error;
+ }
+ break;
+ case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_JUL:
+ case LTTNG_DOMAIN_PYTHON:
+ nb_events = agent_list_events(events, domain);
+ if (nb_events < 0) {
+ ret = LTTNG_ERR_UST_LIST_FAIL;
+ goto error;
+ }
+ break;
+ default:
+ ret = LTTNG_ERR_UND;
+ goto error;
+ }
+
+ return nb_events;
+
+error:
+ /* Return negative value to differentiate return code */
+ return -ret;
+}
+
+/*
+ * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
+ */
+ssize_t cmd_list_tracepoint_fields(enum lttng_domain_type domain,
+ struct lttng_event_field **fields)
+{
+ int ret;
+ ssize_t nb_fields = 0;
+
+ switch (domain) {
+ case LTTNG_DOMAIN_UST:
+ nb_fields = ust_app_list_event_fields(fields);
+ if (nb_fields < 0) {
+ ret = LTTNG_ERR_UST_LIST_FAIL;
+ goto error;
+ }
+ break;
+ case LTTNG_DOMAIN_KERNEL:
+ default: /* fall-through */
+ ret = LTTNG_ERR_UND;
+ goto error;
+ }
+
+ return nb_fields;
+
+error:
+ /* Return negative value to differentiate return code */
+ return -ret;
+}
+
/*
 * Command LTTNG_LIST_SYSCALLS processed by the client thread.
 *
 * Delegates entirely to the syscall table; return-value semantics are
 * those of syscall_table_list().
 */
ssize_t cmd_list_syscalls(struct lttng_event **events)
{
	return syscall_table_list(events);
}
+
+/*
+ * Command LTTNG_LIST_TRACKER_PIDS processed by the client thread.
+ *
+ * Called with session lock held.
+ */
+ssize_t cmd_list_tracker_pids(struct ltt_session *session,
+ enum lttng_domain_type domain, int32_t **pids)
+{
+ int ret;
+ ssize_t nr_pids = 0;
+
+ switch (domain) {
+ case LTTNG_DOMAIN_KERNEL:
+ {
+ struct ltt_kernel_session *ksess;
+
+ ksess = session->kernel_session;
+ nr_pids = kernel_list_tracker_pids(ksess, pids);
+ if (nr_pids < 0) {
+ ret = LTTNG_ERR_KERN_LIST_FAIL;
+ goto error;
+ }
+ break;
+ }
+ case LTTNG_DOMAIN_UST:
+ {
+ struct ltt_ust_session *usess;
+
+ usess = session->ust_session;
+ nr_pids = trace_ust_list_tracker_pids(usess, pids);
+ if (nr_pids < 0) {
+ ret = LTTNG_ERR_UST_LIST_FAIL;
+ goto error;
+ }
+ break;
+ }
+ case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_JUL:
+ case LTTNG_DOMAIN_PYTHON:
+ default:
+ ret = LTTNG_ERR_UND;
+ goto error;
+ }
+
+ return nr_pids;
+
+error:
+ /* Return negative value to differentiate return code */
+ return -ret;
+}
+
/*
 * Create the trace output directory of one domain of a session on the
 * consumer side. The path is "<base path><chunk path><subdir>" and the
 * mkdir command is sent to at most one consumer socket of the output.
 *
 * Returns 0 on success, -1 on error.
 */
static
int domain_mkdir(const struct consumer_output *output,
		const struct ltt_session *session,
		uid_t uid, gid_t gid)
{
	struct consumer_socket *socket;
	struct lttng_ht_iter iter;
	int ret;
	char *path = NULL;

	if (!output || !output->socks) {
		ERR("No consumer output found");
		ret = -1;
		goto end;
	}

	path = zmalloc(LTTNG_PATH_MAX * sizeof(char));
	if (!path) {
		ERR("Cannot allocate mkdir path");
		ret = -1;
		goto end;
	}

	/* Build the full path; reject snprintf error or truncation. */
	ret = snprintf(path, LTTNG_PATH_MAX, "%s%s%s",
			session_get_base_path(session),
			output->chunk_path, output->subdir);
	if (ret < 0 || ret >= LTTNG_PATH_MAX) {
		ERR("Format path");
		ret = -1;
		goto end;
	}

	DBG("Domain mkdir %s for session %" PRIu64, path, session->id);
	rcu_read_lock();
	/*
	 * We have to iterate to find a socket, but we only need to send the
	 * mkdir command to one consumer, so we break after the first one.
	 */
	cds_lfht_for_each_entry(output->socks->ht, &iter.iter, socket, node.node) {
		/* The socket lock serializes commands sent on this socket. */
		pthread_mutex_lock(socket->lock);
		ret = consumer_mkdir(socket, session->id, output, path, uid, gid);
		pthread_mutex_unlock(socket->lock);
		if (ret) {
			ERR("Consumer mkdir");
			ret = -1;
			goto end_unlock;
		}
		break;
	}

	ret = 0;

end_unlock:
	rcu_read_unlock();
end:
	free(path);
	return ret;
}
+
+static
+int session_mkdir(const struct ltt_session *session)
+{
+ int ret;
+ struct consumer_output *output;
+ uid_t uid;
+ gid_t gid;
+
+ /*
+ * Unsupported feature in lttng-relayd before 2.11, not an error since it
+ * is only needed for session rotation and the user will get an error
+ * on rotate.
+ */
+ if (session->consumer->type == CONSUMER_DST_NET &&
+ session->consumer->relay_major_version == 2 &&
+ session->consumer->relay_minor_version < 11) {
+ ret = 0;
+ goto end;
+ }
+
+ if (session->kernel_session) {
+ output = session->kernel_session->consumer;
+ uid = session->kernel_session->uid;
+ gid = session->kernel_session->gid;
+ ret = domain_mkdir(output, session, uid, gid);
+ if (ret) {
+ ERR("Mkdir kernel");
+ goto end;
+ }
+ }
+
+ if (session->ust_session) {
+ output = session->ust_session->consumer;
+ uid = session->ust_session->uid;
+ gid = session->ust_session->gid;
+ ret = domain_mkdir(output, session, uid, gid);
+ if (ret) {
+ ERR("Mkdir UST");
+ goto end;
+ }
+ }
+
+ ret = 0;
+
+end:
+ return ret;
+}
+
+/*
+ * Command LTTNG_START_TRACE processed by the client thread.
+ *
+ * Called with session mutex held.
+ */
+int cmd_start_trace(struct ltt_session *session)
+{
+ int ret;
+ unsigned long nb_chan = 0;
+ struct ltt_kernel_session *ksession;
+ struct ltt_ust_session *usess;
+
+ assert(session);
+
+ /* Ease our life a bit ;) */
+ ksession = session->kernel_session;
+ usess = session->ust_session;
+
+ /* Is the session already started? */
+ if (session->active) {
+ ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
+ goto error;
+ }
+
+ /*
+ * Starting a session without channel is useless since after that it's not
+ * possible to enable channel thus inform the client.
+ */
+ if (usess && usess->domain_global.channels) {
+ nb_chan += lttng_ht_get_count(usess->domain_global.channels);
+ }
+ if (ksession) {
+ nb_chan += ksession->channel_count;
+ }
+ if (!nb_chan) {
+ ret = LTTNG_ERR_NO_CHANNEL;
+ goto error;
+ }
+
+ /*
+ * Record the timestamp of the first time the session is started for
+ * an eventual session rotation call.
+ */
+ if (!session->has_been_started) {
+ session->current_chunk_start_ts = time(NULL);
+ if (session->current_chunk_start_ts == (time_t) -1) {
+ PERROR("Failed to retrieve the \"%s\" session's start time",
+ session->name);
+ ret = LTTNG_ERR_FATAL;
+ goto error;
+ }
+ if (!session->snapshot_mode && session->output_traces) {
+ ret = session_mkdir(session);
+ if (ret) {
+ ERR("Failed to create the session directories");
+ ret = LTTNG_ERR_CREATE_DIR_FAIL;
+ goto error;
+ }
+ }
+ }
+
+ /* Kernel tracing */
+ if (ksession != NULL) {
+ DBG("Start kernel tracing session %s", session->name);
+ ret = start_kernel_session(ksession, kernel_tracer_fd);
+ if (ret != LTTNG_OK) {
+ goto error;
+ }
+ }
+
+ /* Flag session that trace should start automatically */
+ if (usess) {
+ /*
+ * Even though the start trace might fail, flag this session active so
+ * other application coming in are started by default.
+ */
+ usess->active = 1;
+
+ ret = ust_app_start_trace_all(usess);
+ if (ret < 0) {
+ ret = LTTNG_ERR_UST_START_FAIL;
+ goto error;
+ }
+ }
+
+ /* Flag this after a successful start. */
+ session->has_been_started = 1;
+ session->active = 1;
+
+ /*
+ * Clear the flag that indicates that a rotation was done while the
+ * session was stopped.
+ */
+ session->rotated_after_last_stop = false;
+
+ if (session->rotate_timer_period) {
+ ret = timer_session_rotation_schedule_timer_start(session,
+ session->rotate_timer_period);
+ if (ret < 0) {
+ ERR("Failed to enable rotate timer");
+ ret = LTTNG_ERR_UNK;
+ goto error;
+ }
+ }
+
+ ret = LTTNG_OK;
+
+error:
+ return ret;
+}
+
+/*
+ * Command LTTNG_STOP_TRACE processed by the client thread.
+ */
+int cmd_stop_trace(struct ltt_session *session)
+{
+ int ret;
+ struct ltt_kernel_channel *kchan;
+ struct ltt_kernel_session *ksession;
+ struct ltt_ust_session *usess;
+ bool error_occured = false;
+
+ assert(session);
+
+ DBG("Begin stop session %s (id %" PRIu64 ")", session->name, session->id);
+ /* Short cut */
+ ksession = session->kernel_session;
+ usess = session->ust_session;
+
+ /* Session is not active. Skip everythong and inform the client. */
+ if (!session->active) {
+ ret = LTTNG_ERR_TRACE_ALREADY_STOPPED;
+ goto error;
+ }
+
+ if (session->rotation_schedule_timer_enabled) {
+ if (timer_session_rotation_schedule_timer_stop(
+ session)) {
+ ERR("Failed to stop the \"rotation schedule\" timer of session %s",
+ session->name);
+ }
+ }
+
+ /*
+ * A rotation is still ongoing. The check timer will continue to wait
+ * for the rotation to complete. When the rotation finally completes,
+ * a check will be performed to rename the "active" chunk to the
+ * expected "timestamp_begin-timestamp_end" format.
+ */
+ if (session->current_archive_id > 0 &&
+ session->rotation_state != LTTNG_ROTATION_STATE_ONGOING) {
+ ret = rename_active_chunk(session);
+ if (ret) {
+ /*
+ * This error should not prevent the user from stopping
+ * the session. However, it will be reported at the end.
+ */
+ error_occured = true;
+ }
+ }
+
+ /* Kernel tracer */
+ if (ksession && ksession->active) {
+ DBG("Stop kernel tracing");
+
+ ret = kernel_stop_session(ksession);
+ if (ret < 0) {
+ ret = LTTNG_ERR_KERN_STOP_FAIL;
+ goto error;
+ }
+
+ kernel_wait_quiescent(kernel_tracer_fd);
+
+ /* Flush metadata after stopping (if exists) */
+ if (ksession->metadata_stream_fd >= 0) {
+ ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
+ if (ret < 0) {
+ ERR("Kernel metadata flush failed");
+ }
+ }
+
+ /* Flush all buffers after stopping */
+ cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
+ ret = kernel_flush_buffer(kchan);
+ if (ret < 0) {
+ ERR("Kernel flush buffer error");
+ }
+ }
+
+ ksession->active = 0;
+ DBG("Kernel session stopped %s (id %" PRIu64 ")", session->name,
+ session->id);
+ }
+
+ if (usess && usess->active) {
+ /*
+ * Even though the stop trace might fail, flag this session inactive so
+ * other application coming in are not started by default.
+ */
+ usess->active = 0;
+
+ ret = ust_app_stop_trace_all(usess);
+ if (ret < 0) {
+ ret = LTTNG_ERR_UST_STOP_FAIL;
+ goto error;
+ }
+ }
+
+ /* Flag inactive after a successful stop. */
+ session->active = 0;
+ ret = !error_occured ? LTTNG_OK : LTTNG_ERR_UNK;
+
+error:
+ return ret;
+}
+
+/*
+ * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
+ */
+int cmd_set_consumer_uri(struct ltt_session *session, size_t nb_uri,
+ struct lttng_uri *uris)
+{
+ int ret, i;
+ struct ltt_kernel_session *ksess = session->kernel_session;
+ struct ltt_ust_session *usess = session->ust_session;
+
+ assert(session);
+ assert(uris);
+ assert(nb_uri > 0);
+
+ /* Can't set consumer URI if the session is active. */
+ if (session->active) {
+ ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
+ goto error;
+ }
+
+ /* Set the "global" consumer URIs */
+ for (i = 0; i < nb_uri; i++) {
+ ret = add_uri_to_consumer(session->consumer,
+ &uris[i], 0, session->name);
+ if (ret != LTTNG_OK) {
+ goto error;
+ }
+ }
+
+ /* Set UST session URIs */
+ if (session->ust_session) {
+ for (i = 0; i < nb_uri; i++) {
+ ret = add_uri_to_consumer(
+ session->ust_session->consumer,
+ &uris[i], LTTNG_DOMAIN_UST,
+ session->name);
+ if (ret != LTTNG_OK) {
+ goto error;
+ }
+ }
+ }
+
+ /* Set kernel session URIs */
+ if (session->kernel_session) {
+ for (i = 0; i < nb_uri; i++) {
+ ret = add_uri_to_consumer(
+ session->kernel_session->consumer,
+ &uris[i], LTTNG_DOMAIN_KERNEL,
+ session->name);
+ if (ret != LTTNG_OK) {
+ goto error;
+ }
+ }
+ }
+
+ /*
+ * Make sure to set the session in output mode after we set URI since a
+ * session can be created without URL (thus flagged in no output mode).
+ */
+ session->output_traces = 1;
+ if (ksess) {
+ ksess->output_traces = 1;
+ }
+
+ if (usess) {
+ usess->output_traces = 1;
+ }
+
+ /* All good! */
+ ret = LTTNG_OK;
+
+error:
+ return ret;
+}
+
+/*
+ * Command LTTNG_CREATE_SESSION processed by the client thread.
+ */
+int cmd_create_session_uri(char *name, struct lttng_uri *uris,
+ size_t nb_uri, lttng_sock_cred *creds, unsigned int live_timer)
+{
+ int ret;
+ struct ltt_session *session;
+
+ assert(name);
+ assert(creds);
+
+ /*
+ * Verify if the session already exist
+ *
+ * XXX: There is no need for the session lock list here since the caller
+ * (process_client_msg) is holding it. We might want to change that so a
+ * single command does not lock the entire session list.
+ */
+ session = session_find_by_name(name);
+ if (session != NULL) {
+ ret = LTTNG_ERR_EXIST_SESS;
+ goto find_error;
+ }
+
+ /* Create tracing session in the registry */
+ ret = session_create(name, LTTNG_SOCK_GET_UID_CRED(creds),
+ LTTNG_SOCK_GET_GID_CRED(creds));
+ if (ret != LTTNG_OK) {
+ goto session_error;
+ }
+
+ /*
+ * Get the newly created session pointer back
+ *
+ * XXX: There is no need for the session lock list here since the caller
+ * (process_client_msg) is holding it. We might want to change that so a
+ * single command does not lock the entire session list.
+ */
+ session = session_find_by_name(name);
+ assert(session);
+
+ session->live_timer = live_timer;
+ /* Create default consumer output for the session not yet created. */
+ session->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
+ if (session->consumer == NULL) {
+ ret = LTTNG_ERR_FATAL;
+ goto consumer_error;
+ }
+
+ if (uris) {
+ ret = cmd_set_consumer_uri(session, nb_uri, uris);
+ if (ret != LTTNG_OK) {
+ goto consumer_error;
+ }
+ session->output_traces = 1;
+ } else {
+ session->output_traces = 0;
+ DBG2("Session %s created with no output", session->name);
+ }
+
+ session->consumer->enabled = 1;
+
+ return LTTNG_OK;
+
+consumer_error:
+ session_destroy(session);
+session_error:
+find_error:
+ return ret;
+}
+
+/*
+ * Command LTTNG_CREATE_SESSION_SNAPSHOT processed by the client thread.
+ */
+int cmd_create_session_snapshot(char *name, struct lttng_uri *uris,
+ size_t nb_uri, lttng_sock_cred *creds)
+{
+ int ret;
+ struct ltt_session *session;
+ struct snapshot_output *new_output = NULL;
+
+ assert(name);
+ assert(creds);
+
+ /*
+ * Create session in no output mode with URIs set to NULL. The uris we've
+ * received are for a default snapshot output if one.
+ */
+ ret = cmd_create_session_uri(name, NULL, 0, creds, 0);
+ if (ret != LTTNG_OK) {
+ goto error;
+ }
+
+ /* Get the newly created session pointer back. This should NEVER fail. */
+ session = session_find_by_name(name);
+ assert(session);
+
+ /* Flag session for snapshot mode. */
+ session->snapshot_mode = 1;
+
+ /* Skip snapshot output creation if no URI is given. */
+ if (nb_uri == 0) {
+ goto end;
+ }
+
+ new_output = snapshot_output_alloc();
+ if (!new_output) {
+ ret = LTTNG_ERR_NOMEM;
+ goto error_snapshot_alloc;
+ }
+
+ ret = snapshot_output_init_with_uri(DEFAULT_SNAPSHOT_MAX_SIZE, NULL,
+ uris, nb_uri, session->consumer, new_output, &session->snapshot);
+ if (ret < 0) {
+ if (ret == -ENOMEM) {
+ ret = LTTNG_ERR_NOMEM;
+ } else {
+ ret = LTTNG_ERR_INVALID;
+ }
+ goto error_snapshot;
+ }
+
+ rcu_read_lock();
+ snapshot_add_output(&session->snapshot, new_output);
+ rcu_read_unlock();
+
+end:
+ return LTTNG_OK;
+
+error_snapshot:
+ snapshot_output_destroy(new_output);
+error_snapshot_alloc:
+ session_destroy(session);
+error:
+ return ret;
+}
+
+/*
+ * Command LTTNG_DESTROY_SESSION processed by the client thread.
+ *
+ * Called with session lock held.
+ */
+int cmd_destroy_session(struct ltt_session *session, int wpipe,
+ struct notification_thread_handle *notification_thread_handle)
+{
+ int ret;
+ struct ltt_ust_session *usess;
+ struct ltt_kernel_session *ksess;
+
+ /* Safety net */
+ assert(session);
+
+ usess = session->ust_session;
+ ksess = session->kernel_session;
+
+ DBG("Begin destroy session %s (id %" PRIu64 ")", session->name, session->id);
+
+ if (session->rotation_pending_check_timer_enabled) {
+ if (timer_session_rotation_pending_check_stop(session)) {
+ ERR("Failed to stop the \"rotation pending check\" timer of session %s",
+ session->name);
+ }
+ }
+
+ if (session->rotation_schedule_timer_enabled) {
+ if (timer_session_rotation_schedule_timer_stop(
+ session)) {
+ ERR("Failed to stop the \"rotation schedule\" timer of session %s",
+ session->name);
+ }
+ }
+
+ if (session->rotate_size) {
+ unsubscribe_session_consumed_size_rotation(session, notification_thread_handle);
+ session->rotate_size = 0;
+ }
+
+ /*
+ * The rename of the current chunk is performed at stop, but if we rotated
+ * the session after the previous stop command, we need to rename the
+ * new (and empty) chunk that was started in between.
+ */
+ if (session->rotated_after_last_stop) {
+ rename_active_chunk(session);
+ }
+
+ /* Clean kernel session teardown */
+ kernel_destroy_session(ksess);
+
+ /* UST session teardown */
+ if (usess) {
+ /* Close any relayd session */
+ consumer_output_send_destroy_relayd(usess->consumer);
+
+ /* Destroy every UST application related to this session. */
+ ret = ust_app_destroy_trace_all(usess);
+ if (ret) {
+ ERR("Error in ust_app_destroy_trace_all");
+ }
+
+ /* Clean up the rest. */
+ trace_ust_destroy_session(usess);
+ }
+
+ /*
+ * Must notify the kernel thread here to update it's poll set in order to
+ * remove the channel(s)' fd just destroyed.
+ */
+ ret = notify_thread_pipe(wpipe);
+ if (ret < 0) {
+ PERROR("write kernel poll pipe");
+ }
+
+ if (session->shm_path[0]) {
+ /*
+ * When a session is created with an explicit shm_path,
+ * the consumer daemon will create its shared memory files
+ * at that location and will *not* unlink them. This is normal
+ * as the intention of that feature is to make it possible
+ * to retrieve the content of those files should a crash occur.
+ *
+ * To ensure the content of those files can be used, the
+ * sessiond daemon will replicate the content of the metadata
+ * cache in a metadata file.
+ *
+ * On clean-up, it is expected that the consumer daemon will
+ * unlink the shared memory files and that the session daemon
+ * will unlink the metadata file. Then, the session's directory
+ * in the shm path can be removed.
+ *
+ * Unfortunately, a flaw in the design of the sessiond's and
+ * consumerd's tear down of channels makes it impossible to
+ * determine when the sessiond _and_ the consumerd have both
+ * destroyed their representation of a channel. For one, the
+ * unlinking, close, and rmdir happen in deferred 'call_rcu'
+ * callbacks in both daemons.
+ *
+ * However, it is also impossible for the sessiond to know when
+ * the consumer daemon is done destroying its channel(s) since
+ * it occurs as a reaction to the closing of the channel's file
+ * descriptor. There is no resulting communication initiated
+ * from the consumerd to the sessiond to confirm that the
+ * operation is completed (and was successful).
+ *
+ * Until this is all fixed, the session daemon checks for the
+ * removal of the session's shm path which makes it possible
+ * to safely advertise a session as having been destroyed.
+ *
+ * Prior to this fix, it was not possible to reliably save
+ * a session making use of the --shm-path option, destroy it,
+ * and load it again. This is because the creation of the
+ * session would fail upon seeing the session's shm path
+ * already in existence.
+ *
+ * Note that none of the error paths in the check for the
+ * directory's existence return an error. This is normal
+ * as there isn't much that can be done. The session will
+ * be destroyed properly, except that we can't offer the
+ * guarantee that the same session can be re-created.
+ */
+ current_completion_handler = &destroy_completion_handler.handler;
+ ret = lttng_strncpy(destroy_completion_handler.shm_path,
+ session->shm_path,
+ sizeof(destroy_completion_handler.shm_path));
+ assert(!ret);
+ }
+ ret = session_destroy(session);
+
+ return ret;
+}
+
+/*
+ * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
+ */
+int cmd_register_consumer(struct ltt_session *session,
+ enum lttng_domain_type domain, const char *sock_path,
+ struct consumer_data *cdata)
+{
+ int ret, sock;
+ struct consumer_socket *socket = NULL;
+
+ assert(session);
+ assert(cdata);
+ assert(sock_path);
+
+ switch (domain) {
+ case LTTNG_DOMAIN_KERNEL:
+ {
+ struct ltt_kernel_session *ksess = session->kernel_session;
+
+ assert(ksess);
+
+ /* Can't register a consumer if there is already one */
+ if (ksess->consumer_fds_sent != 0) {
+ ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
+ goto error;
+ }
+
+ sock = lttcomm_connect_unix_sock(sock_path);
+ if (sock < 0) {
+ ret = LTTNG_ERR_CONNECT_FAIL;
+ goto error;
+ }
+ cdata->cmd_sock = sock;
+
+ socket = consumer_allocate_socket(&cdata->cmd_sock);
+ if (socket == NULL) {
+ ret = close(sock);
+ if (ret < 0) {
+ PERROR("close register consumer");
+ }
+ cdata->cmd_sock = -1;
+ ret = LTTNG_ERR_FATAL;
+ goto error;
+ }
+
+ socket->lock = zmalloc(sizeof(pthread_mutex_t));
+ if (socket->lock == NULL) {
+ PERROR("zmalloc pthread mutex");
+ ret = LTTNG_ERR_FATAL;
+ goto error;
+ }
+ pthread_mutex_init(socket->lock, NULL);
+ socket->registered = 1;
+
+ rcu_read_lock();
+ consumer_add_socket(socket, ksess->consumer);
+ rcu_read_unlock();
+
+ pthread_mutex_lock(&cdata->pid_mutex);
+ cdata->pid = -1;
+ pthread_mutex_unlock(&cdata->pid_mutex);
+
+ break;
+ }
+ default:
+ /* TODO: Userspace tracing */
+ ret = LTTNG_ERR_UND;
+ goto error;
+ }
+
+ return LTTNG_OK;
+
+error:
+ if (socket) {
+ consumer_destroy_socket(socket);
+ }
+ return ret;
+}
+
+/*
+ * Command LTTNG_LIST_DOMAINS processed by the client thread.
+ */
+ssize_t cmd_list_domains(struct ltt_session *session,
+ struct lttng_domain **domains)
+{
+ int ret, index = 0;
+ ssize_t nb_dom = 0;
+ struct agent *agt;
+ struct lttng_ht_iter iter;
+
+ if (session->kernel_session != NULL) {
+ DBG3("Listing domains found kernel domain");
+ nb_dom++;
+ }
+
+ if (session->ust_session != NULL) {
+ DBG3("Listing domains found UST global domain");
+ nb_dom++;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
+ agt, node.node) {
+ if (agt->being_used) {
+ nb_dom++;
+ }
+ }
+ rcu_read_unlock();
+ }
+
+ if (!nb_dom) {
+ goto end;
+ }
+
+ *domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
+ if (*domains == NULL) {
+ ret = LTTNG_ERR_FATAL;
+ goto error;
+ }
+
+ if (session->kernel_session != NULL) {
+ (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
+
+ /* Kernel session buffer type is always GLOBAL */
+ (*domains)[index].buf_type = LTTNG_BUFFER_GLOBAL;
+
+ index++;
+ }
+
+ if (session->ust_session != NULL) {
+ (*domains)[index].type = LTTNG_DOMAIN_UST;
+ (*domains)[index].buf_type = session->ust_session->buffer_type;
+ index++;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
+ agt, node.node) {
+ if (agt->being_used) {
+ (*domains)[index].type = agt->domain;
+ (*domains)[index].buf_type = session->ust_session->buffer_type;
+ index++;
+ }
+ }
+ rcu_read_unlock();
+ }
+end:
+ return nb_dom;
+
+error:
+ /* Return negative value to differentiate return code */
+ return -ret;
+}
+
+
+/*
+ * Command LTTNG_LIST_CHANNELS processed by the client thread.
+ */
+ssize_t cmd_list_channels(enum lttng_domain_type domain,
+ struct ltt_session *session, struct lttng_channel **channels)
+{
+ ssize_t nb_chan = 0, payload_size = 0, ret;
+
+ switch (domain) {
+ case LTTNG_DOMAIN_KERNEL:
+ if (session->kernel_session != NULL) {
+ nb_chan = session->kernel_session->channel_count;
+ }
+ DBG3("Number of kernel channels %zd", nb_chan);
+ if (nb_chan <= 0) {
+ ret = -LTTNG_ERR_KERN_CHAN_NOT_FOUND;
+ goto end;
+ }
+ break;
+ case LTTNG_DOMAIN_UST:
+ if (session->ust_session != NULL) {
+ rcu_read_lock();
+ nb_chan = lttng_ht_get_count(
+ session->ust_session->domain_global.channels);
+ rcu_read_unlock();
+ }
+ DBG3("Number of UST global channels %zd", nb_chan);
+ if (nb_chan < 0) {
+ ret = -LTTNG_ERR_UST_CHAN_NOT_FOUND;
+ goto end;
+ }
+ break;
+ default:
+ ret = -LTTNG_ERR_UND;
+ goto end;
+ }
+
+ if (nb_chan > 0) {
+ const size_t channel_size = sizeof(struct lttng_channel) +
+ sizeof(struct lttng_channel_extended);
+ struct lttng_channel_extended *channel_exts;
+
+ payload_size = nb_chan * channel_size;
+ *channels = zmalloc(payload_size);
+ if (*channels == NULL) {
+ ret = -LTTNG_ERR_FATAL;
+ goto end;
+ }
+
+ channel_exts = ((void *) *channels) +
+ (nb_chan * sizeof(struct lttng_channel));
+ ret = list_lttng_channels(domain, session, *channels, channel_exts);
+ if (ret != LTTNG_OK) {
+ free(*channels);
+ *channels = NULL;
+ goto end;
+ }
+ } else {
+ *channels = NULL;
+ }
+
+ ret = payload_size;
+end:
+ return ret;
+}
+
+/*
+ * Command LTTNG_LIST_EVENTS processed by the client thread.
+ */
+ssize_t cmd_list_events(enum lttng_domain_type domain,
+ struct ltt_session *session, char *channel_name,
+ struct lttng_event **events, size_t *total_size)
+{
+ int ret = 0;
+ ssize_t nb_event = 0;
+
+ switch (domain) {
+ case LTTNG_DOMAIN_KERNEL:
+ if (session->kernel_session != NULL) {
+ nb_event = list_lttng_kernel_events(channel_name,
+ session->kernel_session, events,
+ total_size);
+ }
+ break;
+ case LTTNG_DOMAIN_UST:
+ {
+ if (session->ust_session != NULL) {
+ nb_event = list_lttng_ust_global_events(channel_name,
+ &session->ust_session->domain_global, events,
+ total_size);
+ }
+ break;
+ }
+ case LTTNG_DOMAIN_LOG4J:
+ case LTTNG_DOMAIN_JUL:
+ case LTTNG_DOMAIN_PYTHON:
+ if (session->ust_session) {
+ struct lttng_ht_iter iter;
+ struct agent *agt;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(session->ust_session->agents->ht,
+ &iter.iter, agt, node.node) {
+ if (agt->domain == domain) {
+ nb_event = list_lttng_agent_events(
+ agt, events,
+ total_size);
+ break;
+ }
+ }
+ rcu_read_unlock();
+ }
+ break;
+ default:
+ ret = LTTNG_ERR_UND;
+ goto error;
+ }
+
+ return nb_event;
+
+error:
+ /* Return negative value to differentiate return code */
+ return -ret;
+}
+
+/*
+ * Using the session list, filled a lttng_session array to send back to the
+ * client for session listing.
+ *
+ * The session list lock MUST be acquired before calling this function. Use
+ * session_lock_list() and session_unlock_list().
+ */
+void cmd_list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
+ gid_t gid)
+{
+ int ret;
+ unsigned int i = 0;
+ struct ltt_session *session;
+ struct ltt_session_list *list = session_get_list();
+
+ DBG("Getting all available session for UID %d GID %d",
+ uid, gid);
+ /*
+ * Iterate over session list and append data after the control struct in
+ * the buffer.
+ */
+ cds_list_for_each_entry(session, &list->head, list) {
+ /*
+ * Only list the sessions the user can control.
+ */
+ if (!session_access_ok(session, uid, gid)) {
+ continue;
+ }
+
+ struct ltt_kernel_session *ksess = session->kernel_session;
+ struct ltt_ust_session *usess = session->ust_session;
+
+ if (session->consumer->type == CONSUMER_DST_NET ||
+ (ksess && ksess->consumer->type == CONSUMER_DST_NET) ||
+ (usess && usess->consumer->type == CONSUMER_DST_NET)) {
+ ret = build_network_session_path(sessions[i].path,
+ sizeof(sessions[i].path), session);
+ } else {
+ ret = snprintf(sessions[i].path, sizeof(sessions[i].path), "%s",
+ session->consumer->dst.session_root_path);
+ }
+ if (ret < 0) {
+ PERROR("snprintf session path");
+ continue;
+ }
+
+ strncpy(sessions[i].name, session->name, NAME_MAX);
+ sessions[i].name[NAME_MAX - 1] = '\0';
+ sessions[i].enabled = session->active;
+ sessions[i].snapshot_mode = session->snapshot_mode;
+ sessions[i].live_timer_interval = session->live_timer;
+ i++;
+ }
+}
+
+/*
+ * Command LTTNG_DATA_PENDING returning 0 if the data is NOT pending meaning
+ * ready for trace analysis (or any kind of reader) or else 1 for pending data.
+ */
+int cmd_data_pending(struct ltt_session *session)
+{
+ int ret;
+ struct ltt_kernel_session *ksess = session->kernel_session;
+ struct ltt_ust_session *usess = session->ust_session;
+
+ assert(session);
+
+ DBG("Data pending for session %s", session->name);
+
+ /* Session MUST be stopped to ask for data availability. */
+ if (session->active) {
+ ret = LTTNG_ERR_SESSION_STARTED;
+ goto error;
+ } else {
+ /*
+ * If stopped, just make sure we've started before else the above call
+ * will always send that there is data pending.
+ *
+ * The consumer assumes that when the data pending command is received,
+ * the trace has been started before or else no output data is written
+ * by the streams which is a condition for data pending. So, this is
+ * *VERY* important that we don't ask the consumer before a start
+ * trace.
+ */
+ if (!session->has_been_started) {
+ ret = 0;
+ goto error;
+ }
+ }
+
+ /* A rotation is still pending, we have to wait. */
+ if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
+ DBG("Rotate still pending for session %s", session->name);
+ ret = 1;
+ goto error;
+ }
+
+ if (ksess && ksess->consumer) {
+ ret = consumer_is_data_pending(ksess->id, ksess->consumer);
+ if (ret == 1) {
+ /* Data is still being extracted for the kernel. */
+ goto error;
+ }
+ }
+
+ if (usess && usess->consumer) {
+ ret = consumer_is_data_pending(usess->id, usess->consumer);
+ if (ret == 1) {
+ /* Data is still being extracted for the kernel. */
+ goto error;
+ }
+ }
+
+ /* Data is ready to be read by a viewer */
+ ret = 0;
+
+error:
+ return ret;
+}
+
+/*
+ * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR code.
+ */
+int cmd_snapshot_add_output(struct ltt_session *session,
+ struct lttng_snapshot_output *output, uint32_t *id)
+{
+ int ret;
+ struct snapshot_output *new_output;
+
+ assert(session);
+ assert(output);
+
+ DBG("Cmd snapshot add output for session %s", session->name);
+
+ /*
+ * Can't create an output if the session is not set in no-output mode.
+ */
+ if (session->output_traces) {
+ ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
+ goto error;
+ }
+
+ /* Only one output is allowed until we have the "tee" feature. */
+ if (session->snapshot.nb_output == 1) {
+ ret = LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST;
+ goto error;
+ }
+
+ new_output = snapshot_output_alloc();
+ if (!new_output) {
+ ret = LTTNG_ERR_NOMEM;
+ goto error;
+ }
+
+ ret = snapshot_output_init(output->max_size, output->name,
+ output->ctrl_url, output->data_url, session->consumer, new_output,
+ &session->snapshot);
+ if (ret < 0) {
+ if (ret == -ENOMEM) {
+ ret = LTTNG_ERR_NOMEM;
+ } else {
+ ret = LTTNG_ERR_INVALID;
+ }
+ goto free_error;
+ }
+
+ rcu_read_lock();
+ snapshot_add_output(&session->snapshot, new_output);
+ if (id) {
+ *id = new_output->id;
+ }
+ rcu_read_unlock();
+
+ return LTTNG_OK;
+
+free_error:
+ snapshot_output_destroy(new_output);
+error:
+ return ret;
+}
+
+/*
+ * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
+ *
+ * Return LTTNG_OK on success or else a LTTNG_ERR code.
+ */
+int cmd_snapshot_del_output(struct ltt_session *session,
+ struct lttng_snapshot_output *output)
+{
+ int ret;
+ struct snapshot_output *sout = NULL;
+
+ assert(session);
+ assert(output);
+
+ rcu_read_lock();
+
+ /*
+ * Permission denied to create an output if the session is not
+ * set in no output mode.
+ */
+ if (session->output_traces) {
+ ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
+ goto error;
+ }
+
+ if (output->id) {
+ DBG("Cmd snapshot del output id %" PRIu32 " for session %s", output->id,
+ session->name);
+ sout = snapshot_find_output_by_id(output->id, &session->snapshot);
+ } else if (*output->name != '\0') {
+ DBG("Cmd snapshot del output name %s for session %s", output->name,
+ session->name);
+ sout = snapshot_find_output_by_name(output->name, &session->snapshot);
+ }
+ if (!sout) {
+ ret = LTTNG_ERR_INVALID;
+ goto error;
+ }
+
+ snapshot_delete_output(&session->snapshot, sout);
+ snapshot_output_destroy(sout);
+ ret = LTTNG_OK;
+
+error:
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
+ *
+ * If no output is available, outputs is untouched and 0 is returned.
+ *
+ * Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
+ */
+ssize_t cmd_snapshot_list_outputs(struct ltt_session *session,
+ struct lttng_snapshot_output **outputs)
+{
+ int ret, idx = 0;
+ struct lttng_snapshot_output *list = NULL;
+ struct lttng_ht_iter iter;
+ struct snapshot_output *output;