return ret;
}
+/*
+ * Wait on consumer process termination.
+ *
+ * Need to be called with the consumer data lock held or from a context
+ * ensuring no concurrent access to data (e.g: cleanup).
+ */
+static void wait_consumer(struct consumer_data *consumer_data)
+{
+ pid_t ret;
+ int status;
+
+ /* No consumer daemon was ever spawned for this domain; nothing to reap. */
+ if (consumer_data->pid <= 0) {
+ return;
+ }
+
+ DBG("Waiting for complete teardown of consumerd (PID: %d)",
+ consumer_data->pid);
+ ret = waitpid(consumer_data->pid, &status, 0);
+ if (ret == -1) {
+ PERROR("consumerd waitpid pid: %d", consumer_data->pid);
+ } else if (!WIFEXITED(status)) {
+ /*
+ * Only inspect 'status' when waitpid() succeeded; it is
+ * uninitialized otherwise. The exit code comes from 'status',
+ * not from waitpid()'s return value (which is the reaped PID).
+ */
+ ERR("consumerd termination with error: %d",
+ WEXITSTATUS(status));
+ }
+ consumer_data->pid = 0;
+}
+
/*
* Cleanup the session daemon's data structures.
*/
}
}
+ wait_consumer(&kconsumer_data);
+ wait_consumer(&ustconsumer64_data);
+ wait_consumer(&ustconsumer32_data);
+
DBG("Cleaning up all agent apps");
agent_app_ht_clean();
DBG("[thread] Manage consumer started");
+ rcu_register_thread();
+ rcu_thread_online();
+
health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
health_code_update();
unlink(consumer_data->err_unix_sock_path);
unlink(consumer_data->cmd_unix_sock_path);
- consumer_data->pid = 0;
pthread_mutex_unlock(&consumer_data->lock);
/* Cleanup metadata socket mutex. */
health_unregister(health_sessiond);
DBG("consumer thread cleanup completed");
+ rcu_thread_offline();
+ rcu_unregister_thread();
+
return NULL;
}
wait_queue->count--;
ust_app_destroy(wait_node->app);
free(wait_node);
+ /*
+ * Silence warning of use-after-free in
+ * cds_list_for_each_entry_safe which uses
+ * __typeof__(*wait_node).
+ */
+ wait_node = NULL;
break;
}
}
* Don't care about return value. Let the manage apps threads
* handle app unregistration upon socket close.
*/
- (void) ust_app_register_done(app->sock);
+ (void) ust_app_register_done(app);
/*
* Even if the application socket has been closed, send the app
free(wait_node);
}
+ /* Empty command queue. */
+ for (;;) {
+ /* Dequeue command for registration */
+ node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
+ if (node == NULL) {
+ break;
+ }
+ ust_cmd = caa_container_of(node, struct ust_command, node);
+ ret = close(ust_cmd->sock);
+ if (ret < 0) {
+ PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
+ }
+ lttng_fd_put(LTTNG_FD_APPS, 1);
+ free(ust_cmd);
+ }
+
error_testpoint:
DBG("Dispatch thread dying");
if (err) {
ust_cmd = zmalloc(sizeof(struct ust_command));
if (ust_cmd == NULL) {
PERROR("ust command zmalloc");
+ ret = close(sock);
+ if (ret) {
+ PERROR("close");
+ }
goto error;
}
* domain.
*/
if (session->kernel_session->consumer) {
- consumer_destroy_output(session->kernel_session->consumer);
+ consumer_output_put(session->kernel_session->consumer);
}
session->kernel_session->consumer =
consumer_copy_output(session->consumer);
case LTTNG_DOMAIN_UST:
DBG3("Copying tracing session consumer output in UST session");
if (session->ust_session->consumer) {
- consumer_destroy_output(session->ust_session->consumer);
+ consumer_output_put(session->ust_session->consumer);
}
session->ust_session->consumer =
consumer_copy_output(session->consumer);
session->kernel_session->consumer->dst.trace_path,
S_IRWXU | S_IRWXG, session->uid, session->gid);
if (ret < 0) {
- if (ret != -EEXIST) {
+ if (errno != EEXIST) {
ERR("Trace directory creation error");
goto error;
}
DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
+ assert(!rcu_read_ongoing());
+
*sock_error = 0;
switch (cmd_ctx->lsm->cmd_type) {
}
case LTTNG_DISABLE_EVENT:
{
+
+ /*
+ * FIXME: handle filter; for now we just receive the filter's
+ * bytecode along with the filter expression which are sent by
+ * liblttng-ctl and discard them.
+ *
+ * This fixes an issue where the client may block while sending
+ * the filter payload and encounter an error because the session
+ * daemon closes the socket without ever handling this data.
+ */
+ size_t count = cmd_ctx->lsm->u.disable.expression_len +
+ cmd_ctx->lsm->u.disable.bytecode_len;
+
+ if (count) {
+ char data[LTTNG_FILTER_MAX_LEN];
+
+ DBG("Discarding disable event command payload of size %zu", count);
+ while (count) {
+ ret = lttcomm_recv_unix_sock(sock, data,
+ count > sizeof(data) ? sizeof(data) : count);
+ if (ret < 0) {
+ goto error;
+ }
+
+ count -= (size_t) ret;
+ }
+ }
/* FIXME: passing packed structure to non-packed pointer */
- /* TODO: handle filter */
ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
cmd_ctx->lsm->u.disable.channel_name,
&cmd_ctx->lsm->u.disable.event);
}
case LTTNG_DATA_PENDING:
{
- ret = cmd_data_pending(cmd_ctx->session);
+ int pending_ret;
+
+ /* 1 byte to return whether or not data is pending */
+ ret = setup_lttng_msg(cmd_ctx, 1);
+ if (ret < 0) {
+ goto setup_error;
+ }
+
+ pending_ret = cmd_data_pending(cmd_ctx->session);
+ /*
+ * FIXME
+ *
+ * This function may returns 0 or 1 to indicate whether or not
+ * there is data pending. In case of error, it should return an
+ * LTTNG_ERR code. However, some code paths may still return
+ * a nondescript error code, which we handle by returning an
+ * "unknown" error.
+ */
+ if (pending_ret == 0 || pending_ret == 1) {
+ ret = LTTNG_OK;
+ } else if (pending_ret < 0) {
+ ret = LTTNG_ERR_UNK;
+ goto setup_error;
+ } else {
+ ret = pending_ret;
+ goto setup_error;
+ }
+
+ *cmd_ctx->llm->payload = (uint8_t) pending_ret;
break;
}
case LTTNG_SNAPSHOT_ADD_OUTPUT:
session_unlock_list();
}
init_setup_error:
+ assert(!rcu_read_ongoing());
return ret;
}
}
exit_reg_apps:
+ /*
+ * Join dispatch thread after joining reg_apps_thread to ensure
+ * we don't leak applications in the queue.
+ */
ret = pthread_join(dispatch_thread, &status);
if (ret) {
errno = ret;
sessiond_cleanup_options();
exit_set_signal_handler:
+ /* Ensure all prior call_rcu are done. */
+ rcu_barrier();
+
if (!retval) {
exit(EXIT_SUCCESS);
} else {