return ret;
}
+/*
+ * Wait for consumer process termination.
+ *
+ * Must be called with the consumer data lock held, or from a context
+ * that guarantees no concurrent access to the data (e.g. cleanup).
+ */
+static void wait_consumer(struct consumer_data *consumer_data)
+{
+ pid_t ret;
+ int status;
+
+ if (consumer_data->pid <= 0) {
+ return;
+ }
+
+ DBG("Waiting for complete teardown of consumerd (PID: %d)",
+ consumer_data->pid);
+ ret = waitpid(consumer_data->pid, &status, 0);
+ if (ret == -1) {
+ PERROR("consumerd waitpid pid: %d", consumer_data->pid);
+ } else if (!WIFEXITED(status)) {
+ ERR("consumerd termination with error: %d",
+ WEXITSTATUS(status));
+ }
+ consumer_data->pid = 0;
+}
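For reference, a minimal standalone sketch of the waitpid(2) status decoding that wait_consumer() above relies on; the reap_child() name is hypothetical. WEXITSTATUS() decodes the status word filled in by waitpid(), not its return value, and that word is only meaningful when waitpid() succeeded and WIFEXITED() is true:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Hypothetical helper: reap a child and report how it terminated. */
static void reap_child(pid_t pid)
{
	int status;
	pid_t ret;

	ret = waitpid(pid, &status, 0);
	if (ret == -1) {
		perror("waitpid");
	} else if (WIFEXITED(status)) {
		/* Normal termination: decode the exit code. */
		printf("child %d exited with status %d\n",
				(int) pid, WEXITSTATUS(status));
	} else if (WIFSIGNALED(status)) {
		/* Termination by signal. */
		printf("child %d killed by signal %d\n",
				(int) pid, WTERMSIG(status));
	}
}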
+
/*
* Cleanup the session daemon's data structures.
*/
}
}
+ wait_consumer(&kconsumer_data);
+ wait_consumer(&ustconsumer64_data);
+ wait_consumer(&ustconsumer32_data);
+
DBG("Cleaning up all agent apps");
agent_app_ht_clean();
free(kmod_probes_list);
free(kmod_extra_probes_list);
+ run_as_destroy_worker();
+
/* <fun> */
DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
"Matthew, BEET driven development works!%c[%dm",
}
/* Check for data on kernel pipe */
- if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
- (void) lttng_read(kernel_poll_pipe[0],
- &tmp, 1);
- /*
- * Ret value is useless here, if this pipe gets any actions an
- * update is required anyway.
- */
- update_poll_flag = 1;
- continue;
- } else {
- /*
- * New CPU detected by the kernel. Adding kernel stream to
- * kernel session and updating the kernel consumer
- */
- if (revents & LPOLLIN) {
+ if (revents & LPOLLIN) {
+ if (pollfd == kernel_poll_pipe[0]) {
+ (void) lttng_read(kernel_poll_pipe[0],
+ &tmp, 1);
+ /*
+ * The return value is of no interest here: any activity
+ * on this pipe requires a poll-flag update anyway.
+ */
+ update_poll_flag = 1;
+ continue;
+ } else {
+ /*
+ * New CPU detected by the kernel: add the kernel stream
+ * to the kernel session and update the kernel consumer.
+ */
ret = update_kernel_stream(&kconsumer_data, pollfd);
if (ret < 0) {
continue;
}
break;
- /*
- * TODO: We might want to handle the LPOLLERR | LPOLLHUP
- * and unregister kernel stream at this point.
- */
}
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ update_poll_flag = 1;
+ continue;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
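The poll hunks above (and those that follow) all converge on the same dispatch order, worth stating once: service LPOLLIN first so that data queued before a peer hangup is still drained, handle the error/hangup flags next, and report any other event mask as unexpected. A minimal sketch of that order with plain poll(2) rather than lttng's poll wrapper; handle_input() is illustrative only:

#define _GNU_SOURCE	/* for POLLRDHUP */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative handler: drain whatever is readable and discard it. */
static int handle_input(int fd)
{
	char buf[256];

	return read(fd, buf, sizeof(buf)) < 0 ? -1 : 0;
}

static int dispatch_one(struct pollfd *pfd)
{
	if (pfd->revents & POLLIN) {
		/* Drain pending data before honouring any hangup. */
		return handle_input(pfd->fd);
	} else if (pfd->revents & (POLLERR | POLLHUP | POLLRDHUP)) {
		/* Peer errored or hung up; caller should drop this fd. */
		return -1;
	} else if (pfd->revents) {
		/* No handler for this event mask: report it loudly. */
		fprintf(stderr, "unexpected poll events %d on fd %d\n",
				(int) pfd->revents, pfd->fd);
		return -1;
	}
	return 0;
}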
/* Event on the registration socket */
if (pollfd == consumer_data->err_sock) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ if (revents & LPOLLIN) {
+ continue;
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
ERR("consumer err socket poll error");
goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
if (pollfd == sock) {
/* Event on the consumerd socket */
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+ && !(revents & LPOLLIN)) {
ERR("consumer err socket second poll error");
goto error;
}
goto exit;
} else if (pollfd == consumer_data->metadata_fd) {
+ if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+ && !(revents & LPOLLIN)) {
+ ERR("consumer err metadata socket second poll error");
+ goto error;
+ }
/* UST metadata requests */
ret = ust_consumer_metadata_request(
&consumer_data->metadata_sock);
unlink(consumer_data->err_unix_sock_path);
unlink(consumer_data->cmd_unix_sock_path);
- consumer_data->pid = 0;
pthread_mutex_unlock(&consumer_data->lock);
/* Cleanup metadata socket mutex. */
/* Inspect the apps cmd pipe */
if (pollfd == apps_cmd_pipe[0]) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
- ERR("Apps command pipe error");
- goto error;
- } else if (revents & LPOLLIN) {
+ if (revents & LPOLLIN) {
int sock;
/* Empty pipe */
health_code_update();
/*
- * We only monitor the error events of the socket. This
- * thread does not handle any incoming data from UST
- * (POLLIN).
+ * Since this is a command socket (write then read),
+ * we only monitor the error events of the socket.
*/
ret = lttng_poll_add(&events, sock,
LPOLLERR | LPOLLHUP | LPOLLRDHUP);
}
DBG("Apps with sock %d added to poll set", sock);
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ ERR("Apps command pipe error");
+ goto error;
+ } else {
+ ERR("Unknown poll events %u for sock %d", revents, pollfd);
+ goto error;
}
} else {
/*
/* Socket closed on remote end. */
ust_app_unregister(pollfd);
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
wait_queue->count--;
ust_app_destroy(wait_node->app);
free(wait_node);
+ /*
+ * Silence a use-after-free warning in
+ * cds_list_for_each_entry_safe, which uses
+ * __typeof__(*wait_node).
+ */
+ wait_node = NULL;
break;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
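The wait_node reset above leans on a property of liburcu's safe list iteration worth illustrating: cds_list_for_each_entry_safe() caches the next element, so the current node can be freed (and the cursor cleared for the benefit of static analyzers) inside the loop body. A minimal sketch, with illustrative struct and function names:

#include <stdlib.h>
#include <urcu/list.h>

struct wait_node {
	struct cds_list_head head;
};

static void drain_wait_queue(struct cds_list_head *queue)
{
	struct wait_node *node, *tmp;

	cds_list_for_each_entry_safe(node, tmp, queue, head) {
		cds_list_del(&node->head);
		free(node);
		/*
		 * As in the patch: clearing the cursor after free keeps
		 * analyzers from flagging the macro's internal use of
		 * __typeof__(*node) as a use-after-free.
		 */
		node = NULL;
	}
}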
* Don't care about return value. Let the manage apps threads
* handle app unregistration upon socket close.
*/
- (void) ust_app_register_done(app->sock);
+ (void) ust_app_register_done(app);
/*
* Even if the application socket has been closed, send the app
/* Event on the registration socket */
if (pollfd == apps_sock) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
- ERR("Register apps socket poll error");
- goto error;
- } else if (revents & LPOLLIN) {
+ if (revents & LPOLLIN) {
sock = lttcomm_accept_unix_sock(apps_sock);
if (sock < 0) {
goto error;
* barrier with the exchange in cds_wfcq_enqueue.
*/
futex_nto1_wake(&ust_cmd_queue.futex);
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ ERR("Register apps socket poll error");
+ goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
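A minimal sketch of the enqueue-then-wake pattern referenced by the barrier comment above, using liburcu's wfcqueue; the wake-up itself is left abstract, since futex_nto1_wake() is sessiond-internal:

#include <urcu/wfcqueue.h>

struct cmd {
	struct cds_wfcq_node node;
	int type;
};

/* Initialize once with cds_wfcq_init(&queue_head, &queue_tail). */
static struct cds_wfcq_head queue_head;
static struct cds_wfcq_tail queue_tail;

static void queue_cmd(struct cmd *c)
{
	cds_wfcq_node_init(&c->node);
	/*
	 * cds_wfcq_enqueue issues a memory barrier before linking the
	 * node, so a consumer woken after this call observes the node.
	 */
	cds_wfcq_enqueue(&queue_head, &queue_tail, &c->node);
	/* Wake the dispatch thread here (futex, condvar, pipe, ...). */
}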
DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
+ assert(!rcu_read_ongoing());
+
*sock_error = 0;
switch (cmd_ctx->lsm->cmd_type) {
session_unlock_list();
}
init_setup_error:
+ assert(!rcu_read_ongoing());
return ret;
}
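The two assertions added above bracket client-command processing; their point is that every RCU read-side critical section opened while handling a command must be closed again before returning. A minimal sketch, assuming liburcu's rcu_read_ongoing():

#include <assert.h>
#include <urcu.h>

static void process_command(void)
{
	assert(!rcu_read_ongoing());

	rcu_read_lock();
	/* ... look up shared state under RCU protection ... */
	rcu_read_unlock();

	/* An early return that skipped the unlock would trip this. */
	assert(!rcu_read_ongoing());
}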
/* Event on the registration socket */
if (pollfd == sock) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ if (revents & LPOLLIN) {
+ continue;
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
ERR("Health socket poll error");
goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
/* Event on the registration socket */
if (pollfd == client_sock) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ if (revents & LPOLLIN) {
+ continue;
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
ERR("Client socket poll error");
goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
}
}
+ if (run_as_create_worker(argv[0]) < 0) {
+ goto exit_create_run_as_worker_cleanup;
+ }
+
/*
* Starting from here, we can create threads. This needs to be after
* lttng_daemonize due to RCU.
health_app_destroy(health_sessiond);
exit_health_sessiond_cleanup:
+exit_create_run_as_worker_cleanup:
exit_options:
+ /* Ensure all prior call_rcu callbacks have completed. */
+ rcu_barrier();
+
sessiond_cleanup_options();
exit_set_signal_handler:
- /* Ensure all prior call_rcu are done. */
- rcu_barrier();
if (!retval) {
exit(EXIT_SUCCESS);
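Moving the rcu_barrier() ahead of sessiond_cleanup_options() enforces an ordering worth spelling out: rcu_barrier() blocks until every call_rcu() callback queued so far has executed, so it must run before freeing anything those callbacks may still reference. A minimal sketch, assuming liburcu, with illustrative names:

#include <stdlib.h>
#include <urcu.h>

struct node {
	struct rcu_head rcu;
};

static void free_node_rcu(struct rcu_head *head)
{
	free(caa_container_of(head, struct node, rcu));
}

static void teardown(struct node *n)
{
	call_rcu(&n->rcu, free_node_rcu);	/* deferred reclamation */
	rcu_barrier();	/* all queued callbacks have now run */
	/* Safe to release anything the callbacks could reference. */
}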