Fix: Consumer sockets leak on error
diff --git a/src/common/consumer.c b/src/common/consumer.c
index 51f861e23695c642a56a943aa272fb24c89c7a5d..5dc1d92b40685598e1c841b8d3128073aeed65ba 100644
--- a/src/common/consumer.c
+++ b/src/common/consumer.c
@@ -917,7 +917,7 @@ static int consumer_update_poll_array(
                 * changed where this function will be called back again.
                 */
                if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
-                               stream->endpoint_status) {
+                               stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
                        continue;
                }
                DBG("Active FD %d", stream->wait_fd);
@@ -1267,6 +1267,8 @@ end:
  * core function for writing trace buffers to either the local filesystem or
  * the network.
  *
+ * It must be called with the stream lock held.
+ *
  * Careful review MUST be put if any changes occur!
  *
  * Returns the number of bytes written
@@ -1419,6 +1421,8 @@ end:
 /*
  * Splice the data from the ring buffer to the tracefile.
  *
+ * It must be called with the stream lock held.
+ *
  * Returns the number of bytes spliced.
  */
 ssize_t lttng_consumer_on_read_subbuffer_splice(
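
Both hunks above add the same precondition to the mmap and splice subbuffer readers: the caller must hold the stream lock while the data is written out. A minimal caller sketch of that contract, assuming the per-stream lock is a pthread mutex named lock and using placeholder types and a placeholder reader function (none of these names are taken from the file):

#include <pthread.h>
#include <sys/types.h>

/* Hypothetical, trimmed-down stream carrying the per-stream lock. */
struct stream {
        pthread_mutex_t lock;
};

/* Placeholder for the mmap/splice subbuffer readers named above. */
static ssize_t read_subbuffer(struct stream *stream)
{
        /* Assumes the caller already holds stream->lock. */
        (void) stream;
        return 0;
}

static ssize_t consume_one_subbuffer(struct stream *stream)
{
        ssize_t ret;

        pthread_mutex_lock(&stream->lock);
        ret = read_subbuffer(stream);
        pthread_mutex_unlock(&stream->lock);
        return ret;
}

int main(void)
{
        struct stream s = { .lock = PTHREAD_MUTEX_INITIALIZER };

        return (int) consume_one_subbuffer(&s);
}
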
@@ -1950,7 +1954,7 @@ static void validate_endpoint_status_data_stream(void)
        rcu_read_lock();
        cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
                /* Validate delete flag of the stream */
-               if (stream->endpoint_status != CONSUMER_ENDPOINT_INACTIVE) {
+               if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
                        continue;
                }
                /* Delete it right now */
@@ -1975,7 +1979,7 @@ static void validate_endpoint_status_metadata_stream(
        rcu_read_lock();
        cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
                /* Validate delete flag of the stream */
-               if (!stream->endpoint_status) {
+               if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
                        continue;
                }
                /*
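
The three endpoint_status hunks above replace implicit truthiness tests with explicit comparisons against CONSUMER_ENDPOINT_ACTIVE / CONSUMER_ENDPOINT_INACTIVE. A small sketch of why the explicit form is safer, with an illustrative enum whose numeric values are assumed here and not taken from the consumer headers:

#include <stdio.h>

/* Illustrative stand-in for the endpoint status enum. */
enum consumer_endpoint_status {
        CONSUMER_ENDPOINT_ACTIVE,       /* happens to be 0 in this sketch */
        CONSUMER_ENDPOINT_INACTIVE,
};

int main(void)
{
        enum consumer_endpoint_status status = CONSUMER_ENDPOINT_INACTIVE;

        /* Implicit test: only means "inactive" as long as ACTIVE is 0. */
        if (status) {
                printf("implicit: inactive\n");
        }

        /* Explicit test: independent of the values behind the enum. */
        if (status == CONSUMER_ENDPOINT_INACTIVE) {
                printf("explicit: inactive\n");
        }
        return 0;
}
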
@@ -2450,7 +2454,7 @@ end:
  */
 void *consumer_thread_sessiond_poll(void *data)
 {
-       int sock, client_socket, ret;
+       int sock = -1, client_socket = -1, ret;
        /*
         * structure to poll for incoming data on communication socket avoids
         * making blocking sockets.
@@ -2510,6 +2514,13 @@ void *consumer_thread_sessiond_poll(void *data)
                goto end;
        }
 
+       /* This socket is not useful anymore. */
+       ret = close(client_socket);
+       if (ret < 0) {
+               PERROR("close client_socket");
+       }
+       client_socket = -1;
+
        /* update the polling structure to poll on the established socket */
        consumer_sockpoll[1].fd = sock;
        consumer_sockpoll[1].events = POLLIN | POLLPRI;
@@ -2553,6 +2564,20 @@ end:
         */
        notify_thread_pipe(ctx->consumer_data_pipe[1]);
 
+       /* Cleaning up possibly open sockets. */
+       if (sock >= 0) {
+               ret = close(sock);
+               if (ret < 0) {
+                       PERROR("close sock sessiond poll");
+               }
+       }
+       if (client_socket >= 0) {
+               ret = close(client_socket);
+               if (ret < 0) {
+                       PERROR("close client_socket sessiond poll");
+               }
+       }
+
        rcu_unregister_thread();
        return NULL;
 }
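
Taken together, the sessiond poll thread changes follow a leak-proof descriptor pattern: initialize every fd to -1, close and reset a descriptor as soon as it is no longer needed, and on the exit path close only what is still open. A condensed, self-contained sketch of that pattern with hypothetical names (plain opens of /dev/null stand in for the two sockets):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int run(void)
{
        int ret = -1;
        int listen_fd = -1, conn_fd = -1;       /* -1 means "not open" */

        listen_fd = open("/dev/null", O_RDONLY);
        if (listen_fd < 0) {
                goto end;
        }

        conn_fd = open("/dev/null", O_RDONLY);
        if (conn_fd < 0) {
                goto end;
        }

        /* This descriptor is not useful anymore: close it and reset it. */
        if (close(listen_fd) < 0) {
                perror("close listen_fd");
        }
        listen_fd = -1;

        ret = 0;
end:
        /* Close only what is still open, each descriptor exactly once. */
        if (conn_fd >= 0 && close(conn_fd) < 0) {
                perror("close conn_fd");
        }
        if (listen_fd >= 0 && close(listen_fd) < 0) {
                perror("close listen_fd");
        }
        return ret;
}

int main(void)
{
        return run();
}
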