Fix: correctly close metadata on sessiond thread shutdown
[lttng-tools.git] src/common/ust-consumer/ust-consumer.c
index 986826fca267c455ba9825ea288edbff2e1ca8f4..6a692b9ba5a8bffd7826bd164dfc3f1de9f8c9df 100644
@@ -194,18 +194,41 @@ static int send_stream_to_thread(struct lttng_consumer_stream *stream,
 
        /* Get the right pipe where the stream will be sent. */
        if (stream->metadata_flag) {
+               ret = consumer_add_metadata_stream(stream);
+               if (ret) {
+                       ERR("Consumer add metadata stream %" PRIu64 " failed.",
+                                       stream->key);
+                       goto error;
+               }
                stream_pipe = ctx->consumer_metadata_pipe;
        } else {
+               ret = consumer_add_data_stream(stream);
+               if (ret) {
+                       ERR("Consumer add stream %" PRIu64 " failed.",
+                                       stream->key);
+                       goto error;
+               }
                stream_pipe = ctx->consumer_data_pipe;
        }
 
+       /*
+        * From this point on, the stream's ownership has been moved away from
+        * the channel and becomes globally visible.
+        */
+       stream->globally_visible = 1;
+
        ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
        if (ret < 0) {
                ERR("Consumer write %s stream to pipe %d",
                                stream->metadata_flag ? "metadata" : "data",
                                lttng_pipe_get_writefd(stream_pipe));
+               if (stream->metadata_flag) {
+                       consumer_del_stream_for_metadata(stream);
+               } else {
+                       consumer_del_stream_for_data(stream);
+               }
        }
-
+error:
        return ret;
 }
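
The hunk above changes send_stream_to_thread() so that the stream is added to the global stream hash table (consumer_add_metadata_stream() or consumer_add_data_stream()) and marked globally visible before it is written to the owning thread's pipe, and is deleted again if the pipe write fails. A minimal, self-contained sketch of that register/publish/roll-back shape in plain C follows; struct item, registry and send_item_to_thread are illustrative names, not the lttng-tools data structures.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct item {
	struct item *next;
	int globally_visible;
};

static struct item *registry;	/* hypothetical global table */
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

static void registry_add(struct item *it)
{
	pthread_mutex_lock(&registry_lock);
	it->next = registry;
	registry = it;
	pthread_mutex_unlock(&registry_lock);
}

static void registry_del(struct item *it)
{
	pthread_mutex_lock(&registry_lock);
	for (struct item **p = &registry; *p; p = &(*p)->next) {
		if (*p == it) {
			*p = it->next;
			break;
		}
	}
	pthread_mutex_unlock(&registry_lock);
}

/*
 * Register the item before handing it to the consuming thread; if the pipe
 * write fails, undo the registration so no half-published item is left behind.
 */
static int send_item_to_thread(struct item *it, int pipe_wfd)
{
	ssize_t ret;

	registry_add(it);
	it->globally_visible = 1;

	ret = write(pipe_wfd, &it, sizeof(it));
	if (ret != (ssize_t) sizeof(it)) {
		fprintf(stderr, "pipe write failed: %s\n", strerror(errno));
		registry_del(it);
		return -1;
	}
	return 0;
}

int main(void)
{
	int fds[2];
	struct item it = { 0 };

	if (pipe(fds)) {
		return 1;
	}
	return send_item_to_thread(&it, fds[1]) ? 1 : 0;
}

The point of this ordering is that the receiving thread can assume every pointer it reads from the pipe is already registered, while a failed write leaves no dangling global entry.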
 
@@ -533,60 +556,20 @@ static int send_streams_to_thread(struct lttng_consumer_channel *channel,
                         * If we are unable to send the stream to the thread, there is
                         * a big problem so just stop everything.
                         */
+                       /* Remove node from the channel stream list. */
+                       cds_list_del(&stream->send_node);
                        goto error;
                }
 
                /* Remove node from the channel stream list. */
                cds_list_del(&stream->send_node);
 
-               /*
-                * From this point on, the stream's ownership has been moved away from
-                * the channel and becomes globally visible.
-                */
-               stream->globally_visible = 1;
        }
 
 error:
        return ret;
 }
 
-/*
- * Write metadata to the given channel using ustctl to convert the string to
- * the ringbuffer.
- * Called only from consumer_metadata_cache_write.
- * The metadata cache lock MUST be acquired to write in the cache.
- *
- * Return 0 on success else a negative value.
- */
-int lttng_ustconsumer_push_metadata(struct lttng_consumer_channel *metadata,
-               const char *metadata_str, uint64_t target_offset, uint64_t len)
-{
-       int ret;
-
-       assert(metadata);
-       assert(metadata_str);
-
-       DBG("UST consumer writing metadata to channel %s", metadata->name);
-
-       if (!metadata->metadata_stream) {
-               ret = 0;
-               goto error;
-       }
-
-       assert(target_offset <= metadata->metadata_cache->max_offset);
-       ret = ustctl_write_metadata_to_channel(metadata->uchan,
-                       metadata_str + target_offset, len);
-       if (ret < 0) {
-               ERR("ustctl write metadata fail with ret %d, len %" PRIu64, ret, len);
-               goto error;
-       }
-
-       ustctl_flush_buffer(metadata->metadata_stream->ustream, 1);
-
-error:
-       return ret;
-}
-
 /*
  * Flush channel's streams using the given key to retrieve the channel.
  *
@@ -616,12 +599,51 @@ static int flush_channel(uint64_t chan_key)
        cds_lfht_for_each_entry_duplicate(ht->ht,
                        ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
                        &channel->key, &iter.iter, stream, node_channel_id.node) {
-                       ustctl_flush_buffer(stream->ustream, 1);
+               ustctl_flush_buffer(stream->ustream, 1);
        }
 error:
        rcu_read_unlock();
        return ret;
 }
+/*
+ * Stop the metadata switch timer, close the channel's metadata stream
+ * wakeup_fd and, in monitor mode, the write side of its metadata poll pipe.
+ * RCU read side lock MUST be acquired before calling this function.
+ * NOTE: This function does NOT take any channel nor stream lock.
+ *
+ * Return LTTNG_OK on success else an LTTng error code.
+ */
+static int _close_metadata(struct lttng_consumer_channel *channel)
+{
+       int ret = LTTNG_OK;
+
+       assert(channel);
+       assert(channel->type == CONSUMER_CHANNEL_TYPE_METADATA);
+
+       if (channel->switch_timer_enabled == 1) {
+               DBG("Deleting timer on metadata channel");
+               consumer_timer_switch_stop(channel);
+       }
+
+       if (channel->metadata_stream) {
+               ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream);
+               if (ret < 0) {
+                       ERR("UST consumer unable to close fd of metadata (ret: %d)", ret);
+                       ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+               }
+
+               if (channel->monitor) {
+                       /* The read side is closed by consumer_del_metadata_stream. */
+                       ret = close(channel->metadata_stream->ust_metadata_poll_pipe[1]);
+                       if (ret < 0) {
+                               PERROR("Close UST metadata write-side poll pipe");
+                               ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+                       }
+               }
+       }
+
+       return ret;
+}
 
 /*
  * Close metadata stream wakeup_fd using the given key to retrieve the channel.
@@ -656,26 +678,7 @@ static int close_metadata(uint64_t chan_key)
                goto error_unlock;
        }
 
-       if (channel->switch_timer_enabled == 1) {
-               DBG("Deleting timer on metadata channel");
-               consumer_timer_switch_stop(channel);
-       }
-
-       if (channel->metadata_stream) {
-               ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream);
-               if (ret < 0) {
-                       ERR("UST consumer unable to close fd of metadata (ret: %d)", ret);
-                       ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
-                       goto error_unlock;
-               }
-               if (channel->monitor) {
-                       /* close the read-side in consumer_del_metadata_stream */
-                       ret = close(channel->metadata_stream->ust_metadata_poll_pipe[1]);
-                       if (ret < 0) {
-                               PERROR("Close UST metadata write-side poll pipe");
-                       }
-               }
-       }
+       ret = _close_metadata(channel);
 
 error_unlock:
        pthread_mutex_unlock(&channel->lock);
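
The two hunks above factor the metadata close sequence (stop the switch timer, close the metadata stream's wakeup_fd and, in monitor mode, the write side of the metadata poll pipe) into _close_metadata(), which takes no locks itself; close_metadata() keeps its channel locking and simply delegates, and the same helper is reused from the shutdown path further below. A reduced sketch of that "lock-free helper, callers hold the lock" shape, with illustrative names rather than the lttng-tools API:

#include <pthread.h>
#include <unistd.h>

struct chan {
	pthread_mutex_t lock;
	int wakeup_fd;		/* -1 once closed */
};

/* Does the actual work; takes no lock, callers must hold chan->lock. */
static int _close_chan(struct chan *c)
{
	int ret = 0;

	if (c->wakeup_fd >= 0) {
		ret = close(c->wakeup_fd);
		c->wakeup_fd = -1;
	}
	return ret;
}

/* Command path: lock the channel, delegate, unlock. */
static int close_chan_cmd(struct chan *c)
{
	int ret;

	pthread_mutex_lock(&c->lock);
	ret = _close_chan(c);
	pthread_mutex_unlock(&c->lock);
	return ret;
}

int main(void)
{
	int fds[2];
	struct chan c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	if (pipe(fds)) {
		return 1;
	}
	c.wakeup_fd = fds[1];
	return close_chan_cmd(&c) ? 1 : 0;
}

Keeping the helper lock-free lets each caller apply the locking discipline its own context requires, which is the property the shutdown path in lttng_ustconsumer_close_metadata() relies on.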
@@ -784,7 +787,8 @@ static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
 
        metadata_channel = consumer_find_channel(key);
        if (!metadata_channel) {
-               ERR("UST snapshot metadata channel not found for key %lu", key);
+               ERR("UST snapshot metadata channel not found for key %" PRIu64,
+                       key);
                ret = -1;
                goto error;
        }
@@ -794,7 +798,7 @@ static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
         * Ask the sessiond if we have new metadata waiting and update the
         * consumer metadata cache.
         */
-       ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel);
+       ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0);
        if (ret < 0) {
                goto error;
        }
@@ -880,12 +884,12 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
 
        channel = consumer_find_channel(key);
        if (!channel) {
-               ERR("UST snapshot channel not found for key %lu", key);
+               ERR("UST snapshot channel not found for key %" PRIu64, key);
                ret = -1;
                goto error;
        }
        assert(!channel->monitor);
-       DBG("UST consumer snapshot channel %lu", key);
+       DBG("UST consumer snapshot channel %" PRIu64, key);
 
        cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
                /* Lock stream because we are about to change its state. */
@@ -974,12 +978,12 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                                        padded_len - len);
                        if (use_relayd) {
                                if (read_len != len) {
-                                       ret = -1;
+                                       ret = -EPERM;
                                        goto error_put_subbuf;
                                }
                        } else {
                                if (read_len != padded_len) {
-                                       ret = -1;
+                                       ret = -EPERM;
                                        goto error_put_subbuf;
                                }
                        }
@@ -1017,7 +1021,8 @@ error:
  * Receive the metadata updates from the sessiond.
  */
 int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
-               uint64_t len, struct lttng_consumer_channel *channel)
+               uint64_t len, struct lttng_consumer_channel *channel,
+               int timer)
 {
        int ret, ret_code = LTTNG_OK;
        char *metadata_str;
@@ -1039,17 +1044,6 @@ int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
                goto end_free;
        }
 
-       /*
-        * XXX: The consumer data lock is acquired before calling metadata cache
-        * write which calls push metadata that MUST be protected by the consumer
-        * lock in order to be able to check the validity of the metadata stream of
-        * the channel.
-        *
-        * Note that this will be subject to change to better fine grained locking
-        * and ultimately try to get rid of this global consumer data lock.
-        */
-       pthread_mutex_lock(&consumer_data.lock);
-       pthread_mutex_lock(&channel->lock);
        pthread_mutex_lock(&channel->metadata_cache->lock);
        ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
        if (ret < 0) {
@@ -1061,15 +1055,11 @@ int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
                 * waiting for the metadata cache to be flushed.
                 */
                pthread_mutex_unlock(&channel->metadata_cache->lock);
-               pthread_mutex_unlock(&channel->lock);
-               pthread_mutex_unlock(&consumer_data.lock);
                goto end_free;
        }
        pthread_mutex_unlock(&channel->metadata_cache->lock);
-       pthread_mutex_unlock(&channel->lock);
-       pthread_mutex_unlock(&consumer_data.lock);
 
-       while (consumer_metadata_cache_flushed(channel, offset + len)) {
+       while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
                DBG("Waiting for metadata to be flushed");
                usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
        }
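
The hunks above narrow the locking in lttng_ustconsumer_recv_metadata(): the consumer_data and channel locks are no longer taken around the cache write, only the metadata cache's own lock, and the flush wait loop runs with no lock held (consumer_metadata_cache_flushed() now also receives the new timer argument). A reduced sketch of that "cache-local lock for the write, lock-free wait for the flush" shape; the types and the busy-wait policy below are illustrative, not the lttng-tools implementation:

#include <pthread.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

struct metadata_cache {
	pthread_mutex_t lock;
	char data[4096];
	uint64_t written;	/* bytes appended so far */
	uint64_t flushed;	/* bytes pushed to the ring buffer */
};

static int cache_write(struct metadata_cache *c, const char *buf, uint64_t len)
{
	int ret = 0;

	pthread_mutex_lock(&c->lock);	/* only the cache lock is taken */
	if (c->written + len <= sizeof(c->data)) {
		memcpy(c->data + c->written, buf, len);
		c->written += len;
	} else {
		ret = -1;
	}
	pthread_mutex_unlock(&c->lock);
	return ret;
}

/*
 * Wait (with a sleep) until another thread has flushed up to `target`,
 * mirroring the "while (cache_flushed(...)) usleep(...)" loop above.
 */
static void wait_flushed(struct metadata_cache *c, uint64_t target)
{
	for (;;) {
		uint64_t flushed;

		pthread_mutex_lock(&c->lock);
		flushed = c->flushed;
		pthread_mutex_unlock(&c->lock);
		if (flushed >= target) {
			break;
		}
		usleep(1000);
	}
}

int main(void)
{
	struct metadata_cache c = { .lock = PTHREAD_MUTEX_INITIALIZER };
	const char *md = "metadata";

	if (cache_write(&c, md, strlen(md))) {
		return 1;
	}
	c.flushed = c.written;	/* pretend the consumer flushed everything */
	wait_flushed(&c, c.written);
	return 0;
}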
@@ -1208,6 +1198,13 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        goto end_channel_error;
                }
 
+               /*
+                * Assign the UST application UID to the channel. This value is ignored
+                * for per-PID buffers. It is specific to UST, which is why it is set
+                * after the allocation.
+                */
+               channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;
+
                /* Build channel attributes from received message. */
                attr.subbuf_size = msg.u.ask_channel.subbuf_size;
                attr.num_subbuf = msg.u.ask_channel.num_subbuf;
@@ -1404,7 +1401,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                }
 
                ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
-                               len, channel);
+                               len, channel, 0);
                if (ret < 0) {
                        /* error receiving from sessiond */
                        goto error_fatal;
@@ -1769,6 +1766,12 @@ int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
        }
 
        if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
+               uint64_t contiguous, pushed;
+
+               /* Ease our life a bit. */
+               contiguous = stream->chan->metadata_cache->contiguous;
+               pushed = stream->ust_metadata_pushed;
+
                /*
                 * We can simply check whether all contiguously available data
                 * has been pushed to the ring buffer, since the push operation
@@ -1780,10 +1783,10 @@ int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
                 * metadata has been consumed from the metadata stream.
                 */
                DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
-                       stream->chan->metadata_cache->contiguous,
-                       stream->ust_metadata_pushed);
-               if (stream->chan->metadata_cache->contiguous
-                               != stream->ust_metadata_pushed) {
+                               contiguous, pushed);
+               assert(((int64_t) contiguous - pushed) >= 0);
+               if ((contiguous != pushed) ||
+                               (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
                        ret = 1;        /* Data is pending */
                        goto end;
                }
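
The pending check above compares how many bytes are contiguously available in the metadata cache with how many have been pushed to the ring buffer. The snippet below is a reduced model of that comparison with made-up values; note that the committed condition additionally reports pending data while the cache is still empty (contiguous == 0), which this simplification leaves out.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Metadata is pending while the contiguously cached bytes have not all been pushed. */
static int metadata_pending(uint64_t contiguous, uint64_t pushed)
{
	/* Pushed data can never overtake what the cache has received. */
	assert((int64_t) (contiguous - pushed) >= 0);
	return contiguous != pushed;
}

int main(void)
{
	printf("%d\n", metadata_pending(4096, 4096));	/* 0: everything pushed */
	printf("%d\n", metadata_pending(4096, 1024));	/* 1: data pending */
	return 0;
}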
@@ -1819,7 +1822,6 @@ end:
  */
 void lttng_ustconsumer_close_metadata(struct lttng_ht *metadata_ht)
 {
-       int ret;
        struct lttng_ht_iter iter;
        struct lttng_consumer_stream *stream;
 
@@ -1831,17 +1833,16 @@ void lttng_ustconsumer_close_metadata(struct lttng_ht *metadata_ht)
        rcu_read_lock();
        cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
                        node.node) {
-               int fd = stream->wait_fd;
-
+               pthread_mutex_lock(&stream->chan->lock);
                /*
-                * Whatever happens here we have to continue to try to close every
-                * streams. Let's report at least the error on failure.
+                * Whatever the return value, we must keep trying to close every
+                * stream, so ignore it.
                 */
-               ret = ustctl_stream_close_wakeup_fd(stream->ustream);
-               if (ret) {
-                       ERR("Unable to close metadata stream fd %d ret %d", fd, ret);
-               }
-               DBG("Metadata wait fd %d closed", fd);
+               (void) _close_metadata(stream->chan);
+               DBG("Metadata wait fd %d and poll pipe fd %d closed", stream->wait_fd,
+                               stream->ust_metadata_poll_pipe[1]);
+               pthread_mutex_unlock(&stream->chan->lock);
+
        }
        rcu_read_unlock();
 }
@@ -1856,8 +1857,14 @@ void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
        }
 }
 
+/*
+ * Please refer to consumer-timer.c before adding any lock within this
+ * function or any of its callees. Timers have a very strict locking
+ * semantic with respect to teardown. Failure to respect this semantic
+ * introduces deadlocks.
+ */
 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
-               struct lttng_consumer_channel *channel)
+               struct lttng_consumer_channel *channel, int timer)
 {
        struct lttcomm_metadata_request_msg request;
        struct lttcomm_consumer_msg msg;
@@ -1883,12 +1890,18 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
 
        request.session_id = channel->session_id;
        request.session_id_per_pid = channel->session_id_per_pid;
-       request.uid = channel->uid;
+       /*
+        * Request the application UID here so the metadata of that application can
+        * be sent back. The channel UID corresponds to the user UID of the session
+        * used for the rights on the stream file(s).
+        */
+       request.uid = channel->ust_app_uid;
        request.key = channel->key;
+
        DBG("Sending metadata request to sessiond, session id %" PRIu64
-                       ", per-pid %" PRIu64,
-                       channel->session_id,
-                       channel->session_id_per_pid);
+                       ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
+                       request.session_id, request.session_id_per_pid, request.uid,
+                       request.key);
 
        pthread_mutex_lock(&ctx->metadata_socket_lock);
        ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
@@ -1945,7 +1958,7 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        }
 
        ret_code = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
-                       key, offset, len, channel);
+                       key, offset, len, channel, timer);
        if (ret_code >= 0) {
                /*
                 * Only send the status msg if the sessiond is alive meaning a positive
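
The new timer parameter threaded through lttng_ustconsumer_request_metadata(), lttng_ustconsumer_recv_metadata() and consumer_metadata_cache_flushed() tells the callee whether it is running on behalf of the metadata timer, whose strict locking rules at teardown are the subject of the comment added above lttng_ustconsumer_request_metadata(). How the flag is actually consumed lives in consumer-timer.c and consumer-metadata-cache.c; the sketch below only shows the general shape of passing a caller-context flag down a call chain so a timer-driven path can skip taking a lock its caller already holds, which is an assumed policy for illustration, not the lttng-tools one.

#include <pthread.h>

struct channel {
	pthread_mutex_t lock;
	int flushed;
};

/* Callee shared by both paths; only the non-timer path takes the lock itself. */
static int check_flushed(struct channel *c, int called_from_timer)
{
	int ret;

	if (!called_from_timer) {
		pthread_mutex_lock(&c->lock);
	}
	ret = !c->flushed;
	if (!called_from_timer) {
		pthread_mutex_unlock(&c->lock);
	}
	return ret;
}

/* Command-thread entry point: no relevant lock held yet. */
static int request_from_command(struct channel *c)
{
	return check_flushed(c, 0);
}

/* Timer entry point: in this sketch, the timer callback already holds c->lock. */
static int request_from_timer(struct channel *c)
{
	return check_flushed(c, 1);
}

int main(void)
{
	struct channel c = { .lock = PTHREAD_MUTEX_INITIALIZER, .flushed = 0 };

	(void) request_from_command(&c);
	pthread_mutex_lock(&c.lock);	/* simulate the timer context holding the lock */
	(void) request_from_timer(&c);
	pthread_mutex_unlock(&c.lock);
	return 0;
}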