lttng_ustconsumer_del_channel(channel);
lttng_ustconsumer_free_channel(channel);
}
+
+ if (channel->trace_chunk) {
+ lttng_trace_chunk_put(channel->trace_chunk);
+ }
+
free(channel);
}
}
return run_as_open(shm_path,
O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
- session_credentials->uid, session_credentials->gid);
+ lttng_credentials_get_uid(session_credentials),
+ lttng_credentials_get_gid(session_credentials));
error_shm_path:
return -1;
ERR("Cannot get stream shm path");
}
closeret = run_as_unlink(shm_path,
- channel->buffer_credentials.value.uid,
- channel->buffer_credentials.value.gid);
+ lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
+ channel->buffer_credentials)),
+ lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
+ channel->buffer_credentials)));
if (closeret) {
PERROR("unlink %s", shm_path);
}
/* Try to rmdir all directories under shm_path root. */
if (channel->root_shm_path[0]) {
(void) run_as_rmdir_recursive(channel->root_shm_path,
- channel->buffer_credentials.value.uid,
- channel->buffer_credentials.value.gid,
+ lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
+ channel->buffer_credentials)),
+ lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
+ channel->buffer_credentials)),
LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
}
free(stream_fds);
return ret;
}
+/*
+ * Reset the stream's consumed metadata position.
+ *
+ * Used when the metadata cache has been invalidated (previously pushed
+ * content overwritten) so that the metadata poll thread re-consumes the
+ * cache from the beginning.
+ */
+static
+void metadata_stream_reset_cache_consumed_position(
+		struct lttng_consumer_stream *stream)
+{
+	/* The caller must already hold the stream lock (checked, not taken). */
+	ASSERT_LOCKED(stream->lock);
+
+	DBG("Reset metadata cache of session %" PRIu64,
+			stream->chan->session_id);
+	/* Rewind so the whole cache is pushed again from offset 0. */
+	stream->ust_metadata_pushed = 0;
+}
+
/*
* Receive the metadata updates from the sessiond. Supports receiving
* overlapping metadata, but is needs to always belong to a contiguous
{
int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
char *metadata_str;
+ enum consumer_metadata_cache_write_status cache_write_status;
DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);
health_code_update();
pthread_mutex_lock(&channel->metadata_cache->lock);
- ret = consumer_metadata_cache_write(channel, offset, len, version,
+ cache_write_status = consumer_metadata_cache_write(
+ channel->metadata_cache, offset, len, version,
metadata_str);
- if (ret < 0) {
+ pthread_mutex_unlock(&channel->metadata_cache->lock);
+ switch (cache_write_status) {
+ case CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE:
+ /*
+ * The write entirely overlapped with existing contents of the
+ * same metadata version (same content); there is nothing to do.
+ */
+ break;
+ case CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED:
+ /*
+ * The metadata cache was invalidated (previously pushed
+ * content has been overwritten). Reset the stream's consumed
+ * metadata position to ensure the metadata poll thread consumes
+ * the whole cache.
+ */
+ pthread_mutex_lock(&channel->metadata_stream->lock);
+ metadata_stream_reset_cache_consumed_position(
+ channel->metadata_stream);
+ pthread_mutex_unlock(&channel->metadata_stream->lock);
+ /* Fall-through. */
+ case CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT:
+ /*
+ * In both cases, the metadata poll thread has new data to
+ * consume.
+ */
+ ret = consumer_metadata_wakeup_pipe(channel);
+ if (ret) {
+ ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
+ goto end_free;
+ }
+ break;
+ case CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR:
/* Unable to handle metadata. Notify session daemon. */
ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
/*
* not have been updated which could create an infinite loop below when
* waiting for the metadata cache to be flushed.
*/
- pthread_mutex_unlock(&channel->metadata_cache->lock);
goto end_free;
+ default:
+ abort();
}
- pthread_mutex_unlock(&channel->metadata_cache->lock);
if (!wait) {
goto end_free;
struct ustctl_consumer_channel_attr attr;
const uint64_t chunk_id = msg.u.ask_channel.chunk_id.value;
const struct lttng_credentials buffer_credentials = {
- .uid = msg.u.ask_channel.buffer_credentials.uid,
- .gid = msg.u.ask_channel.buffer_credentials.gid,
+ .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.uid),
+ .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.gid),
};
/* Create a plain object and reserve a channel key. */
case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
{
const struct lttng_credentials credentials = {
- .uid = msg.u.create_trace_chunk.credentials.value.uid,
- .gid = msg.u.create_trace_chunk.credentials.value.gid,
+ .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid),
+ .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid),
};
const bool is_local_trace =
!msg.u.create_trace_chunk.relayd_id.is_set;
msg.u.trace_chunk_exists.chunk_id);
goto end_msg_sessiond;
}
+ case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
+ {
+ const uint64_t key = msg.u.open_channel_packets.key;
+ struct lttng_consumer_channel *channel =
+ consumer_find_channel(key);
+
+ if (channel) {
+ pthread_mutex_lock(&channel->lock);
+ ret_code = lttng_consumer_open_channel_packets(channel);
+ pthread_mutex_unlock(&channel->lock);
+ } else {
+ /*
+ * The channel could have disappeared in per-pid
+ * buffering mode.
+ */
+ DBG("Channel %" PRIu64 " not found", key);
+ ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
+ }
+
+ health_code_update();
+ goto end_msg_sessiond;
+ }
default:
break;
}
ERR("Cannot get stream shm path");
}
ret = run_as_unlink(shm_path,
- chan->buffer_credentials.value.uid,
- chan->buffer_credentials.value.gid);
+ lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
+ chan->buffer_credentials)),
+ lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
+ chan->buffer_credentials)));
if (ret) {
PERROR("unlink %s", shm_path);
}
/* Try to rmdir all directories under shm_path root. */
if (chan->root_shm_path[0]) {
(void) run_as_rmdir_recursive(chan->root_shm_path,
- chan->buffer_credentials.value.uid,
- chan->buffer_credentials.value.gid,
+ lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
+ chan->buffer_credentials)),
+ lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
+ chan->buffer_credentials)),
LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
}
free(chan->stream_fds);
return ustctl_stream_close_wakeup_fd(stream->ustream);
}
-static
-void metadata_stream_reset_cache_consumed_position(
- struct lttng_consumer_stream *stream)
-{
- DBG("Reset metadata cache of session %" PRIu64,
- stream->chan->session_id);
- stream->ust_metadata_pushed = 0;
-}
-
/*
* Write up to one packet from the metadata cache to the channel.
*
- * Returns the number of bytes pushed in the cache, or a negative value
- * on error.
+ * Returns the number of bytes pushed from the cache into the ring buffer, or a
+ * negative value on error.
*/
static
int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
int ret;
pthread_mutex_lock(&stream->chan->metadata_cache->lock);
- if (stream->chan->metadata_cache->max_offset ==
- stream->ust_metadata_pushed) {
+ if (stream->chan->metadata_cache->contents.size ==
+ stream->ust_metadata_pushed) {
/*
* In the context of a user space metadata channel, a
* change in version can be detected in two ways:
}
write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
- &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
- stream->chan->metadata_cache->max_offset
- - stream->ust_metadata_pushed);
+ &stream->chan->metadata_cache->contents.data[stream->ust_metadata_pushed],
+ stream->chan->metadata_cache->contents.size -
+ stream->ust_metadata_pushed);
assert(write_len != 0);
if (write_len < 0) {
ERR("Writing one metadata packet");
}
stream->ust_metadata_pushed += write_len;
- assert(stream->chan->metadata_cache->max_offset >=
+ assert(stream->chan->metadata_cache->contents.size >=
stream->ust_metadata_pushed);
ret = write_len;
* awaiting on metadata to be pushed out.
*
* The RCU read side lock must be held by the caller.
- *
- * Return 0 if new metadatda is available, EAGAIN if the metadata stream
- * is empty or a negative value on error.
*/
-int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
+enum sync_metadata_status lttng_ustconsumer_sync_metadata(
+ struct lttng_consumer_local_data *ctx,
struct lttng_consumer_stream *metadata_stream)
{
int ret;
- int retry = 0;
+ enum sync_metadata_status status;
struct lttng_consumer_channel *metadata_channel;
assert(ctx);
ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 0);
pthread_mutex_lock(&metadata_stream->lock);
if (ret < 0) {
+ status = SYNC_METADATA_STATUS_ERROR;
goto end;
}
if (consumer_stream_is_deleted(metadata_stream)) {
DBG("Metadata stream %" PRIu64 " was deleted during the metadata synchronization",
metadata_stream->key);
- ret = 0;
+ status = SYNC_METADATA_STATUS_NO_DATA;
goto end;
}
ret = commit_one_metadata_packet(metadata_stream);
- if (ret <= 0) {
+ if (ret < 0) {
+ status = SYNC_METADATA_STATUS_ERROR;
goto end;
} else if (ret > 0) {
- retry = 1;
+ status = SYNC_METADATA_STATUS_NEW_DATA;
+ } else /* ret == 0 */ {
+ status = SYNC_METADATA_STATUS_NO_DATA;
+ goto end;
}
ret = ustctl_snapshot(metadata_stream->ustream);
if (ret < 0) {
- if (errno != EAGAIN) {
- ERR("Sync metadata, taking UST snapshot");
- goto end;
- }
- DBG("No new metadata when syncing them.");
- /* No new metadata, exit. */
- ret = ENODATA;
+ ERR("Failed to take a snapshot of the metadata ring-buffer positions, ret = %d", ret);
+ status = SYNC_METADATA_STATUS_ERROR;
goto end;
}
- /*
- * After this flush, we still need to extract metadata.
- */
- if (retry) {
- ret = EAGAIN;
- }
-
end:
- return ret;
+ return status;
}
/*
}
} else {
pthread_mutex_lock(&stream->chan->metadata_cache->lock);
- cache_empty = stream->chan->metadata_cache->max_offset ==
- stream->ust_metadata_pushed;
+ cache_empty = stream->chan->metadata_cache->contents.size ==
+ stream->ust_metadata_pushed;
pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
}
} while (!got_subbuffer);
static int signal_metadata(struct lttng_consumer_stream *stream,
struct lttng_consumer_local_data *ctx)
{
+ ASSERT_LOCKED(stream->metadata_rdv_lock);
return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
}
assert(stream);
assert(stream->ustream);
+ ASSERT_LOCKED(stream->lock);
DBG("UST consumer checking data pending");
uint64_t contiguous, pushed;
/* Ease our life a bit. */
- contiguous = stream->chan->metadata_cache->max_offset;
+ pthread_mutex_lock(&stream->chan->metadata_cache->lock);
+ contiguous = stream->chan->metadata_cache->contents.size;
+ pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
pushed = stream->ust_metadata_pushed;
/*