goto err_free_cache;
metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
kref_init(&metadata_cache->refcount);
+ mutex_init(&metadata_cache->lock);
session->metadata_cache = metadata_cache;
INIT_LIST_HEAD(&metadata_cache->metadata_stream);
+ memcpy(&metadata_cache->uuid, &session->uuid,
+ sizeof(metadata_cache->uuid));
list_add(&session->list, &sessions);
mutex_unlock(&sessions_mutex);
return session;
ret = -ENOENT;
goto register_error;
}
- ret = kabi_2635_tracepoint_probe_register(event->desc->kname,
+ ret = lttng_wrapper_tracepoint_probe_register(event->desc->kname,
event->desc->probe_callback,
event);
if (ret) {
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- ret = kabi_2635_tracepoint_probe_unregister(event->desc->kname,
+ ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
event->desc->probe_callback,
event);
if (ret)
/*
* Serialize at most one packet worth of metadata into a metadata
* channel.
- * We have exclusive access to our metadata buffer (protected by the
- * sessions_mutex), so we can do racy operations such as looking for
- * remaining space left in packet and write, since mutual exclusion
- * protects us from concurrent writes.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations such as looking for remaining space left in
+ * packet and write, since mutual exclusion protects us from concurrent writes.
+ * Mutual exclusion on the metadata cache allows us to read the cache content
+ * without racing against reallocation of the cache by updates.
* Returns the number of bytes written in the channel, 0 if no data
* was written and a negative value on error.
*/
size_t len, reserve_len;
/*
- * Ensure we support mutiple get_next / put sequences followed
- * by put_next.
+	 * Ensure we support multiple get_next / put sequences followed by
+ * put_next. The metadata cache lock protects reading the metadata
+ * cache. It can indeed be read concurrently by "get_next_subbuf" and
+ * "flush" operations on the buffer invoked by different processes.
+ * Moreover, since the metadata cache memory can be reallocated, we
+ * need to have exclusive access against updates even though we only
+ * read it.
*/
+ mutex_lock(&stream->metadata_cache->lock);
WARN_ON(stream->metadata_in < stream->metadata_out);
if (stream->metadata_in != stream->metadata_out)
- return 0;
+ goto end;
len = stream->metadata_cache->metadata_written -
stream->metadata_in;
if (!len)
- return 0;
+ goto end;
reserve_len = min_t(size_t,
stream->transport->ops.packet_avail_size(chan),
len);
ret = reserve_len;
end:
+ mutex_unlock(&stream->metadata_cache->lock);
return ret;
}
/*
* Write the metadata to the metadata cache.
* Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access from
+ * thread outputting metadata content to ring buffer.
*/
int lttng_metadata_printf(struct lttng_session *session,
const char *fmt, ...)
return -ENOMEM;
len = strlen(str);
+ mutex_lock(&session->metadata_cache->lock);
if (session->metadata_cache->metadata_written + len >
session->metadata_cache->cache_alloc) {
char *tmp_cache_realloc;
session->metadata_cache->metadata_written,
str, len);
session->metadata_cache->metadata_written += len;
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
return 0;
err:
+ mutex_unlock(&session->metadata_cache->lock);
kfree(str);
return -ENOMEM;
}
{
int ret;
+ ret = wrapper_lttng_fixup_sig(THIS_MODULE);
+ if (ret)
+ return ret;
+
+ ret = lttng_tracepoint_init();
+ if (ret)
+ return ret;
event_cache = KMEM_CACHE(lttng_event, 0);
- if (!event_cache)
- return -ENOMEM;
+ if (!event_cache) {
+ ret = -ENOMEM;
+ goto error_kmem;
+ }
ret = lttng_abi_init();
if (ret)
goto error_abi;
lttng_abi_exit();
error_abi:
kmem_cache_destroy(event_cache);
+error_kmem:
+ lttng_tracepoint_exit();
return ret;
}
list_for_each_entry_safe(session, tmpsession, &sessions, list)
lttng_session_destroy(session);
kmem_cache_destroy(event_cache);
+ lttng_tracepoint_exit();
}
module_exit(lttng_events_exit);