kref_init(&metadata_cache->refcount);
session->metadata_cache = metadata_cache;
INIT_LIST_HEAD(&metadata_cache->metadata_stream);
+ memcpy(&metadata_cache->uuid, &session->uuid,
+ sizeof(metadata_cache->uuid));
list_add(&session->list, &sessions);
mutex_unlock(&sessions_mutex);
return session;
}
list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
_lttng_metadata_channel_hangup(metadata_stream);
+ if (session->pid_tracker)
+ lttng_pid_tracker_destroy(session->pid_tracker);
kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
list_del(&session->list);
mutex_unlock(&sessions_mutex);
*/
list_for_each_entry(event, &chan->session->events, list) {
if (!strcmp(event->desc->name, event_param->name)) {
- ret = -EEXIST;
- goto exist;
+ /*
+ * Allow events with the same name to appear in
+ * different channels.
+ */
+ if (event->chan == chan) {
+ ret = -EEXIST;
+ goto exist;
+ }
}
}
event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
ret = -ENOENT;
goto register_error;
}
- ret = kabi_2635_tracepoint_probe_register(event_param->name,
+ ret = lttng_wrapper_tracepoint_probe_register(event->desc->kname,
event->desc->probe_callback,
event);
if (ret) {
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
- ret = kabi_2635_tracepoint_probe_unregister(event->desc->name,
+ ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
event->desc->probe_callback,
event);
if (ret)
kmem_cache_free(event_cache, event);
}
+/*
+ * Add a PID to the session's PID tracker, or track all PIDs.
+ *
+ * pid == -1 means "track all": the tracker (if any) is torn down, since
+ * a NULL tracker means every PID is traced.  pid >= 0 adds that PID to
+ * the tracker, creating it first if needed.  pid < -1 is rejected.
+ *
+ * Takes sessions_mutex internally.  Returns 0 on success, -EINVAL for
+ * pid < -1, -ENOMEM on allocation failure, or the error returned by
+ * lttng_pid_tracker_add().
+ */
+int lttng_session_track_pid(struct lttng_session *session, int pid)
+{
+	int ret;
+
+	if (pid < -1)
+		return -EINVAL;
+	mutex_lock(&sessions_mutex);
+	if (pid == -1) {
+		/* track all pids: destroy tracker. */
+		if (session->pid_tracker) {
+			struct lttng_pid_tracker *lpf;
+
+			lpf = session->pid_tracker;
+			/*
+			 * Unpublish first, then wait for a grace period so
+			 * no RCU reader can still hold a reference before
+			 * the tracker is destroyed.
+			 */
+			rcu_assign_pointer(session->pid_tracker, NULL);
+			synchronize_trace();
+			lttng_pid_tracker_destroy(lpf);
+		}
+		ret = 0;
+	} else {
+		if (!session->pid_tracker) {
+			struct lttng_pid_tracker *lpf;
+
+			lpf = lttng_pid_tracker_create();
+			if (!lpf) {
+				ret = -ENOMEM;
+				goto unlock;
+			}
+			ret = lttng_pid_tracker_add(lpf, pid);
+			if (ret) {
+				/*
+				 * Do not publish an empty tracker on add
+				 * failure: that would silently switch the
+				 * session from "track all" (NULL tracker)
+				 * to "track none" while reporting an error.
+				 * Never published, so no grace period needed.
+				 */
+				lttng_pid_tracker_destroy(lpf);
+				goto unlock;
+			}
+			rcu_assign_pointer(session->pid_tracker, lpf);
+		} else {
+			ret = lttng_pid_tracker_add(session->pid_tracker, pid);
+		}
+	}
+unlock:
+	mutex_unlock(&sessions_mutex);
+	return ret;
+}
+
+/*
+ * Remove a PID from the session's PID tracker, or untrack all PIDs.
+ *
+ * pid == -1 means "untrack all": the current tracker is replaced by a
+ * freshly-created empty tracker.  pid >= 0 removes that single PID from
+ * the existing tracker.  pid < -1 is rejected.
+ *
+ * Takes sessions_mutex internally.  Returns 0 on success, -EINVAL for
+ * pid < -1, -ENOMEM if the empty tracker cannot be allocated, -ENOENT
+ * when removing a pid while no tracker exists, or the error returned
+ * by lttng_pid_tracker_del().
+ */
+int lttng_session_untrack_pid(struct lttng_session *session, int pid)
+{
+	int ret;
+
+	if (pid < -1)
+		return -EINVAL;
+	mutex_lock(&sessions_mutex);
+	if (pid == -1) {
+		/* untrack all pids: replace by empty tracker. */
+		struct lttng_pid_tracker *old_lpf = session->pid_tracker;
+		struct lttng_pid_tracker *lpf;
+
+		lpf = lttng_pid_tracker_create();
+		if (!lpf) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+		/*
+		 * Publish the new (empty) tracker before reclaiming the
+		 * old one: the pointer is published with rcu_assign_pointer,
+		 * so the old tracker may only be destroyed after a grace
+		 * period (synchronize_trace) has elapsed.  This ordering
+		 * is load-bearing; do not reorder these statements.
+		 */
+		rcu_assign_pointer(session->pid_tracker, lpf);
+		synchronize_trace();
+		if (old_lpf)
+			lttng_pid_tracker_destroy(old_lpf);
+		ret = 0;
+	} else {
+		if (!session->pid_tracker) {
+			/* No tracker: there is no single pid to remove. */
+			ret = -ENOENT;
+			goto unlock;
+		}
+		ret = lttng_pid_tracker_del(session->pid_tracker, pid);
+	}
+unlock:
+	mutex_unlock(&sessions_mutex);
+	return ret;
+}
+
/*
* Serialize at most one packet worth of metadata into a metadata
* channel.
/*
* Ensure we support multiple get_next / put sequences followed
- * by put_next.
+ * by put_next. The metadata stream lock internally protects
+ * reading the metadata cache. It can indeed be read
+ * concurrently by "get_next_subbuf" and "flush" operations on
+ * the buffer invoked by different processes.
*/
+ mutex_lock(&stream->lock);
WARN_ON(stream->metadata_in < stream->metadata_out);
if (stream->metadata_in != stream->metadata_out)
- return 0;
+ goto end;
len = stream->metadata_cache->metadata_written -
stream->metadata_in;
if (!len)
- return 0;
+ goto end;
reserve_len = min_t(size_t,
stream->transport->ops.packet_avail_size(chan),
len);
ret = reserve_len;
end:
+ mutex_unlock(&stream->lock);
return ret;
}
{
int ret;
+ ret = wrapper_lttng_fixup_sig(THIS_MODULE);
+ if (ret)
+ return ret;
+
+ ret = lttng_tracepoint_init();
+ if (ret)
+ return ret;
event_cache = KMEM_CACHE(lttng_event, 0);
- if (!event_cache)
- return -ENOMEM;
+ if (!event_cache) {
+ ret = -ENOMEM;
+ goto error_kmem;
+ }
ret = lttng_abi_init();
if (ret)
goto error_abi;
+ ret = lttng_logger_init();
+ if (ret)
+ goto error_logger;
return 0;
+
+error_logger:
+ lttng_abi_exit();
error_abi:
kmem_cache_destroy(event_cache);
+error_kmem:
+ lttng_tracepoint_exit();
return ret;
}
{
struct lttng_session *session, *tmpsession;
+ lttng_logger_exit();
lttng_abi_exit();
list_for_each_entry_safe(session, tmpsession, &sessions, list)
lttng_session_destroy(session);
kmem_cache_destroy(event_cache);
+ lttng_tracepoint_exit();
}
module_exit(lttng_events_exit);
MODULE_DESCRIPTION("LTTng Events");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
__stringify(LTTNG_MODULES_MINOR_VERSION) "."
- __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION));
+ __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
+ LTTNG_MODULES_EXTRAVERSION);