+ return event;
+}
+
+/* Only used for tracepoints for now. */
+/*
+ * Register the instrumentation backing @event (tracepoint probe or
+ * syscall filter). Idempotent: a no-op if already registered.
+ * NOTE(review): callers appear to hold the sessions mutex — confirm.
+ */
+static
+void register_event(struct lttng_event *event)
+{
+ const struct lttng_event_desc *desc;
+ int ret = -EINVAL;
+
+ if (event->registered)
+ return;
+
+ desc = event->desc;
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
+ desc->probe_callback,
+ event);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_enable(event->chan,
+ desc->name);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ case LTTNG_KERNEL_UPROBE:
+ case LTTNG_KERNEL_KRETPROBE:
+ case LTTNG_KERNEL_NOOP:
+ /* These instrumentation types need no per-event registration here. */
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+ /* Only mark registered on success so a later retry is possible. */
+ if (!ret)
+ event->registered = 1;
+}
+
+/*
+ * Only used internally at session destruction.
+ *
+ * Undo register_event() / the probe-specific registration for @event.
+ * Returns 0 on success (including when the event was never registered),
+ * or a negative errno from the underlying unregister operation.
+ */
+int _lttng_event_unregister(struct lttng_event *event)
+{
+ const struct lttng_event_desc *desc;
+ int ret = -EINVAL;
+
+ if (!event->registered)
+ return 0;
+
+ desc = event->desc;
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
+ event->desc->probe_callback,
+ event);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ lttng_kprobes_unregister(event);
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ lttng_kretprobes_unregister(event);
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ ret = lttng_syscall_filter_disable(event->chan,
+ desc->name);
+ break;
+ case LTTNG_KERNEL_NOOP:
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ lttng_uprobes_unregister(event);
+ ret = 0;
+ break;
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+ /* Clear the flag only when the unregister actually succeeded. */
+ if (!ret)
+ event->registered = 0;
+ return ret;
+}
+
+/*
+ * Only used internally at session destruction.
+ *
+ * Release the resources owned by @event: drop the probe/module
+ * reference, free probe-private state, unlink from the session event
+ * list, destroy contexts and free the event object itself.
+ * The event must already be unregistered (see _lttng_event_unregister).
+ */
+static
+void _lttng_event_destroy(struct lttng_event *event)
+{
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ lttng_event_put(event->desc);
+ break;
+ case LTTNG_KERNEL_KPROBE:
+ module_put(event->desc->owner);
+ lttng_kprobes_destroy_private(event);
+ break;
+ case LTTNG_KERNEL_KRETPROBE:
+ module_put(event->desc->owner);
+ lttng_kretprobes_destroy_private(event);
+ break;
+ case LTTNG_KERNEL_NOOP:
+ case LTTNG_KERNEL_SYSCALL:
+ /* No private state nor module reference to release. */
+ break;
+ case LTTNG_KERNEL_UPROBE:
+ module_put(event->desc->owner);
+ lttng_uprobes_destroy_private(event);
+ break;
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ default:
+ WARN_ON_ONCE(1);
+ }
+ list_del(&event->list);
+ lttng_destroy_context(event->ctx);
+ kmem_cache_free(event_cache, event);
+}
+
+/*
+ * Map a tracker type to the corresponding id tracker embedded in
+ * @session. Returns NULL (and warns once) for an unknown type.
+ */
+struct lttng_id_tracker *get_tracker(struct lttng_session *session,
+ enum tracker_type tracker_type)
+{
+ switch (tracker_type) {
+ case TRACKER_PID:
+ return &session->pid_tracker;
+ case TRACKER_VPID:
+ return &session->vpid_tracker;
+ case TRACKER_UID:
+ return &session->uid_tracker;
+ case TRACKER_VUID:
+ return &session->vuid_tracker;
+ case TRACKER_GID:
+ return &session->gid_tracker;
+ case TRACKER_VGID:
+ return &session->vgid_tracker;
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+}
+
+/*
+ * Track @id in the tracker selected by @tracker_type.
+ * id == -1 is the "track all" wildcard: the tracker is destroyed so no
+ * filtering takes place. Returns 0 on success or a negative errno.
+ */
+int lttng_session_track_id(struct lttng_session *session,
+ enum tracker_type tracker_type, int id)
+{
+ struct lttng_id_tracker *tracker;
+ int ret;
+
+ tracker = get_tracker(session, tracker_type);
+ if (!tracker)
+ return -EINVAL;
+ if (id < -1)
+ return -EINVAL;
+ mutex_lock(&sessions_mutex);
+ if (id == -1) {
+ /* track all ids: destroy tracker. */
+ lttng_id_tracker_destroy(tracker, true);
+ ret = 0;
+ } else {
+ ret = lttng_id_tracker_add(tracker, id);
+ }
+ mutex_unlock(&sessions_mutex);
+ return ret;
+}
+
+/*
+ * Stop tracking @id in the tracker selected by @tracker_type.
+ * id == -1 means "untrack everything": the tracker is replaced by an
+ * empty set, so nothing matches. Returns 0 or a negative errno.
+ */
+int lttng_session_untrack_id(struct lttng_session *session,
+ enum tracker_type tracker_type, int id)
+{
+ struct lttng_id_tracker *tracker;
+ int ret;
+
+ tracker = get_tracker(session, tracker_type);
+ if (!tracker)
+ return -EINVAL;
+ if (id < -1)
+ return -EINVAL;
+ mutex_lock(&sessions_mutex);
+ if (id == -1) {
+ /* untrack all ids: replace by empty tracker. */
+ ret = lttng_id_tracker_empty_set(tracker);
+ } else {
+ ret = lttng_id_tracker_del(tracker, id);
+ }
+ mutex_unlock(&sessions_mutex);
+ return ret;
+}
+
+/*
+ * seq_file .start: take the sessions mutex (released in id_list_stop)
+ * and return the *pos-th hash node, or the special "tracker disabled"
+ * sentinel (the NULL tracker pointer itself) when no tracker is set.
+ */
+static
+void *id_list_start(struct seq_file *m, loff_t *pos)
+{
+ struct lttng_id_tracker *id_tracker = m->private;
+ struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
+ struct lttng_id_hash_node *e;
+ int iter = 0, i;
+
+ mutex_lock(&sessions_mutex);
+ if (id_tracker_p) {
+ /* Walk all hash buckets, counting entries up to *pos. */
+ for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
+ struct hlist_head *head = &id_tracker_p->id_hash[i];
+
+ lttng_hlist_for_each_entry(e, head, hlist) {
+ if (iter++ >= *pos)
+ return e;
+ }
+ }
+ } else {
+ /* ID tracker disabled. */
+ if (iter >= *pos && iter == 0) {
+ return id_tracker_p; /* empty tracker */
+ }
+ iter++;
+ }
+ /* End of list */
+ return NULL;
+}
+
+/* Called with sessions_mutex held. */
+/*
+ * seq_file .next: advance *ppos and re-walk the hash table to find the
+ * entry at the new position (same traversal strategy as id_list_start).
+ */
+static
+void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
+{
+ struct lttng_id_tracker *id_tracker = m->private;
+ struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
+ struct lttng_id_hash_node *e;
+ int iter = 0, i;
+
+ (*ppos)++;
+ if (id_tracker_p) {
+ for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
+ struct hlist_head *head = &id_tracker_p->id_hash[i];
+
+ lttng_hlist_for_each_entry(e, head, hlist) {
+ if (iter++ >= *ppos)
+ return e;
+ }
+ }
+ } else {
+ /* ID tracker disabled. */
+ if (iter >= *ppos && iter == 0)
+ return p; /* empty tracker */
+ iter++;
+ }
+
+ /* End of list */
+ return NULL;
+}
+
+/* seq_file .stop: drop the mutex taken in id_list_start. */
+static
+void id_list_stop(struct seq_file *m, void *p)
+{
+ mutex_unlock(&sessions_mutex);
+}
+
+/*
+ * seq_file .show: print one tracked id in the session configuration
+ * syntax. The "tracker disabled" sentinel is rendered as id -1.
+ */
+static
+int id_list_show(struct seq_file *m, void *p)
+{
+ struct lttng_id_tracker *id_tracker = m->private;
+ struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
+ int id;
+
+ if (p == id_tracker_p) {
+ /* Tracker disabled. */
+ id = -1;
+ } else {
+ const struct lttng_id_hash_node *e = p;
+
+ id = lttng_id_tracker_get_node_id(e);
+ }
+ switch (id_tracker->tracker_type) {
+ case TRACKER_PID:
+ seq_printf(m, "process { pid = %d; };\n", id);
+ break;
+ case TRACKER_VPID:
+ seq_printf(m, "process { vpid = %d; };\n", id);
+ break;
+ case TRACKER_UID:
+ seq_printf(m, "user { uid = %d; };\n", id);
+ break;
+ case TRACKER_VUID:
+ seq_printf(m, "user { vuid = %d; };\n", id);
+ break;
+ case TRACKER_GID:
+ seq_printf(m, "group { gid = %d; };\n", id);
+ break;
+ case TRACKER_VGID:
+ seq_printf(m, "group { vgid = %d; };\n", id);
+ break;
+ default:
+ seq_printf(m, "UNKNOWN { field = %d };\n", id);
+ }
+ return 0;
+}
+
+/* seq_file iterator over the ids of one lttng_id_tracker. */
+static
+const struct seq_operations lttng_tracker_ids_list_seq_ops = {
+ .start = id_list_start,
+ .next = id_list_next,
+ .stop = id_list_stop,
+ .show = id_list_show,
+};
+
+/*
+ * File .open: attach the tracker-id seq_file iterator to @file.
+ * m->private (the tracker) is filled in by the caller,
+ * lttng_session_list_tracker_ids().
+ */
+static
+int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
+{
+ /* Fix: identifier was corrupted to "<tng_..." by a broken &lt; entity. */
+ return seq_open(file, &lttng_tracker_ids_list_seq_ops);
+}
+
+/*
+ * File .release: tear down the seq_file and drop the session file
+ * reference taken when the listing fd was created.
+ */
+static
+int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *m = file->private_data;
+ struct lttng_id_tracker *id_tracker = m->private;
+ int ret;
+
+ WARN_ON_ONCE(!id_tracker);
+ ret = seq_release(inode, file);
+ if (!ret)
+ fput(id_tracker->session->file);
+ return ret;
+}
+
+/* File operations backing the anonymous tracker-ids listing fd. */
+const struct file_operations lttng_tracker_ids_list_fops = {
+ .owner = THIS_MODULE,
+ .open = lttng_tracker_ids_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = lttng_tracker_ids_list_release,
+};
+
+/*
+ * Create an anonymous file whose reads enumerate the ids currently
+ * tracked by @tracker_type for @session, and install it in the caller's
+ * fd table. Holds an extra reference on the session file for the
+ * lifetime of the listing fd (dropped in the release handler).
+ * Returns the new fd on success, a negative errno on failure.
+ */
+int lttng_session_list_tracker_ids(struct lttng_session *session,
+ enum tracker_type tracker_type)
+{
+ struct file *tracker_ids_list_file;
+ struct seq_file *m;
+ int file_fd, ret;
+
+ file_fd = lttng_get_unused_fd();
+ if (file_fd < 0) {
+ ret = file_fd;
+ goto fd_error;
+ }
+
+ /* Fix: fops pointer was corrupted to "<tng_..." by a broken &lt; entity. */
+ tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
+ &lttng_tracker_ids_list_fops,
+ NULL, O_RDWR);
+ if (IS_ERR(tracker_ids_list_file)) {
+ ret = PTR_ERR(tracker_ids_list_file);
+ goto file_error;
+ }
+ if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+ ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
+ if (ret < 0)
+ goto open_error;
+ m = tracker_ids_list_file->private_data;
+
+ /* Hand the tracker to the seq_file iterator. */
+ m->private = get_tracker(session, tracker_type);
+ BUG_ON(!m->private);
+ fd_install(file_fd, tracker_ids_list_file);
+
+ return file_fd;
+
+open_error:
+ atomic_long_dec(&session->file->f_count);
+refcount_error:
+ fput(tracker_ids_list_file);
+file_error:
+ put_unused_fd(file_fd);
+fd_error:
+ return ret;
+}
+
+/*
+ * Enabler management.
+ */
+/* Return 1 if @desc_name matches the star-glob @pattern, 0 otherwise. */
+static
+int lttng_match_enabler_star_glob(const char *desc_name,
+ const char *pattern)
+{
+ if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
+ desc_name, LTTNG_SIZE_MAX))
+ return 0;
+ return 1;
+}
+
+/* Return 1 on exact name match, 0 otherwise. */
+static
+int lttng_match_enabler_name(const char *desc_name,
+ const char *name)
+{
+ if (strcmp(desc_name, name))
+ return 0;
+ return 1;
+}
+
+/*
+ * Check whether event descriptor @desc matches @enabler.
+ * For syscalls, the "compat_" / "syscall_entry_" / "syscall_exit_"
+ * prefixes are stripped before matching, so enablers match the bare
+ * syscall name. Returns 1 on match, 0 on mismatch, negative errno on
+ * malformed input.
+ */
+static
+int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ const char *desc_name, *enabler_name;
+
+ enabler_name = enabler->event_param.name;
+ switch (enabler->event_param.instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ desc_name = desc->name;
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ desc_name = desc->name;
+ if (!strncmp(desc_name, "compat_", strlen("compat_")))
+ desc_name += strlen("compat_");
+ if (!strncmp(desc_name, "syscall_exit_",
+ strlen("syscall_exit_"))) {
+ desc_name += strlen("syscall_exit_");
+ } else if (!strncmp(desc_name, "syscall_entry_",
+ strlen("syscall_entry_"))) {
+ desc_name += strlen("syscall_entry_");
+ } else {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+ switch (enabler->type) {
+ case LTTNG_ENABLER_STAR_GLOB:
+ return lttng_match_enabler_star_glob(desc_name, enabler_name);
+ case LTTNG_ENABLER_NAME:
+ return lttng_match_enabler_name(desc_name, enabler_name);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Return 1 when @event belongs to @enabler's channel and its descriptor
+ * matches the enabler's name/instrumentation, 0 otherwise.
+ */
+static
+int lttng_event_match_enabler(struct lttng_event *event,
+ struct lttng_enabler *enabler)
+{
+ if (enabler->event_param.instrumentation != event->instrumentation)
+ return 0;
+ if (lttng_desc_match_enabler(event->desc, enabler)
+ && event->chan == enabler->chan)
+ return 1;
+ else
+ return 0;
+}
+
+/*
+ * Find the backward reference from @event to @enabler, or NULL if no
+ * such reference exists yet.
+ */
+static
+struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
+ struct lttng_enabler *enabler)
+{
+ struct lttng_enabler_ref *enabler_ref;
+
+ list_for_each_entry(enabler_ref,
+ &event->enablers_ref_head, node) {
+ if (enabler_ref->ref == enabler)
+ return enabler_ref;
+ }
+ return NULL;
+}
+
+/*
+ * Walk every registered probe descriptor and create a tracepoint event
+ * in @enabler's channel for each descriptor matching the enabler,
+ * unless an event for that (descriptor, channel) pair already exists.
+ * Creation failures are logged but not fatal (best-effort).
+ */
+static
+void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
+{
+ struct lttng_session *session = enabler->chan->session;
+ struct lttng_probe_desc *probe_desc;
+ const struct lttng_event_desc *desc;
+ int i;
+ struct list_head *probe_list;
+
+ probe_list = lttng_get_probe_list_head();
+ /*
+ * For each probe event, if we find that a probe event matches
+ * our enabler, create an associated lttng_event if not
+ * already present.
+ */
+ list_for_each_entry(probe_desc, probe_list, head) {
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int found = 0;
+ struct hlist_head *head;
+ const char *event_name;
+ size_t name_len;
+ uint32_t hash;
+ struct lttng_event *event;
+
+ desc = probe_desc->event_desc[i];
+ if (!lttng_desc_match_enabler(desc, enabler))
+ continue;
+ event_name = desc->name;
+ name_len = strlen(event_name);
+
+ /*
+ * Check if already created.
+ */
+ hash = jhash(event_name, name_len, 0);
+ head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
+ lttng_hlist_for_each_entry(event, head, hlist) {
+ if (event->desc == desc
+ && event->chan == enabler->chan)
+ found = 1;
+ }
+ if (found)
+ continue;
+
+ /*
+ * We need to create an event for this
+ * event probe.
+ */
+ event = _lttng_event_create(enabler->chan,
+ NULL, NULL, desc,
+ LTTNG_KERNEL_TRACEPOINT);
+ if (!event) {
+ printk(KERN_INFO "Unable to create event %s\n",
+ probe_desc->event_desc[i]->name);
+ }
+ }
+ }
+}
+
+/*
+ * Ensure syscall events exist for @enabler's channel by (re)registering
+ * the syscall instrumentation; lttng_syscalls_register is expected to
+ * be idempotent per channel.
+ */
+static
+void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
+{
+ int ret;
+
+ ret = lttng_syscalls_register(enabler->chan, NULL);
+ WARN_ON_ONCE(ret);
+}
+
+/*
+ * Create struct lttng_event if it is missing and present in the list of
+ * tracepoint probes.
+ * Should be called with sessions mutex held.
+ *
+ * Dispatch on the enabler's instrumentation type; only tracepoints and
+ * syscalls support lazy event creation.
+ */
+static
+void lttng_create_event_if_missing(struct lttng_enabler *enabler)
+{
+ switch (enabler->event_param.instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ lttng_create_tracepoint_if_missing(enabler);
+ break;
+ case LTTNG_KERNEL_SYSCALL:
+ lttng_create_syscall_if_missing(enabler);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+/*
+ * Create events associated with an enabler (if not already present),
+ * and add backward reference from the event to the enabler.
+ * Should be called with sessions mutex held.
+ *
+ * Returns 0 on success, -ENOMEM if a backward reference allocation
+ * fails (references created earlier in the walk are kept).
+ */
+static
+int lttng_enabler_ref_events(struct lttng_enabler *enabler)
+{
+ struct lttng_session *session = enabler->chan->session;
+ struct lttng_event *event;
+
+ /* First ensure that probe events are created for this enabler. */
+ lttng_create_event_if_missing(enabler);
+
+ /* For each event matching enabler in session event list. */
+ list_for_each_entry(event, &session->events, list) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_event_match_enabler(event, enabler))
+ continue;
+ enabler_ref = lttng_event_enabler_ref(event, enabler);
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from event to enabler.
+ */
+ enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
+ if (!enabler_ref)
+ return -ENOMEM;
+ enabler_ref->ref = enabler;
+ list_add(&enabler_ref->node,
+ &event->enablers_ref_head);
+ }
+
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_event_link_bytecode(event, enabler);
+
+ /* TODO: merge event context. */
+ }
+ return 0;
+}
+
+/*
+ * Called at module load: connect the probe on all enablers matching
+ * this event.
+ * Called with sessions lock held.
+ *
+ * Re-runs lazy enabler synchronization for every known session so that
+ * newly loaded probes are picked up. Always returns 0.
+ */
+int lttng_fix_pending_events(void)
+{
+ struct lttng_session *session;
+
+ list_for_each_entry(session, &sessions, list)
+ lttng_session_lazy_sync_enablers(session);
+ return 0;
+}
+
+/*
+ * Allocate a new enabler of @type for @chan, initialized from
+ * @event_param, link it into the session's enabler list and trigger a
+ * lazy sync. The enabler starts disabled. Returns NULL on allocation
+ * failure.
+ */
+struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
+ struct lttng_kernel_event *event_param,
+ struct lttng_channel *chan)
+{
+ struct lttng_enabler *enabler;
+
+ enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
+ if (!enabler)
+ return NULL;
+ enabler->type = type;
+ INIT_LIST_HEAD(&enabler->filter_bytecode_head);
+ memcpy(&enabler->event_param, event_param,
+ sizeof(enabler->event_param));
+ enabler->chan = chan;
+ /* ctx left NULL */
+ enabler->enabled = 0;
+ enabler->evtype = LTTNG_TYPE_ENABLER;
+ mutex_lock(&sessions_mutex);
+ list_add(&enabler->node, &enabler->chan->session->enablers_head);
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ mutex_unlock(&sessions_mutex);
+ return enabler;
+}
+
+/* Enable @enabler and propagate the state to matching events. */
+int lttng_enabler_enable(struct lttng_enabler *enabler)
+{
+ mutex_lock(&sessions_mutex);
+ enabler->enabled = 1;
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+/* Disable @enabler and propagate the state to matching events. */
+int lttng_enabler_disable(struct lttng_enabler *enabler)
+{
+ mutex_lock(&sessions_mutex);
+ enabler->enabled = 0;
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ mutex_unlock(&sessions_mutex);
+ return 0;
+}
+
+/*
+ * Copy a filter bytecode program from user space and attach it to
+ * @enabler, then resync the session so matching events link the new
+ * bytecode. Returns 0 on success or a negative errno.
+ */
+int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+ struct lttng_kernel_filter_bytecode __user *bytecode)
+{
+ struct lttng_filter_bytecode_node *bytecode_node;
+ uint32_t bytecode_len;
+ int ret;
+
+ ret = get_user(bytecode_len, &bytecode->len);
+ if (ret)
+ return ret;
+ bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
+ GFP_KERNEL);
+ if (!bytecode_node)
+ return -ENOMEM;
+ /*
+ * Fix: copy_from_user() returns the (positive) number of bytes left
+ * to copy on failure, not an errno; the original propagated that
+ * positive value to the caller. Convert it to -EFAULT.
+ */
+ if (copy_from_user(&bytecode_node->bc, bytecode,
+ sizeof(*bytecode) + bytecode_len)) {
+ ret = -EFAULT;
+ goto error_free;
+ }
+ bytecode_node->enabler = enabler;
+ /* Enforce length based on allocated size */
+ bytecode_node->bc.len = bytecode_len;
+ list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ return 0;
+
+error_free:
+ kfree(bytecode_node);
+ return ret;
+}
+
+/*
+ * Attach an additional callsite to @event. Only uprobe events support
+ * multiple callsites; other instrumentation types return -EINVAL.
+ */
+int lttng_event_add_callsite(struct lttng_event *event,
+ struct lttng_kernel_event_callsite __user *callsite)
+{
+
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_UPROBE:
+ return lttng_uprobes_add_callsite(event, callsite);
+ default:
+ return -EINVAL;
+ }
+}
+
+/* Per-enabler context attachment is not implemented: always -ENOSYS. */
+int lttng_enabler_attach_context(struct lttng_enabler *enabler,
+ struct lttng_kernel_context *context_param)
+{
+ return -ENOSYS;
+}
+
+/*
+ * Free @enabler: its attached filter bytecode nodes, its contexts, and
+ * its membership in the session enabler list.
+ * NOTE(review): events keep dangling enabler_ref entries unless they
+ * are torn down together with the session — confirm caller context.
+ */
+static
+void lttng_enabler_destroy(struct lttng_enabler *enabler)
+{
+ struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
+
+ /* Destroy filter bytecode */
+ list_for_each_entry_safe(filter_node, tmp_filter_node,
+ &enabler->filter_bytecode_head, node) {
+ kfree(filter_node);
+ }
+
+ /* Destroy contexts */
+ lttng_destroy_context(enabler->ctx);
+
+ list_del(&enabler->node);
+ kfree(enabler);
+}
+
+/*
+ * lttng_session_sync_enablers should be called just before starting a
+ * session.
+ * Should be called with sessions mutex held.
+ *
+ * Two passes: (1) make sure every enabler has its matching events
+ * created and back-referenced; (2) recompute each event's enabled
+ * state, register/unregister its probe accordingly, and sync filter
+ * bytecode state.
+ */
+static
+void lttng_session_sync_enablers(struct lttng_session *session)
+{
+ struct lttng_enabler *enabler;
+ struct lttng_event *event;
+
+ list_for_each_entry(enabler, &session->enablers_head, node)
+ lttng_enabler_ref_events(enabler);
+ /*
+ * For each event, if at least one of its enablers is enabled,
+ * and its channel and session transient states are enabled, we
+ * enable the event, else we disable it.
+ */
+ list_for_each_entry(event, &session->events, list) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_bytecode = 0;
+
+ switch (event->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_SYSCALL:
+ /* Enable events */
+ list_for_each_entry(enabler_ref,
+ &event->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+ break;
+ default:
+ /* Not handled with lazy sync. */
+ continue;
+ }
+ /*
+ * Enabled state is based on union of enablers, with
+ * intersection of session and channel transient enable
+ * states.
+ */
+ enabled = enabled && session->tstate && event->chan->tstate;
+
+ WRITE_ONCE(event->enabled, enabled);
+ /*
+ * Sync tracepoint registration with event enabled
+ * state.
+ */
+ if (enabled) {
+ register_event(event);
+ } else {
+ _lttng_event_unregister(event);
+ }
+
+ /* Check if has enablers without bytecode enabled */
+ list_for_each_entry(enabler_ref,
+ &event->enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_bytecode = 1;
+ break;
+ }
+ }
+ event->has_enablers_without_bytecode =
+ has_enablers_without_bytecode;
+
+ /* Enable filters */
+ list_for_each_entry(runtime,
+ &event->bytecode_runtime_head, node)
+ lttng_filter_sync_state(runtime);
+ }
+}
+
+/*
+ * Apply enablers to session events, adding events to session if need
+ * be. It is required after each modification applied to an active
+ * session, and right before session "start".
+ * "lazy" sync means we only sync if required.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_session_lazy_sync_enablers(struct lttng_session *session)
+{
+ /* We can skip if session is not active */
+ if (!session->active)
+ return;
+ lttng_session_sync_enablers(session);
+}
+
+/*
+ * Serialize at most one packet worth of metadata into a metadata
+ * channel.
+ * We grab the metadata cache mutex to get exclusive access to our metadata
+ * buffer and to the metadata cache. Exclusive access to the metadata buffer
+ * allows us to do racy operations such as looking for remaining space left in
+ * packet and write, since mutual exclusion protects us from concurrent writes.
+ * Mutual exclusion on the metadata cache allow us to read the cache content
+ * without racing against reallocation of the cache by updates.
+ * Returns the number of bytes written in the channel, 0 if no data
+ * was written and a negative value on error.
+ */
+int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
+ struct channel *chan)
+{
+ struct lib_ring_buffer_ctx ctx;
+ int ret = 0;
+ size_t len, reserve_len;
+
+ /*
+ * Ensure we support multiple get_next / put sequences followed by
+ * put_next. The metadata cache lock protects reading the metadata
+ * cache. It can indeed be read concurrently by "get_next_subbuf" and
+ * "flush" operations on the buffer invoked by different processes.
+ * Moreover, since the metadata cache memory can be reallocated, we
+ * need to have exclusive access against updates even though we only
+ * read it.
+ */
+ mutex_lock(&stream->metadata_cache->lock);
+ WARN_ON(stream->metadata_in < stream->metadata_out);
+ if (stream->metadata_in != stream->metadata_out)
+ goto end;
+
+ /* Metadata regenerated, change the version. */
+ if (stream->metadata_cache->version != stream->version)
+ stream->version = stream->metadata_cache->version;
+
+ len = stream->metadata_cache->metadata_written -
+ stream->metadata_in;
+ if (!len)
+ goto end;
+ /* Clamp the write to the space left in the current packet. */
+ reserve_len = min_t(size_t,
+ stream->transport->ops.packet_avail_size(chan),
+ len);
+ lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
+ sizeof(char), -1);
+ /*
+ * If reservation failed, return an error to the caller.
+ */
+ ret = stream->transport->ops.event_reserve(&ctx, 0);
+ if (ret != 0) {
+ printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
+ goto end;
+ }
+ stream->transport->ops.event_write(&ctx,
+ stream->metadata_cache->data + stream->metadata_in,
+ reserve_len);
+ stream->transport->ops.event_commit(&ctx);
+ stream->metadata_in += reserve_len;
+ ret = reserve_len;
+
+end:
+ mutex_unlock(&stream->metadata_cache->lock);
+ return ret;
+}
+
+/*
+ * Write the metadata to the metadata cache.
+ * Must be called with sessions_mutex held.
+ * The metadata cache lock protects us from concurrent read access from
+ * thread outputting metadata content to ring buffer.
+ *
+ * Grows the cache (at least doubling) when the formatted string does
+ * not fit, then wakes up metadata stream readers. Returns 0 or -ENOMEM.
+ */
+int lttng_metadata_printf(struct lttng_session *session,
+ const char *fmt, ...)
+{
+ char *str;
+ size_t len;
+ va_list ap;
+ struct lttng_metadata_stream *stream;
+
+ WARN_ON_ONCE(!READ_ONCE(session->active));
+
+ va_start(ap, fmt);
+ str = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ if (!str)
+ return -ENOMEM;
+
+ len = strlen(str);
+ mutex_lock(&session->metadata_cache->lock);
+ if (session->metadata_cache->metadata_written + len >
+ session->metadata_cache->cache_alloc) {
+ char *tmp_cache_realloc;
+ unsigned int tmp_cache_alloc_size;
+
+ /* Grow to max(needed size, double the current allocation). */
+ tmp_cache_alloc_size = max_t(unsigned int,
+ session->metadata_cache->cache_alloc + len,
+ session->metadata_cache->cache_alloc << 1);
+ tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
+ if (!tmp_cache_realloc)
+ goto err;
+ if (session->metadata_cache->data) {
+ memcpy(tmp_cache_realloc,
+ session->metadata_cache->data,
+ session->metadata_cache->cache_alloc);
+ vfree(session->metadata_cache->data);
+ }
+
+ session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
+ session->metadata_cache->data = tmp_cache_realloc;
+ }
+ memcpy(session->metadata_cache->data +
+ session->metadata_cache->metadata_written,
+ str, len);
+ session->metadata_cache->metadata_written += len;
+ mutex_unlock(&session->metadata_cache->lock);
+ kfree(str);
+
+ /* Notify readers that new metadata is available. */
+ list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
+ wake_up_interruptible(&stream->read_wait);
+
+ return 0;
+
+err:
+ mutex_unlock(&session->metadata_cache->lock);
+ kfree(str);
+ return -ENOMEM;
+}
+
+/* Emit @nesting tab characters of metadata indentation. */
+static
+int print_tabs(struct lttng_session *session, size_t nesting)
+{
+ size_t i;
+
+ for (i = 0; i < nesting; i++) {
+ int ret;
+
+ ret = lttng_metadata_printf(session, "	");
+ if (ret) {
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Emit the field name terminator ("_<name>;") after its type has been
+ * dumped. @nesting is unused here; kept for signature symmetry with the
+ * other statedump helpers.
+ */
+static
+int lttng_field_name_statedump(struct lttng_session *session,
+ const struct lttng_event_field *field,
+ size_t nesting)
+{
+ return lttng_metadata_printf(session, " _%s;\n", field->name);
+}
+
+/*
+ * Dump a CTF integer type declaration (size/align/signedness/encoding/
+ * base and, when it differs from native, the byte order).
+ */
+static
+int _lttng_integer_type_statedump(struct lttng_session *session,
+ const struct lttng_type *type,
+ size_t nesting)
+{
+ int ret;
+
+ WARN_ON_ONCE(type->atype != atype_integer);
+ ret = print_tabs(session, nesting);
+ if (ret)
+ return ret;
+ ret = lttng_metadata_printf(session,
+ "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
+ type->u.integer.size,
+ type->u.integer.alignment,
+ type->u.integer.signedness,
+ (type->u.integer.encoding == lttng_encode_none)
+ ? "none"
+ : (type->u.integer.encoding == lttng_encode_UTF8)
+ ? "UTF8"
+ : "ASCII",
+ type->u.integer.base,
+#if __BYTE_ORDER == __BIG_ENDIAN
+ type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
+#else
+ type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
+#endif
+ );
+ return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ *
+ * Dump a nestable struct type: header, each member field recursively,
+ * and the closing brace with an optional align() attribute.
+ */
+static
+int _lttng_struct_type_statedump(struct lttng_session *session,
+ const struct lttng_type *type,
+ size_t nesting)
+{
+ int ret;
+ uint32_t i, nr_fields;
+ unsigned int alignment;
+
+ WARN_ON_ONCE(type->atype != atype_struct_nestable);
+
+ ret = print_tabs(session, nesting);
+ if (ret)
+ return ret;
+ ret = lttng_metadata_printf(session,
+ "struct {\n");
+ if (ret)
+ return ret;
+ nr_fields = type->u.struct_nestable.nr_fields;
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_event_field *iter_field;
+
+ iter_field = &type->u.struct_nestable.fields[i];
+ ret = _lttng_field_statedump(session, iter_field, nesting + 1);
+ if (ret)
+ return ret;
+ }
+ ret = print_tabs(session, nesting);
+ if (ret)
+ return ret;
+ alignment = type->u.struct_nestable.alignment;
+ if (alignment) {
+ ret = lttng_metadata_printf(session,
+ "} align(%u)",
+ alignment);
+ } else {
+ ret = lttng_metadata_printf(session,
+ "}");
+ }
+ return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ *
+ * Dump a struct-typed field: its struct type followed by its name.
+ */
+static
+int _lttng_struct_field_statedump(struct lttng_session *session,
+ const struct lttng_event_field *field,
+ size_t nesting)
+{
+ int ret;
+
+ ret = _lttng_struct_type_statedump(session,
+ &field->type, nesting);
+ if (ret)
+ return ret;
+ return lttng_field_name_statedump(session, field, nesting);
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ *
+ * Dump a nestable variant type keyed on its tag field, with every
+ * choice dumped recursively. Nonzero variant alignment cannot be
+ * expressed in CTF 1.8 and is rejected.
+ */
+static
+int _lttng_variant_type_statedump(struct lttng_session *session,
+ const struct lttng_type *type,
+ size_t nesting)
+{
+ int ret;
+ uint32_t i, nr_choices;
+
+ WARN_ON_ONCE(type->atype != atype_variant_nestable);
+ /*
+ * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
+ */
+ if (type->u.variant_nestable.alignment != 0)
+ return -EINVAL;
+ ret = print_tabs(session, nesting);
+ if (ret)
+ return ret;
+ ret = lttng_metadata_printf(session,
+ "variant <_%s> {\n",
+ type->u.variant_nestable.tag_name);
+ if (ret)
+ return ret;
+ nr_choices = type->u.variant_nestable.nr_choices;
+ for (i = 0; i < nr_choices; i++) {
+ const struct lttng_event_field *iter_field;
+
+ iter_field = &type->u.variant_nestable.choices[i];
+ ret = _lttng_field_statedump(session, iter_field, nesting + 1);
+ if (ret)
+ return ret;
+ }
+ ret = print_tabs(session, nesting);
+ if (ret)
+ return ret;
+ ret = lttng_metadata_printf(session,
+ "}");
+ return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ *
+ * Dump a variant-typed field: its variant type followed by its name.
+ */
+static
+int _lttng_variant_field_statedump(struct lttng_session *session,
+ const struct lttng_event_field *field,
+ size_t nesting)
+{
+ int ret;
+
+ ret = _lttng_variant_type_statedump(session,
+ &field->type, nesting);
+ if (ret)
+ return ret;
+ return lttng_field_name_statedump(session, field, nesting);
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ *
+ * Dump a fixed-length array field: optional alignment padding struct,
+ * the element type (integer, struct or variant only), then the
+ * "_name[length];" declarator.
+ */
+static
+int _lttng_array_field_statedump(struct lttng_session *session,
+ const struct lttng_event_field *field,
+ size_t nesting)
+{
+ int ret;
+ const struct lttng_type *elem_type;
+
+ WARN_ON_ONCE(field->type.atype != atype_array_nestable);
+
+ if (field->type.u.array_nestable.alignment) {
+ /* Alignment is expressed as an empty aligned padding struct. */
+ ret = print_tabs(session, nesting);
+ if (ret)
+ return ret;
+ ret = lttng_metadata_printf(session,
+ "struct { } align(%u) _%s_padding;\n",
+ field->type.u.array_nestable.alignment * CHAR_BIT,
+ field->name);
+ if (ret)
+ return ret;
+ }
+ /*
+ * Nested compound types: Only array of structures and variants are
+ * currently supported.
+ */
+ elem_type = field->type.u.array_nestable.elem_type;
+ switch (elem_type->atype) {
+ case atype_integer:
+ case atype_struct_nestable:
+ case atype_variant_nestable:
+ ret = _lttng_type_statedump(session, elem_type, nesting);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ ret = lttng_metadata_printf(session,
+ " _%s[%u];\n",
+ field->name,
+ field->type.u.array_nestable.length);
+ return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ *
+ * Dump a variable-length sequence field: like an array, but the length
+ * is another field referenced by name ("_name[ _length_name ];").
+ */
+static
+int _lttng_sequence_field_statedump(struct lttng_session *session,
+ const struct lttng_event_field *field,
+ size_t nesting)
+{
+ int ret;
+ const char *length_name;
+ const struct lttng_type *elem_type;
+
+ WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
+
+ length_name = field->type.u.sequence_nestable.length_name;
+
+ if (field->type.u.sequence_nestable.alignment) {
+ ret = print_tabs(session, nesting);
+ if (ret)
+ return ret;
+ ret = lttng_metadata_printf(session,
+ "struct { } align(%u) _%s_padding;\n",
+ field->type.u.sequence_nestable.alignment * CHAR_BIT,
+ field->name);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Nested compound types: Only array of structures and variants are
+ * currently supported.
+ */
+ elem_type = field->type.u.sequence_nestable.elem_type;
+ switch (elem_type->atype) {
+ case atype_integer:
+ case atype_struct_nestable:
+ case atype_variant_nestable:
+ ret = _lttng_type_statedump(session, elem_type, nesting);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ ret = lttng_metadata_printf(session,
+ " _%s[ _%s ];\n",
+ field->name,
+ field->type.u.sequence_nestable.length_name);
+ return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ *
+ * Dump an enum type: its integer container type, then every entry as a
+ * quoted label (with '"' and '\' escaped) mapped to a value or value
+ * range. Auto-allocated entries are emitted without an explicit value.
+ */
+static
+int _lttng_enum_type_statedump(struct lttng_session *session,
+ const struct lttng_type *type,
+ size_t nesting)
+{
+ const struct lttng_enum_desc *enum_desc;
+ const struct lttng_type *container_type;
+ int ret;
+ unsigned int i, nr_entries;
+
+ container_type = type->u.enum_nestable.container_type;
+ if (container_type->atype != atype_integer) {
+ ret = -EINVAL;
+ goto end;
+ }
+ enum_desc = type->u.enum_nestable.desc;
+ nr_entries = enum_desc->nr_entries;
+
+ ret = print_tabs(session, nesting);
+ if (ret)
+ goto end;
+ ret = lttng_metadata_printf(session, "enum : ");
+ if (ret)
+ goto end;
+ ret = _lttng_integer_type_statedump(session, container_type, 0);
+ if (ret)
+ goto end;
+ ret = lttng_metadata_printf(session, " {\n");
+ if (ret)
+ goto end;
+ /* Dump all entries */
+ for (i = 0; i < nr_entries; i++) {
+ const struct lttng_enum_entry *entry = &enum_desc->entries[i];
+ int j, len;
+
+ ret = print_tabs(session, nesting + 1);
+ if (ret)
+ goto end;
+ ret = lttng_metadata_printf(session,
+ "\"");
+ if (ret)
+ goto end;
+ len = strlen(entry->string);
+ /* Escape the character '"' */
+ for (j = 0; j < len; j++) {
+ char c = entry->string[j];
+
+ switch (c) {
+ case '"':
+ ret = lttng_metadata_printf(session,
+ "\\\"");
+ break;
+ case '\\':
+ ret = lttng_metadata_printf(session,
+ "\\\\");
+ break;
+ default:
+ ret = lttng_metadata_printf(session,
+ "%c", c);
+ break;
+ }
+ if (ret)
+ goto end;
+ }
+ ret = lttng_metadata_printf(session, "\"");
+ if (ret)
+ goto end;
+
+ if (entry->options.is_auto) {
+ /* Value auto-assigned by CTF consumer: emit no value. */
+ ret = lttng_metadata_printf(session, ",\n");
+ if (ret)
+ goto end;
+ } else {
+ ret = lttng_metadata_printf(session,
+ " = ");
+ if (ret)
+ goto end;
+ if (entry->start.signedness)
+ ret = lttng_metadata_printf(session,
+ "%lld", (long long) entry->start.value);
+ else
+ ret = lttng_metadata_printf(session,
+ "%llu", entry->start.value);
+ if (ret)
+ goto end;
+ /* Single value vs "start ... end" range. */
+ if (entry->start.signedness == entry->end.signedness &&
+ entry->start.value
+ == entry->end.value) {
+ ret = lttng_metadata_printf(session,
+ ",\n");
+ } else {
+ if (entry->end.signedness) {
+ ret = lttng_metadata_printf(session,
+ " ... %lld,\n",
+ (long long) entry->end.value);
+ } else {
+ ret = lttng_metadata_printf(session,
+ " ... %llu,\n",
+ entry->end.value);
+ }
+ }
+ if (ret)
+ goto end;
+ }
+ }
+ ret = print_tabs(session, nesting);
+ if (ret)
+ goto end;
+ ret = lttng_metadata_printf(session, "}");
+end:
+ return ret;
+}
+
+/*
+ * Must be called with sessions_mutex held.
+ *
+ * Dump an enum-typed field: its enum type followed by its name.
+ */
+static
+int _lttng_enum_field_statedump(struct lttng_session *session,
+ const struct lttng_event_field *field,
+ size_t nesting)
+{
+ int ret;
+
+ ret = _lttng_enum_type_statedump(session, &field->type, nesting);
+ if (ret)
+ return ret;
+ return lttng_field_name_statedump(session, field, nesting);
+}
+
+/* Dump an integer-typed field: its integer type followed by its name. */
+static
+int _lttng_integer_field_statedump(struct lttng_session *session,
+ const struct lttng_event_field *field,
+ size_t nesting)
+{
+ int ret;
+
+ ret = _lttng_integer_type_statedump(session, &field->type, nesting);
+ if (ret)
+ return ret;
+ return lttng_field_name_statedump(session, field, nesting);
+}
+
+static
+int _lttng_string_type_statedump(struct lttng_session *session,
+ const struct lttng_type *type,
+ size_t nesting)
+{
+ int ret;
+
+ WARN_ON_ONCE(type->atype != atype_string);
+ /* Default encoding is UTF8 */
+ ret = print_tabs(session, nesting);
+ if (ret)
+ return ret;
+ ret = lttng_metadata_printf(session,
+ "string%s",
+ type->u.string.encoding == lttng_encode_ASCII ?
+ " { encoding = ASCII; }" : "");
+ return ret;