#include <lttng/tracer.h>
#include <lttng/tp-mempool.h>
#include <ringbuffer/frontend_types.h>
+#include <ringbuffer/iterator.h>
/*
* This is LTTng's own personal way to create a system call as an external
#endif
static const struct file_operations lttng_session_fops;
+static const struct file_operations lttng_event_notifier_group_fops;
static const struct file_operations lttng_channel_fops;
static const struct file_operations lttng_metadata_fops;
static const struct file_operations lttng_event_fops;
static struct file_operations lttng_stream_ring_buffer_file_operations;
static int put_u64(uint64_t val, unsigned long arg);
+static int put_u32(uint32_t val, unsigned long arg);
+
+/*
+ * validate_zeroed_padding - verify that an ABI padding area is all zeroes.
+ * @p: padding bytes (already copied in from userspace)
+ * @len: number of bytes to check
+ *
+ * Returns 0 when every byte is zero, -1 otherwise. Used to reject ioctl
+ * argument structures whose reserved padding carries unexpected data,
+ * keeping the padding available for future ABI extensions.
+ */
+static int validate_zeroed_padding(char *p, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		if (p[i])
+			return -1;
+	}
+	return 0;
+}
/*
* Teardown management: opened file descriptors keep a refcount on the module,
return ret;
}
+/*
+ * irq_work callback: wake readers blocked on the event notifier group's
+ * notification ring buffer (read_wait), deferring the wakeup out of the
+ * tracing context that queued the work.
+ *
+ * NOTE(review): not declared static — confirm whether external linkage
+ * is actually required or if this should be file-local.
+ */
+void event_notifier_send_notification_work_wakeup(struct irq_work *entry)
+{
+	struct lttng_event_notifier_group *event_notifier_group =
+			container_of(entry, struct lttng_event_notifier_group,
+				wakeup_pending);
+	wake_up_interruptible(&event_notifier_group->read_wait);
+}
+
+/*
+ * lttng_abi_create_event_notifier_group - create an event notifier group
+ * and expose it through a new anonymous-inode file descriptor.
+ *
+ * Returns the new fd on success, a negative error code on failure.
+ * The group is destroyed when its file descriptor is released.
+ */
+static
+int lttng_abi_create_event_notifier_group(void)
+{
+	struct lttng_event_notifier_group *event_notifier_group;
+	struct file *event_notifier_group_file;
+	int event_notifier_group_fd, ret;
+
+	event_notifier_group = lttng_event_notifier_group_create();
+	if (!event_notifier_group)
+		return -ENOMEM;
+
+	event_notifier_group_fd = lttng_get_unused_fd();
+	if (event_notifier_group_fd < 0) {
+		ret = event_notifier_group_fd;
+		goto fd_error;
+	}
+	event_notifier_group_file = anon_inode_getfile("[lttng_event_notifier_group]",
+					&lttng_event_notifier_group_fops,
+					event_notifier_group, O_RDWR);
+	if (IS_ERR(event_notifier_group_file)) {
+		ret = PTR_ERR(event_notifier_group_file);
+		goto file_error;
+	}
+
+	event_notifier_group->file = event_notifier_group_file;
+	init_waitqueue_head(&event_notifier_group->read_wait);
+	/* Deferred reader wakeup, queued from tracing context. */
+	init_irq_work(&event_notifier_group->wakeup_pending,
+		      event_notifier_send_notification_work_wakeup);
+	fd_install(event_notifier_group_fd, event_notifier_group_file);
+	return event_notifier_group_fd;
+
+file_error:
+	put_unused_fd(event_notifier_group_fd);
+fd_error:
+	lttng_event_notifier_group_destroy(event_notifier_group);
+	return ret;
+}
+
static
int lttng_abi_tracepoint_list(void)
{
return lttng_add_vegid_to_ctx(ctx);
case LTTNG_KERNEL_CONTEXT_VSGID:
return lttng_add_vsgid_to_ctx(ctx);
+ case LTTNG_KERNEL_CONTEXT_TIME_NS:
+ return lttng_add_time_ns_to_ctx(ctx);
default:
return -EINVAL;
}
* Returns after all previously running probes have completed
* LTTNG_KERNEL_TRACER_ABI_VERSION
* Returns the LTTng kernel tracer ABI version
+ * LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE
+ *      Returns an LTTng event notifier group file descriptor
*
* The returned session will be deleted when its file descriptor is closed.
*/
case LTTNG_KERNEL_OLD_SESSION:
case LTTNG_KERNEL_SESSION:
return lttng_abi_create_session();
+ case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_CREATE:
+ return lttng_abi_create_event_notifier_group();
case LTTNG_KERNEL_OLD_TRACER_VERSION:
{
struct lttng_kernel_tracer_version v;
return 0;
}
+/*
+ * lttng_counter_release - release file operation for a counter fd.
+ *
+ * Drops the reference this fd holds on its owner's file (the
+ * event_notifier group). The counter object itself is intentionally not
+ * destroyed here; its lifetime is tied to the owner.
+ */
+static
+int lttng_counter_release(struct inode *inode, struct file *file)
+{
+	struct lttng_counter *counter = file->private_data;
+
+	if (counter) {
+		/*
+		 * Do not destroy the counter itself. Wait for the owner
+		 * (event_notifier group) to be destroyed.
+		 */
+		fput(counter->owner);
+	}
+
+	return 0;
+}
+
+/*
+ * lttng_counter_ioctl - ioctl handler for LTTng counter file descriptors.
+ *
+ * Supported commands:
+ *   LTTNG_KERNEL_COUNTER_READ      - read the value at an index for one cpu,
+ *   LTTNG_KERNEL_COUNTER_AGGREGATE - read the value aggregated across cpus,
+ *   LTTNG_KERNEL_COUNTER_CLEAR     - clear the value at an index.
+ *
+ * Every command copies its argument structure from userspace, rejects
+ * non-zero padding, and bound-checks the userspace-supplied dimension
+ * count before filling the on-stack index array: without that check a
+ * malicious number_dimensions would overflow indexes[] (stack smash).
+ */
+static
+long lttng_counter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct lttng_counter *counter = file->private_data;
+	size_t indexes[LTTNG_KERNEL_COUNTER_DIMENSION_MAX] = { 0 };
+	int i;
+
+	switch (cmd) {
+	case LTTNG_KERNEL_COUNTER_READ:
+	{
+		struct lttng_kernel_counter_read local_counter_read;
+		struct lttng_kernel_counter_read __user *ucounter_read =
+				(struct lttng_kernel_counter_read __user *) arg;
+		bool overflow, underflow;
+		int64_t value;
+		int32_t cpu;
+		int ret;
+
+		if (copy_from_user(&local_counter_read, ucounter_read,
+					sizeof(local_counter_read)))
+			return -EFAULT;
+		if (validate_zeroed_padding(local_counter_read.padding,
+					sizeof(local_counter_read.padding)))
+			return -EINVAL;
+		/* Bound check userspace-controlled dimension count (on-stack array). */
+		if (local_counter_read.index.number_dimensions > LTTNG_KERNEL_COUNTER_DIMENSION_MAX)
+			return -EINVAL;
+
+		/* Cast all indexes into size_t. */
+		for (i = 0; i < local_counter_read.index.number_dimensions; i++)
+			indexes[i] = (size_t) local_counter_read.index.dimension_indexes[i];
+		cpu = local_counter_read.cpu;
+
+		ret = lttng_kernel_counter_read(counter, indexes, cpu, &value,
+				&overflow, &underflow);
+		if (ret)
+			return ret;
+		local_counter_read.value.value = value;
+		local_counter_read.value.overflow = overflow;
+		local_counter_read.value.underflow = underflow;
+
+		if (copy_to_user(&ucounter_read->value, &local_counter_read.value,
+					sizeof(local_counter_read.value)))
+			return -EFAULT;
+
+		return 0;
+	}
+	case LTTNG_KERNEL_COUNTER_AGGREGATE:
+	{
+		struct lttng_kernel_counter_aggregate local_counter_aggregate;
+		struct lttng_kernel_counter_aggregate __user *ucounter_aggregate =
+				(struct lttng_kernel_counter_aggregate __user *) arg;
+		bool overflow, underflow;
+		int64_t value;
+		int ret;
+
+		if (copy_from_user(&local_counter_aggregate, ucounter_aggregate,
+					sizeof(local_counter_aggregate)))
+			return -EFAULT;
+		if (validate_zeroed_padding(local_counter_aggregate.padding,
+					sizeof(local_counter_aggregate.padding)))
+			return -EINVAL;
+		/* Bound check userspace-controlled dimension count (on-stack array). */
+		if (local_counter_aggregate.index.number_dimensions > LTTNG_KERNEL_COUNTER_DIMENSION_MAX)
+			return -EINVAL;
+
+		/* Cast all indexes into size_t. */
+		for (i = 0; i < local_counter_aggregate.index.number_dimensions; i++)
+			indexes[i] = (size_t) local_counter_aggregate.index.dimension_indexes[i];
+
+		ret = lttng_kernel_counter_aggregate(counter, indexes, &value,
+				&overflow, &underflow);
+		if (ret)
+			return ret;
+		local_counter_aggregate.value.value = value;
+		local_counter_aggregate.value.overflow = overflow;
+		local_counter_aggregate.value.underflow = underflow;
+
+		if (copy_to_user(&ucounter_aggregate->value, &local_counter_aggregate.value,
+					sizeof(local_counter_aggregate.value)))
+			return -EFAULT;
+
+		return 0;
+	}
+	case LTTNG_KERNEL_COUNTER_CLEAR:
+	{
+		struct lttng_kernel_counter_clear local_counter_clear;
+		struct lttng_kernel_counter_clear __user *ucounter_clear =
+				(struct lttng_kernel_counter_clear __user *) arg;
+
+		if (copy_from_user(&local_counter_clear, ucounter_clear,
+					sizeof(local_counter_clear)))
+			return -EFAULT;
+		if (validate_zeroed_padding(local_counter_clear.padding,
+					sizeof(local_counter_clear.padding)))
+			return -EINVAL;
+		/* Bound check userspace-controlled dimension count (on-stack array). */
+		if (local_counter_clear.index.number_dimensions > LTTNG_KERNEL_COUNTER_DIMENSION_MAX)
+			return -EINVAL;
+
+		/* Cast all indexes into size_t. */
+		for (i = 0; i < local_counter_clear.index.number_dimensions; i++)
+			indexes[i] = (size_t) local_counter_clear.index.dimension_indexes[i];
+
+		return lttng_kernel_counter_clear(counter, indexes);
+	}
+	default:
+		WARN_ON_ONCE(1);
+		return -ENOSYS;
+	}
+}
+
+/* File operations for counter fds: ioctl-only interface, no read/write. */
+static const struct file_operations lttng_counter_fops = {
+	.owner = THIS_MODULE,
+	.release = lttng_counter_release,
+	.unlocked_ioctl = lttng_counter_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = lttng_counter_ioctl,
+#endif
+};
+
+
static
enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
{
#endif
};
+/*
+ * When encountering empty buffer, flush current sub-buffer if non-empty
+ * and retry (if new data available to read after flush).
+ *
+ * Read file operation for the event notifier notification stream.
+ * Copies whole records from the group's ring buffer to userspace; a
+ * record that does not fit in the remaining user buffer is copied
+ * partially and its continuation state is kept in *ppos and
+ * chan->iter.len_left for the next read() call.
+ */
+static
+ssize_t lttng_event_notifier_group_notif_read(struct file *filp, char __user *user_buf,
+		size_t count, loff_t *ppos)
+{
+	struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
+	struct channel *chan = event_notifier_group->chan;
+	struct lib_ring_buffer *buf = event_notifier_group->buf;
+	ssize_t read_count = 0, len;
+	size_t read_offset;
+
+	might_sleep();
+	if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
+		return -EFAULT;
+
+	/* Finish copy of previous record */
+	if (*ppos != 0) {
+		if (read_count < count) {
+			/* Resume the partially-copied record at *ppos. */
+			len = chan->iter.len_left;
+			read_offset = *ppos;
+			goto skip_get_next;
+		}
+	}
+
+	while (read_count < count) {
+		size_t copy_len, space_left;
+
+		len = lib_ring_buffer_get_next_record(chan, buf);
+len_test:
+		if (len < 0) {
+			/*
+			 * Check if buffer is finalized (end of file).
+			 */
+			if (len == -ENODATA) {
+				/* A 0 read_count will tell about end of file */
+				goto nodata;
+			}
+			if (filp->f_flags & O_NONBLOCK) {
+				if (!read_count)
+					read_count = -EAGAIN;
+				goto nodata;
+			} else {
+				int error;
+
+				/*
+				 * No data available at the moment, return what
+				 * we got.
+				 */
+				if (read_count)
+					goto nodata;
+
+				/*
+				 * Wait for returned len to be >= 0 or -ENODATA.
+				 */
+				error = wait_event_interruptible(
+						event_notifier_group->read_wait,
+						((len = lib_ring_buffer_get_next_record(
+							chan, buf)), len != -EAGAIN));
+				CHAN_WARN_ON(chan, len == -EBUSY);
+				if (error) {
+					/* Interrupted by a signal: -ERESTARTSYS et al. */
+					read_count = error;
+					goto nodata;
+				}
+				CHAN_WARN_ON(chan, len < 0 && len != -ENODATA);
+				goto len_test;
+			}
+		}
+		read_offset = buf->iter.read_offset;
+skip_get_next:
+		space_left = count - read_count;
+		if (len <= space_left) {
+			/* Record fits entirely: clear continuation state. */
+			copy_len = len;
+			chan->iter.len_left = 0;
+			*ppos = 0;
+		} else {
+			/* Partial copy: remember where the record continues. */
+			copy_len = space_left;
+			chan->iter.len_left = len - copy_len;
+			*ppos = read_offset + copy_len;
+		}
+		if (__lib_ring_buffer_copy_to_user(&buf->backend, read_offset,
+					&user_buf[read_count],
+					copy_len)) {
+			/*
+			 * Leave the len_left and ppos values at their current
+			 * state, as we currently have a valid event to read.
+			 */
+			return -EFAULT;
+		}
+		read_count += copy_len;
+	}
+	goto put_record;
+
+nodata:
+	*ppos = 0;
+	chan->iter.len_left = 0;
+
+put_record:
+	lib_ring_buffer_put_current_record(buf);
+	return read_count;
+}
+
+/*
+ * If the ring buffer is non empty (even just a partial subbuffer), return that
+ * there is data available. Perform a ring buffer flush if we encounter a
+ * non-empty ring buffer which does not have any consumable subbuffer available.
+ */
+static
+unsigned int lttng_event_notifier_group_notif_poll(struct file *filp,
+		poll_table *wait)
+{
+	unsigned int mask = 0;
+	struct lttng_event_notifier_group *event_notifier_group = filp->private_data;
+	struct channel *chan = event_notifier_group->chan;
+	struct lib_ring_buffer *buf = event_notifier_group->buf;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+	int finalized, disabled;
+	unsigned long consumed, offset;
+	size_t subbuffer_header_size = config->cb.subbuffer_header_size();
+
+	if (filp->f_mode & FMODE_READ) {
+		poll_wait_set_exclusive(wait);
+		poll_wait(filp, &event_notifier_group->read_wait, wait);
+
+		finalized = lib_ring_buffer_is_finalized(config, buf);
+		disabled = lib_ring_buffer_channel_is_disabled(chan);
+
+		/*
+		 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
+		 * finalized load before offsets loads.
+		 */
+		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
+retry:
+		if (disabled)
+			return POLLERR;
+
+		offset = lib_ring_buffer_get_offset(config, buf);
+		consumed = lib_ring_buffer_get_consumed(config, buf);
+
+		/*
+		 * If there is no buffer available to consume.
+		 */
+		if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan) == 0) {
+			/*
+			 * If there is a non-empty subbuffer, flush and try again.
+			 */
+			if (subbuf_offset(offset, chan) > subbuffer_header_size) {
+				lib_ring_buffer_switch_remote(buf);
+				goto retry;
+			}
+
+			if (finalized)
+				return POLLHUP;
+			else {
+				/*
+				 * The memory barriers
+				 * __wait_event()/wake_up_interruptible() take
+				 * care of "raw_spin_is_locked" memory ordering.
+				 */
+				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
+					goto retry;
+				else
+					return 0;
+			}
+		} else {
+			/* Data available; POLLPRI signals a full (urgent) buffer. */
+			if (subbuf_trunc(offset, chan) - subbuf_trunc(consumed, chan)
+					>= chan->backend.buf_size)
+				return POLLPRI | POLLRDBAND;
+			else
+				return POLLIN | POLLRDNORM;
+		}
+	}
+
+	return mask;
+}
+
+/**
+ * lttng_event_notifier_group_notif_open - event_notifier ring buffer open file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Open implementation. Makes sure only one open instance of a buffer is
+ * done at a given moment.
+ */
+static int lttng_event_notifier_group_notif_open(struct inode *inode, struct file *file)
+{
+	struct lttng_event_notifier_group *event_notifier_group = inode->i_private;
+	struct lib_ring_buffer *buf = event_notifier_group->buf;
+
+	/* private_data is the group (not the buffer); readers fetch buf from it. */
+	file->private_data = event_notifier_group;
+	return lib_ring_buffer_open(inode, file, buf);
+}
+
+/**
+ * lttng_event_notifier_group_notif_release - event_notifier ring buffer release file operation
+ * @inode: opened inode
+ * @file: opened file
+ *
+ * Release implementation. Releases the ring buffer reader handle, then
+ * drops the reference this notification fd holds on the group's file.
+ */
+static int lttng_event_notifier_group_notif_release(struct inode *inode, struct file *file)
+{
+	struct lttng_event_notifier_group *event_notifier_group = file->private_data;
+	struct lib_ring_buffer *buf = event_notifier_group->buf;
+	int ret;
+
+	ret = lib_ring_buffer_release(inode, file, buf);
+	if (ret)
+		return ret;
+	/* Drop the group-file reference taken when the stream fd was created. */
+	fput(event_notifier_group->file);
+	return 0;
+}
+
+/* File operations for the event notifier group notification stream fd. */
+static const struct file_operations lttng_event_notifier_group_notif_fops = {
+	.owner = THIS_MODULE,
+	.open = lttng_event_notifier_group_notif_open,
+	.release = lttng_event_notifier_group_notif_release,
+	.read = lttng_event_notifier_group_notif_read,
+	.poll = lttng_event_notifier_group_notif_poll,
+};
+
/**
* lttng_metadata_ring_buffer_poll - LTTng ring buffer poll file operation
* @filp: the file
int ret;
struct lttng_metadata_stream *stream = filp->private_data;
struct lib_ring_buffer *buf = stream->priv;
+ unsigned int rb_cmd;
+ bool coherent;
+
+ if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
+ rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
+ else
+ rb_cmd = cmd;
switch (cmd) {
case RING_BUFFER_GET_NEXT_SUBBUF:
struct lib_ring_buffer *buf = stream->priv;
struct channel *chan = buf->backend.chan;
- ret = lttng_metadata_output_channel(stream, chan);
+ ret = lttng_metadata_output_channel(stream, chan, NULL);
if (ret > 0) {
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
ret = 0;
* Before doing the actual ring buffer flush, write up to one
* packet of metadata in the ring buffer.
*/
- ret = lttng_metadata_output_channel(stream, chan);
+ ret = lttng_metadata_output_channel(stream, chan, NULL);
if (ret < 0)
goto err;
break;
return lttng_metadata_cache_dump(stream);
}
+ case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+ struct channel *chan = buf->backend.chan;
+
+ ret = lttng_metadata_output_channel(stream, chan, &coherent);
+ if (ret > 0) {
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+ ret = 0;
+ } else if (ret < 0) {
+ goto err;
+ }
+ break;
+ }
default:
break;
}
/* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
/* Performing lib ring buffer ioctl after our own. */
- ret = lib_ring_buffer_ioctl(filp, cmd, arg, buf);
+ ret = lib_ring_buffer_ioctl(filp, rb_cmd, arg, buf);
if (ret < 0)
goto err;
cmd, arg);
break;
}
+ case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
+ {
+ return put_u32(coherent, arg);
+ }
default:
break;
}
int ret;
struct lttng_metadata_stream *stream = filp->private_data;
struct lib_ring_buffer *buf = stream->priv;
+ unsigned int rb_cmd;
+ bool coherent;
+
+ if (cmd == RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK)
+ rb_cmd = RING_BUFFER_GET_NEXT_SUBBUF;
+ else
+ rb_cmd = cmd;
switch (cmd) {
case RING_BUFFER_GET_NEXT_SUBBUF:
struct lib_ring_buffer *buf = stream->priv;
struct channel *chan = buf->backend.chan;
- ret = lttng_metadata_output_channel(stream, chan);
+ ret = lttng_metadata_output_channel(stream, chan, NULL);
if (ret > 0) {
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
ret = 0;
* Before doing the actual ring buffer flush, write up to one
* packet of metadata in the ring buffer.
*/
- ret = lttng_metadata_output_channel(stream, chan);
+ ret = lttng_metadata_output_channel(stream, chan, NULL);
if (ret < 0)
goto err;
break;
return lttng_metadata_cache_dump(stream);
}
+ case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
+ {
+ struct lttng_metadata_stream *stream = filp->private_data;
+ struct lib_ring_buffer *buf = stream->priv;
+ struct channel *chan = buf->backend.chan;
+
+ ret = lttng_metadata_output_channel(stream, chan, &coherent);
+ if (ret > 0) {
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+ ret = 0;
+ } else if (ret < 0) {
+ goto err;
+ }
+ break;
+ }
default:
break;
}
/* PUT_SUBBUF is the one from lib ring buffer, unmodified. */
/* Performing lib ring buffer ioctl after our own. */
- ret = lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
+ ret = lib_ring_buffer_compat_ioctl(filp, rb_cmd, arg, buf);
if (ret < 0)
goto err;
cmd, arg);
break;
}
+ case RING_BUFFER_GET_NEXT_SUBBUF_METADATA_CHECK:
+ {
+ return put_u32(coherent, arg);
+ }
default:
break;
}
* session, we need to keep our own reference on the transport.
*/
if (!try_module_get(stream->transport->owner)) {
- printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+ printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
return -EBUSY;
}
return lib_ring_buffer_open(inode, file, buf);
struct lttng_metadata_stream *stream = file->private_data;
struct lib_ring_buffer *buf = stream->priv;
+ mutex_lock(&stream->metadata_cache->lock);
+ list_del(&stream->list);
+ mutex_unlock(&stream->metadata_cache->lock);
kref_put(&stream->metadata_cache->refcount, metadata_cache_destroy);
module_put(stream->transport->owner);
+ kfree(stream);
return lib_ring_buffer_release(inode, file, buf);
}
static
int lttng_abi_create_stream_fd(struct file *channel_file, void *stream_priv,
- const struct file_operations *fops)
+ const struct file_operations *fops, const char *name)
{
int stream_fd, ret;
struct file *stream_file;
ret = stream_fd;
goto fd_error;
}
- stream_file = anon_inode_getfile("[lttng_stream]", fops,
- stream_priv, O_RDWR);
+ stream_file = anon_inode_getfile(name, fops, stream_priv, O_RDWR);
if (IS_ERR(stream_file)) {
ret = PTR_ERR(stream_file);
goto file_error;
stream_priv = buf;
ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
- <tng_stream_ring_buffer_file_operations);
+ <tng_stream_ring_buffer_file_operations,
+ "[lttng_stream]");
if (ret < 0)
goto fd_error;
metadata_stream->priv = buf;
stream_priv = metadata_stream;
metadata_stream->transport = channel->transport;
+ /* Initial state is an empty metadata, considered as incoherent. */
+ metadata_stream->coherent = false;
/*
* Since life-time of metadata cache differs from that of
* session, we need to keep our own reference on the transport.
*/
if (!try_module_get(metadata_stream->transport->owner)) {
- printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+ printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
ret = -EINVAL;
goto notransport;
}
}
ret = lttng_abi_create_stream_fd(channel_file, stream_priv,
- <tng_metadata_ring_buffer_file_operations);
+ <tng_metadata_ring_buffer_file_operations,
+ "[lttng_metadata_stream]");
if (ret < 0)
goto fd_error;
+ mutex_lock(&session->metadata_cache->lock);
list_add(&metadata_stream->list,
&session->metadata_cache->metadata_stream);
+ mutex_unlock(&session->metadata_cache->lock);
return ret;
fd_error:
return ret;
}
+/*
+ * lttng_abi_open_event_notifier_group_stream - create the notification
+ * stream fd for an event notifier group.
+ * @notif_file: the event notifier group file
+ *
+ * Opens the group's ring buffer for reading, takes a reference on the
+ * group fd (the stream fd keeps the group alive), and returns the new
+ * stream fd, or a negative error code.
+ */
+static
+int lttng_abi_open_event_notifier_group_stream(struct file *notif_file)
+{
+	struct lttng_event_notifier_group *event_notifier_group = notif_file->private_data;
+	struct channel *chan = event_notifier_group->chan;
+	struct lib_ring_buffer *buf;
+	int ret;
+	void *stream_priv;
+
+	buf = event_notifier_group->ops->buffer_read_open(chan);
+	if (!buf)
+		return -ENOENT;
+
+	/* The event_notifier notification fd holds a reference on the event_notifier group */
+	if (!atomic_long_add_unless(&notif_file->f_count, 1, LONG_MAX)) {
+		ret = -EOVERFLOW;
+		goto refcount_error;
+	}
+	event_notifier_group->buf = buf;
+	stream_priv = event_notifier_group;
+	ret = lttng_abi_create_stream_fd(notif_file, stream_priv,
+			&lttng_event_notifier_group_notif_fops,
+			"[lttng_event_notifier_stream]");
+	if (ret < 0)
+		goto fd_error;
+
+	return ret;
+
+fd_error:
+	atomic_long_dec(&notif_file->f_count);
+refcount_error:
+	event_notifier_group->ops->buffer_read_close(buf);
+	return ret;
+}
+
+/*
+ * lttng_abi_validate_event_param - reject event parameters the ABI does
+ * not implement.
+ *
+ * For syscall instrumentation, only the listed entryexit/abi/match
+ * combinations are accepted. Tracepoint, kprobe, kretprobe, noop and
+ * uprobe pass through; the function instrumentation type is refused.
+ * Returns 0 on success, -EINVAL for unimplemented combinations.
+ */
+static
+int lttng_abi_validate_event_param(struct lttng_kernel_event *event_param)
+{
+	/* Limit ABI to implemented features. */
+	switch (event_param->instrumentation) {
+	case LTTNG_KERNEL_SYSCALL:
+		switch (event_param->u.syscall.entryexit) {
+		case LTTNG_KERNEL_SYSCALL_ENTRY:
+		case LTTNG_KERNEL_SYSCALL_EXIT:
+		case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (event_param->u.syscall.abi) {
+		case LTTNG_KERNEL_SYSCALL_ABI_ALL:
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (event_param->u.syscall.match) {
+		case LTTNG_KERNEL_SYSCALL_MATCH_NAME:
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+
+	case LTTNG_KERNEL_TRACEPOINT:	/* Fallthrough */
+	case LTTNG_KERNEL_KPROBE:	/* Fallthrough */
+	case LTTNG_KERNEL_KRETPROBE:	/* Fallthrough */
+	case LTTNG_KERNEL_NOOP:		/* Fallthrough */
+	case LTTNG_KERNEL_UPROBE:
+		break;
+
+	case LTTNG_KERNEL_FUNCTION:	/* Fallthrough */
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
static
int lttng_abi_create_event(struct file *channel_file,
struct lttng_kernel_event *event_param)
ret = -EOVERFLOW;
goto refcount_error;
}
+ ret = lttng_abi_validate_event_param(event_param);
+ if (ret)
+ goto event_error;
if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
|| event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
if (strutils_is_star_glob_pattern(event_param->name)) {
/*
* If the event name is a star globbing pattern,
* we create the special star globbing enabler.
*/
- enabler = lttng_enabler_create(LTTNG_ENABLER_STAR_GLOB,
+ event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_STAR_GLOB,
event_param, channel);
} else {
- enabler = lttng_enabler_create(LTTNG_ENABLER_NAME,
+ event_enabler = lttng_event_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
event_param, channel);
}
- priv = enabler;
+ priv = event_enabler;
} else {
struct lttng_event *event;
return ret;
}
+/*
+ * lttng_event_notifier_ioctl - ioctl handler for event notifier fds.
+ *
+ * The fd's private_data starts with an enum lttng_event_type tag, so the
+ * same handler dispatches on whether the fd wraps a concrete event
+ * notifier (LTTNG_TYPE_EVENT) or an enabler (LTTNG_TYPE_ENABLER).
+ * Filter/capture bytecode attach only applies to enablers; callsite
+ * addition only applies to concrete notifiers.
+ */
+static
+long lttng_event_notifier_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct lttng_event_notifier *event_notifier;
+	struct lttng_event_notifier_enabler *event_notifier_enabler;
+	enum lttng_event_type *evtype = file->private_data;
+
+	switch (cmd) {
+	case LTTNG_KERNEL_ENABLE:
+		switch (*evtype) {
+		case LTTNG_TYPE_EVENT:
+			event_notifier = file->private_data;
+			return lttng_event_notifier_enable(event_notifier);
+		case LTTNG_TYPE_ENABLER:
+			event_notifier_enabler = file->private_data;
+			return lttng_event_notifier_enabler_enable(event_notifier_enabler);
+		default:
+			WARN_ON_ONCE(1);
+			return -ENOSYS;
+		}
+	case LTTNG_KERNEL_DISABLE:
+		switch (*evtype) {
+		case LTTNG_TYPE_EVENT:
+			event_notifier = file->private_data;
+			return lttng_event_notifier_disable(event_notifier);
+		case LTTNG_TYPE_ENABLER:
+			event_notifier_enabler = file->private_data;
+			return lttng_event_notifier_enabler_disable(event_notifier_enabler);
+		default:
+			WARN_ON_ONCE(1);
+			return -ENOSYS;
+		}
+	case LTTNG_KERNEL_FILTER:
+		switch (*evtype) {
+		case LTTNG_TYPE_EVENT:
+			return -EINVAL;
+		case LTTNG_TYPE_ENABLER:
+			event_notifier_enabler = file->private_data;
+			return lttng_event_notifier_enabler_attach_filter_bytecode(
+				event_notifier_enabler,
+				(struct lttng_kernel_filter_bytecode __user *) arg);
+		default:
+			WARN_ON_ONCE(1);
+			return -ENOSYS;
+		}
+
+	case LTTNG_KERNEL_CAPTURE:
+		switch (*evtype) {
+		case LTTNG_TYPE_EVENT:
+			return -EINVAL;
+		case LTTNG_TYPE_ENABLER:
+			event_notifier_enabler = file->private_data;
+			return lttng_event_notifier_enabler_attach_capture_bytecode(
+				event_notifier_enabler,
+				(struct lttng_kernel_capture_bytecode __user *) arg);
+		default:
+			WARN_ON_ONCE(1);
+			return -ENOSYS;
+		}
+	case LTTNG_KERNEL_ADD_CALLSITE:
+		switch (*evtype) {
+		case LTTNG_TYPE_EVENT:
+			event_notifier = file->private_data;
+			return lttng_event_notifier_add_callsite(event_notifier,
+				(struct lttng_kernel_event_callsite __user *) arg);
+		case LTTNG_TYPE_ENABLER:
+			return -EINVAL;
+		default:
+			WARN_ON_ONCE(1);
+			return -ENOSYS;
+		}
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+/*
+ * lttng_event_notifier_release - release file operation for event
+ * notifier fds.
+ *
+ * Drops the reference this fd holds on the event notifier group's file
+ * (taken when the notifier/enabler fd was created). The notifier or
+ * enabler object itself is owned by the group and not freed here.
+ */
+static
+int lttng_event_notifier_release(struct inode *inode, struct file *file)
+{
+	struct lttng_event_notifier *event_notifier;
+	struct lttng_event_notifier_enabler *event_notifier_enabler;
+	enum lttng_event_type *evtype = file->private_data;
+
+	/* private_data may be NULL if creation failed before installing it. */
+	if (!evtype)
+		return 0;
+
+	switch (*evtype) {
+	case LTTNG_TYPE_EVENT:
+		event_notifier = file->private_data;
+		if (event_notifier)
+			fput(event_notifier->group->file);
+		break;
+	case LTTNG_TYPE_ENABLER:
+		event_notifier_enabler = file->private_data;
+		if (event_notifier_enabler)
+			fput(event_notifier_enabler->group->file);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	return 0;
+}
+
+/* File operations for event notifier fds (concrete notifier or enabler). */
+static const struct file_operations lttng_event_notifier_fops = {
+	.owner = THIS_MODULE,
+	.release = lttng_event_notifier_release,
+	.unlocked_ioctl = lttng_event_notifier_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = lttng_event_notifier_ioctl,
+#endif
+};
+
+/*
+ * lttng_abi_create_event_notifier - create an event notifier (or enabler)
+ * fd inside a group.
+ * @event_notifier_group_file: the group fd's file
+ * @event_notifier_param: userspace-supplied parameters (already copied in)
+ *
+ * Tracepoint and syscall instrumentation create an enabler (possibly a
+ * star-glob one); other supported types create a concrete notifier. The
+ * new fd holds a reference on the group file. Returns the fd on success,
+ * a negative error code on failure.
+ */
+static
+int lttng_abi_create_event_notifier(struct file *event_notifier_group_file,
+		struct lttng_kernel_event_notifier *event_notifier_param)
+{
+	struct lttng_event_notifier_group *event_notifier_group =
+			event_notifier_group_file->private_data;
+	int event_notifier_fd, ret;
+	struct file *event_notifier_file;
+	void *priv;
+
+	switch (event_notifier_param->event.instrumentation) {
+	case LTTNG_KERNEL_TRACEPOINT:
+	case LTTNG_KERNEL_UPROBE:
+		break;
+	case LTTNG_KERNEL_KPROBE:
+		/* Force NUL-termination of userspace-supplied symbol name. */
+		event_notifier_param->event.u.kprobe.symbol_name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+		break;
+	case LTTNG_KERNEL_SYSCALL:
+		break;
+	case LTTNG_KERNEL_KRETPROBE:
+		/* Placing an event notifier on kretprobe is not supported. */
+	case LTTNG_KERNEL_FUNCTION:
+	case LTTNG_KERNEL_NOOP:
+	default:
+		ret = -EINVAL;
+		goto inval_instr;
+	}
+
+	event_notifier_param->event.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0';
+
+	event_notifier_fd = lttng_get_unused_fd();
+	if (event_notifier_fd < 0) {
+		ret = event_notifier_fd;
+		goto fd_error;
+	}
+
+	event_notifier_file = anon_inode_getfile("[lttng_event_notifier]",
+					&lttng_event_notifier_fops,
+					NULL, O_RDWR);
+	if (IS_ERR(event_notifier_file)) {
+		ret = PTR_ERR(event_notifier_file);
+		goto file_error;
+	}
+
+	/* The event notifier holds a reference on the event notifier group. */
+	if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
+		ret = -EOVERFLOW;
+		goto refcount_error;
+	}
+
+	if (event_notifier_param->event.instrumentation == LTTNG_KERNEL_TRACEPOINT
+			|| event_notifier_param->event.instrumentation == LTTNG_KERNEL_SYSCALL) {
+		struct lttng_event_notifier_enabler *enabler;
+
+		if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
+			/*
+			 * If the event name is a star globbing pattern,
+			 * we create the special star globbing enabler.
+			 */
+			enabler = lttng_event_notifier_enabler_create(
+					event_notifier_group,
+					LTTNG_ENABLER_FORMAT_STAR_GLOB,
+					event_notifier_param);
+		} else {
+			enabler = lttng_event_notifier_enabler_create(
+					event_notifier_group,
+					LTTNG_ENABLER_FORMAT_NAME,
+					event_notifier_param);
+		}
+		/*
+		 * Fail cleanly on allocation failure rather than installing a
+		 * NULL private_data that later ioctls would dereference.
+		 */
+		if (!enabler) {
+			ret = -ENOMEM;
+			goto event_notifier_error;
+		}
+		priv = enabler;
+	} else {
+		struct lttng_event_notifier *event_notifier;
+
+		/*
+		 * We tolerate no failure path after event notifier creation.
+		 * It will stay invariant for the rest of the session.
+		 */
+		event_notifier = lttng_event_notifier_create(NULL,
+				event_notifier_param->event.token,
+				event_notifier_param->error_counter_index,
+				event_notifier_group,
+				event_notifier_param, NULL,
+				event_notifier_param->event.instrumentation);
+		WARN_ON_ONCE(!event_notifier);
+		if (IS_ERR(event_notifier)) {
+			ret = PTR_ERR(event_notifier);
+			goto event_notifier_error;
+		}
+		priv = event_notifier;
+	}
+	event_notifier_file->private_data = priv;
+	fd_install(event_notifier_fd, event_notifier_file);
+	return event_notifier_fd;
+
+event_notifier_error:
+	atomic_long_dec(&event_notifier_group_file->f_count);
+refcount_error:
+	fput(event_notifier_file);
+file_error:
+	put_unused_fd(event_notifier_fd);
+fd_error:
+inval_instr:
+	return ret;
+}
+
+/*
+ * lttng_abi_event_notifier_group_create_error_counter - create the error
+ * counter for an event notifier group and return it as a new fd.
+ *
+ * Only one-dimensional modular per-cpu counters (32 or 64 bit) are
+ * accepted, and at most one error counter per group. The counter fd
+ * holds a reference on the group file (dropped in lttng_counter_release).
+ * Returns the new fd on success, a negative error code on failure.
+ */
+static
+long lttng_abi_event_notifier_group_create_error_counter(
+		struct file *event_notifier_group_file,
+		const struct lttng_kernel_counter_conf *error_counter_conf)
+{
+	int counter_fd, ret;
+	char *counter_transport_name;
+	size_t counter_len;
+	struct lttng_counter *counter = NULL;
+	struct file *counter_file;
+	struct lttng_event_notifier_group *event_notifier_group =
+			(struct lttng_event_notifier_group *) event_notifier_group_file->private_data;
+
+	if (error_counter_conf->arithmetic != LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR) {
+		printk(KERN_ERR "LTTng: event_notifier: Error counter of the wrong arithmetic type.\n");
+		return -EINVAL;
+	}
+
+	if (error_counter_conf->number_dimensions != 1) {
+		printk(KERN_ERR "LTTng: event_notifier: Error counter has more than one dimension.\n");
+		return -EINVAL;
+	}
+
+	switch (error_counter_conf->bitness) {
+	case LTTNG_KERNEL_COUNTER_BITNESS_64:
+		counter_transport_name = "counter-per-cpu-64-modular";
+		break;
+	case LTTNG_KERNEL_COUNTER_BITNESS_32:
+		counter_transport_name = "counter-per-cpu-32-modular";
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * Lock sessions to provide mutual exclusion against concurrent
+	 * modification of event_notifier group, which would result in
+	 * overwriting the error counter if set concurrently.
+	 */
+	lttng_lock_sessions();
+
+	if (event_notifier_group->error_counter) {
+		/* Prefixed like the sibling messages above for consistent logs. */
+		printk(KERN_ERR "LTTng: event_notifier: Error counter already created in event_notifier group\n");
+		ret = -EBUSY;
+		goto fd_error;
+	}
+
+	counter_fd = lttng_get_unused_fd();
+	if (counter_fd < 0) {
+		ret = counter_fd;
+		goto fd_error;
+	}
+
+	counter_file = anon_inode_getfile("[lttng_counter]",
+				       &lttng_counter_fops,
+				       NULL, O_RDONLY);
+	if (IS_ERR(counter_file)) {
+		ret = PTR_ERR(counter_file);
+		goto file_error;
+	}
+
+	counter_len = error_counter_conf->dimensions[0].size;
+
+	if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
+		ret = -EOVERFLOW;
+		goto refcount_error;
+	}
+
+	counter = lttng_kernel_counter_create(counter_transport_name,
+			1, &counter_len);
+	if (!counter) {
+		ret = -EINVAL;
+		goto counter_error;
+	}
+
+	event_notifier_group->error_counter = counter;
+	event_notifier_group->error_counter_len = counter_len;
+
+	counter->file = counter_file;
+	counter->owner = event_notifier_group->file;
+	counter_file->private_data = counter;
+	/* Ownership transferred. */
+	counter = NULL;
+
+	fd_install(counter_fd, counter_file);
+	lttng_unlock_sessions();
+
+	return counter_fd;
+
+counter_error:
+	atomic_long_dec(&event_notifier_group_file->f_count);
+refcount_error:
+	fput(counter_file);
+file_error:
+	put_unused_fd(counter_fd);
+fd_error:
+	lttng_unlock_sessions();
+	return ret;
+}
+
+/*
+ * lttng_event_notifier_group_ioctl - ioctl handler for the event notifier
+ * group fd: open the notification stream, create an event notifier, or
+ * create the group's error counter.
+ */
+static
+long lttng_event_notifier_group_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	switch (cmd) {
+	case LTTNG_KERNEL_EVENT_NOTIFIER_GROUP_NOTIFICATION_FD:
+	{
+		return lttng_abi_open_event_notifier_group_stream(file);
+	}
+	case LTTNG_KERNEL_EVENT_NOTIFIER_CREATE:
+	{
+		struct lttng_kernel_event_notifier uevent_notifier_param;
+
+		if (copy_from_user(&uevent_notifier_param,
+				(struct lttng_kernel_event_notifier __user *) arg,
+				sizeof(uevent_notifier_param)))
+			return -EFAULT;
+		return lttng_abi_create_event_notifier(file, &uevent_notifier_param);
+	}
+	case LTTNG_KERNEL_COUNTER:
+	{
+		struct lttng_kernel_counter_conf uerror_counter_conf;
+
+		if (copy_from_user(&uerror_counter_conf,
+				(struct lttng_kernel_counter_conf __user *) arg,
+				sizeof(uerror_counter_conf)))
+			return -EFAULT;
+		return lttng_abi_event_notifier_group_create_error_counter(file,
+				&uerror_counter_conf);
+	}
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+/*
+ * lttng_event_notifier_group_release - release file operation for the
+ * event notifier group fd: destroys the group when the fd is closed.
+ */
+static
+int lttng_event_notifier_group_release(struct inode *inode, struct file *file)
+{
+	struct lttng_event_notifier_group *event_notifier_group =
+			file->private_data;
+
+	if (event_notifier_group)
+		lttng_event_notifier_group_destroy(event_notifier_group);
+	return 0;
+}
+
+/* File operations for the event notifier group fd. */
+static const struct file_operations lttng_event_notifier_group_fops = {
+	.owner = THIS_MODULE,
+	.release = lttng_event_notifier_group_release,
+	.unlocked_ioctl = lttng_event_notifier_group_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = lttng_event_notifier_group_ioctl,
+#endif
+};
+
/**
* lttng_channel_ioctl - lttng syscall through ioctl
*
long lttng_event_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct lttng_event *event;
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
enum lttng_event_type *evtype = file->private_data;
switch (cmd) {
event = file->private_data;
return lttng_event_enable(event);
case LTTNG_TYPE_ENABLER:
- enabler = file->private_data;
- return lttng_enabler_enable(enabler);
+ event_enabler = file->private_data;
+ return lttng_event_enabler_enable(event_enabler);
default:
WARN_ON_ONCE(1);
return -ENOSYS;
event = file->private_data;
return lttng_event_disable(event);
case LTTNG_TYPE_ENABLER:
- enabler = file->private_data;
- return lttng_enabler_disable(enabler);
+ event_enabler = file->private_data;
+ return lttng_event_enabler_disable(event_enabler);
default:
WARN_ON_ONCE(1);
return -ENOSYS;
return -EINVAL;
case LTTNG_TYPE_ENABLER:
{
- enabler = file->private_data;
- return lttng_enabler_attach_bytecode(enabler,
+ event_enabler = file->private_data;
+ return lttng_event_enabler_attach_filter_bytecode(
+ event_enabler,
(struct lttng_kernel_filter_bytecode __user *) arg);
}
default:
int lttng_event_release(struct inode *inode, struct file *file)
{
struct lttng_event *event;
- struct lttng_enabler *enabler;
+ struct lttng_event_enabler *event_enabler;
enum lttng_event_type *evtype = file->private_data;
if (!evtype)
fput(event->chan->file);
break;
case LTTNG_TYPE_ENABLER:
- enabler = file->private_data;
- if (enabler)
- fput(enabler->chan->file);
+ event_enabler = file->private_data;
+ if (event_enabler)
+ fput(event_enabler->chan->file);
break;
default:
WARN_ON_ONCE(1);
return put_user(val, (uint64_t __user *) arg);
}
+/* Copy a 32-bit value to the userspace location designated by @arg. */
+static int put_u32(uint32_t val, unsigned long arg)
+{
+	return put_user(val, (uint32_t __user *) arg);
+}
+
static long lttng_stream_ring_buffer_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
<tng_proc_ops, NULL);
if (!lttng_proc_dentry) {
- printk(KERN_ERR "Error creating LTTng control file\n");
+ printk(KERN_ERR "LTTng: Error creating control file\n");
ret = -ENOMEM;
goto error;
}