#include <wrapper/poll.h>
#include <wrapper/file.h>
#include <wrapper/kref.h>
+#include <wrapper/barrier.h>
#include <lttng/string-utils.h>
#include <lttng/abi.h>
#include <lttng/abi-old.h>
#include <lttng/events.h>
+#include <lttng/events-internal.h>
#include <lttng/tracer.h>
#include <lttng/tp-mempool.h>
#include <ringbuffer/frontend_types.h>
static struct proc_dir_entry *lttng_proc_dentry;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
static const struct proc_ops lttng_proc_ops;
#else
static const struct file_operations lttng_proc_ops;
static int put_u64(uint64_t val, unsigned long arg);
static int put_u32(uint32_t val, unsigned long arg);
/*
 * Return 0 when the len bytes at p are all zero, -1 otherwise.
 * Used to reject ABI structures whose reserved padding carries
 * non-zero garbage from userspace.
 */
static int validate_zeroed_padding(char *p, size_t len)
{
	const char *end = p + len;

	while (p < end) {
		if (*p++)
			return -1;
	}
	return 0;
}
+
/*
* Teardown management: opened file descriptors keep a refcount on the module,
* so it can only exit when all file descriptors are closed.
}
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0))
static const struct proc_ops lttng_proc_ops = {
.proc_ioctl = lttng_ioctl,
#ifdef CONFIG_COMPAT
return 0;
}
+static
+int lttng_counter_release(struct inode *inode, struct file *file)
+{
+ struct lttng_counter *counter = file->private_data;
+
+ if (counter) {
+ /*
+ * Do not destroy the counter itself. Wait of the owner
+ * (event_notifier group) to be destroyed.
+ */
+ fput(counter->owner);
+ }
+
+ return 0;
+}
+
+static
+long lttng_counter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct lttng_counter *counter = file->private_data;
+ size_t indexes[LTTNG_KERNEL_COUNTER_DIMENSION_MAX] = { 0 };
+ int i;
+
+ switch (cmd) {
+ case LTTNG_KERNEL_COUNTER_READ:
+ {
+ struct lttng_kernel_counter_read local_counter_read;
+ struct lttng_kernel_counter_read __user *ucounter_read =
+ (struct lttng_kernel_counter_read __user *) arg;
+ bool overflow, underflow;
+ int64_t value;
+ int32_t cpu;
+ int ret;
+
+ if (copy_from_user(&local_counter_read, ucounter_read,
+ sizeof(local_counter_read)))
+ return -EFAULT;
+ if (validate_zeroed_padding(local_counter_read.padding,
+ sizeof(local_counter_read.padding)))
+ return -EINVAL;
+
+ /* Cast all indexes into size_t. */
+ for (i = 0; i < local_counter_read.index.number_dimensions; i++)
+ indexes[i] = (size_t) local_counter_read.index.dimension_indexes[i];
+ cpu = local_counter_read.cpu;
+
+ ret = lttng_kernel_counter_read(counter, indexes, cpu, &value,
+ &overflow, &underflow);
+ if (ret)
+ return ret;
+ local_counter_read.value.value = value;
+ local_counter_read.value.overflow = overflow;
+ local_counter_read.value.underflow = underflow;
+
+ if (copy_to_user(&ucounter_read->value, &local_counter_read.value,
+ sizeof(local_counter_read.value)))
+ return -EFAULT;
+
+ return 0;
+ }
+ case LTTNG_KERNEL_COUNTER_AGGREGATE:
+ {
+ struct lttng_kernel_counter_aggregate local_counter_aggregate;
+ struct lttng_kernel_counter_aggregate __user *ucounter_aggregate =
+ (struct lttng_kernel_counter_aggregate __user *) arg;
+ bool overflow, underflow;
+ int64_t value;
+ int ret;
+
+ if (copy_from_user(&local_counter_aggregate, ucounter_aggregate,
+ sizeof(local_counter_aggregate)))
+ return -EFAULT;
+ if (validate_zeroed_padding(local_counter_aggregate.padding,
+ sizeof(local_counter_aggregate.padding)))
+ return -EINVAL;
+
+ /* Cast all indexes into size_t. */
+ for (i = 0; i < local_counter_aggregate.index.number_dimensions; i++)
+ indexes[i] = (size_t) local_counter_aggregate.index.dimension_indexes[i];
+
+ ret = lttng_kernel_counter_aggregate(counter, indexes, &value,
+ &overflow, &underflow);
+ if (ret)
+ return ret;
+ local_counter_aggregate.value.value = value;
+ local_counter_aggregate.value.overflow = overflow;
+ local_counter_aggregate.value.underflow = underflow;
+
+ if (copy_to_user(&ucounter_aggregate->value, &local_counter_aggregate.value,
+ sizeof(local_counter_aggregate.value)))
+ return -EFAULT;
+
+ return 0;
+ }
+ case LTTNG_KERNEL_COUNTER_CLEAR:
+ {
+ struct lttng_kernel_counter_clear local_counter_clear;
+ struct lttng_kernel_counter_clear __user *ucounter_clear =
+ (struct lttng_kernel_counter_clear __user *) arg;
+
+ if (copy_from_user(&local_counter_clear, ucounter_clear,
+ sizeof(local_counter_clear)))
+ return -EFAULT;
+ if (validate_zeroed_padding(local_counter_clear.padding,
+ sizeof(local_counter_clear.padding)))
+ return -EINVAL;
+
+ /* Cast all indexes into size_t. */
+ for (i = 0; i < local_counter_clear.index.number_dimensions; i++)
+ indexes[i] = (size_t) local_counter_clear.index.dimension_indexes[i];
+
+ return lttng_kernel_counter_clear(counter, indexes);
+ }
+ default:
+ WARN_ON_ONCE(1);
+ return -ENOSYS;
+ }
+}
+
/*
 * File operations for the anonymous counter file descriptor:
 * no read/write/mmap, only ioctl (read/aggregate/clear commands)
 * and release. The same ioctl handler serves 32-bit compat callers.
 */
static const struct file_operations lttng_counter_fops = {
	.owner = THIS_MODULE,
	.release = lttng_counter_release,
	.unlocked_ioctl = lttng_counter_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lttng_counter_ioctl,
#endif
};
+
+
static
enum tracker_type get_tracker_type(struct lttng_kernel_tracker_args *tracker)
{
switch (event_param->instrumentation) {
case LTTNG_KERNEL_SYSCALL:
switch (event_param->u.syscall.entryexit) {
- case LTTNG_KERNEL_SYSCALL_ENTRY:
- case LTTNG_KERNEL_SYSCALL_EXIT:
+ case LTTNG_KERNEL_SYSCALL_ENTRY: /* Fall-through */
+ case LTTNG_KERNEL_SYSCALL_EXIT: /* Fall-through */
case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
break;
default:
}
break;
- case LTTNG_KERNEL_TRACEPOINT: /* Fallthrough */
- case LTTNG_KERNEL_KPROBE: /* Fallthrough */
- case LTTNG_KERNEL_KRETPROBE: /* Fallthrough */
- case LTTNG_KERNEL_NOOP: /* Fallthrough */
+ case LTTNG_KERNEL_KRETPROBE:
+ switch (event_param->u.kretprobe.entryexit) {
+ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
+ break;
+ case LTTNG_KERNEL_SYSCALL_ENTRY: /* Fall-through */
+ case LTTNG_KERNEL_SYSCALL_EXIT: /* Fall-through */
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
case LTTNG_KERNEL_UPROBE:
break;
- case LTTNG_KERNEL_FUNCTION: /* Fallthrough */
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
return -EINVAL;
}
ret = lttng_abi_validate_event_param(event_param);
if (ret)
goto event_error;
- if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT
- || event_param->instrumentation == LTTNG_KERNEL_SYSCALL) {
+
+ switch (event_param->instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
+ case LTTNG_KERNEL_SYSCALL:
+ {
struct lttng_event_enabler *event_enabler;
if (strutils_is_star_glob_pattern(event_param->name)) {
event_param, channel);
}
priv = event_enabler;
- } else {
+ break;
+ }
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
+ case LTTNG_KERNEL_UPROBE:
+ {
struct lttng_event *event;
/*
goto event_error;
}
priv = event;
+ break;
+ }
+
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
+ default:
+ ret = -EINVAL;
+ goto event_error;
}
event_file->private_data = priv;
fd_install(event_fd, event_file);
goto refcount_error;
}
- if (event_notifier_param->event.instrumentation == LTTNG_KERNEL_TRACEPOINT
- || event_notifier_param->event.instrumentation == LTTNG_KERNEL_SYSCALL) {
+ ret = lttng_abi_validate_event_param(&event_notifier_param->event);
+ if (ret)
+ goto event_notifier_error;
+
+ switch (event_notifier_param->event.instrumentation) {
+ case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
+ case LTTNG_KERNEL_SYSCALL:
+ {
struct lttng_event_notifier_enabler *enabler;
if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
event_notifier_param);
}
priv = enabler;
- } else {
+ break;
+ }
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
+ case LTTNG_KERNEL_UPROBE:
+ {
struct lttng_event_notifier *event_notifier;
/*
* It will stay invariant for the rest of the session.
*/
event_notifier = lttng_event_notifier_create(NULL,
- event_notifier_param->event.token, event_notifier_group,
+ event_notifier_param->event.token,
+ event_notifier_param->error_counter_index,
+ event_notifier_group,
event_notifier_param, NULL,
event_notifier_param->event.instrumentation);
WARN_ON_ONCE(!event_notifier);
goto event_notifier_error;
}
priv = event_notifier;
+ break;
+ }
+
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
+ default:
+ ret = -EINVAL;
+ goto event_notifier_error;
}
event_notifier_file->private_data = priv;
fd_install(event_notifier_fd, event_notifier_file);
return ret;
}
+static
+long lttng_abi_event_notifier_group_create_error_counter(
+ struct file *event_notifier_group_file,
+ const struct lttng_kernel_counter_conf *error_counter_conf)
+{
+ int counter_fd, ret;
+ char *counter_transport_name;
+ size_t counter_len;
+ struct lttng_counter *counter = NULL;
+ struct file *counter_file;
+ struct lttng_event_notifier_group *event_notifier_group =
+ (struct lttng_event_notifier_group *) event_notifier_group_file->private_data;
+
+ if (error_counter_conf->arithmetic != LTTNG_KERNEL_COUNTER_ARITHMETIC_MODULAR) {
+ printk(KERN_ERR "LTTng: event_notifier: Error counter of the wrong arithmetic type.\n");
+ return -EINVAL;
+ }
+
+ if (error_counter_conf->number_dimensions != 1) {
+ printk(KERN_ERR "LTTng: event_notifier: Error counter has more than one dimension.\n");
+ return -EINVAL;
+ }
+
+ switch (error_counter_conf->bitness) {
+ case LTTNG_KERNEL_COUNTER_BITNESS_64:
+ counter_transport_name = "counter-per-cpu-64-modular";
+ break;
+ case LTTNG_KERNEL_COUNTER_BITNESS_32:
+ counter_transport_name = "counter-per-cpu-32-modular";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Lock sessions to provide mutual exclusion against concurrent
+ * modification of event_notifier group, which would result in
+ * overwriting the error counter if set concurrently.
+ */
+ lttng_lock_sessions();
+
+ if (event_notifier_group->error_counter) {
+ printk(KERN_ERR "Error counter already created in event_notifier group\n");
+ ret = -EBUSY;
+ goto fd_error;
+ }
+
+ counter_fd = lttng_get_unused_fd();
+ if (counter_fd < 0) {
+ ret = counter_fd;
+ goto fd_error;
+ }
+
+ counter_file = anon_inode_getfile("[lttng_counter]",
+ <tng_counter_fops,
+ NULL, O_RDONLY);
+ if (IS_ERR(counter_file)) {
+ ret = PTR_ERR(counter_file);
+ goto file_error;
+ }
+
+ counter_len = error_counter_conf->dimensions[0].size;
+
+ if (!atomic_long_add_unless(&event_notifier_group_file->f_count, 1, LONG_MAX)) {
+ ret = -EOVERFLOW;
+ goto refcount_error;
+ }
+
+ counter = lttng_kernel_counter_create(counter_transport_name,
+ 1, &counter_len);
+ if (!counter) {
+ ret = -EINVAL;
+ goto counter_error;
+ }
+
+ event_notifier_group->error_counter_len = counter_len;
+ /*
+ * store-release to publish error counter matches load-acquire
+ * in record_error. Ensures the counter is created and the
+ * error_counter_len is set before they are used.
+ */
+ lttng_smp_store_release(&event_notifier_group->error_counter, counter);
+
+ counter->file = counter_file;
+ counter->owner = event_notifier_group->file;
+ counter_file->private_data = counter;
+ /* Ownership transferred. */
+ counter = NULL;
+
+ fd_install(counter_fd, counter_file);
+ lttng_unlock_sessions();
+
+ return counter_fd;
+
+counter_error:
+ atomic_long_dec(&event_notifier_group_file->f_count);
+refcount_error:
+ fput(counter_file);
+file_error:
+ put_unused_fd(counter_fd);
+fd_error:
+ lttng_unlock_sessions();
+ return ret;
+}
+
static
long lttng_event_notifier_group_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
return -EFAULT;
return lttng_abi_create_event_notifier(file, &uevent_notifier_param);
}
+ case LTTNG_KERNEL_COUNTER:
+ {
+ struct lttng_kernel_counter_conf uerror_counter_conf;
+
+ if (copy_from_user(&uerror_counter_conf,
+ (struct lttng_kernel_counter_conf __user *) arg,
+ sizeof(uerror_counter_conf)))
+ return -EFAULT;
+ return lttng_abi_event_notifier_group_create_error_counter(file,
+ &uerror_counter_conf);
+ }
default:
return -ENOIOCTLCMD;
}