#include <wrapper/types.h>
#include <lttng/kernel-version.h>
#include <lttng/events.h>
+#include <lttng/lttng-bytecode.h>
#include <lttng/tracer.h>
#include <lttng/event-notifier-notification.h>
#include <lttng/abi-old.h>
static LIST_HEAD(sessions);
static LIST_HEAD(event_notifier_groups);
static LIST_HEAD(lttng_transport_list);
+static LIST_HEAD(lttng_counter_transport_list);
/*
* Protect the sessions and metadata caches.
*/
void synchronize_trace(void)
{
+	/*
+	 * Wait for a tracing grace period. synchronize_sched() was removed
+	 * in kernel 5.1, where synchronize_rcu() subsumes it.
+	 */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0))
synchronize_rcu();
#else
synchronize_sched();
#endif
+	/*
+	 * On PREEMPT_RT configurations (CONFIG_PREEMPT_RT_FULL before 3.4,
+	 * CONFIG_PREEMPT_RT afterwards), an additional RCU grace period is
+	 * required on older kernels where the two flavors were distinct.
+	 */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
synchronize_rcu();
#endif
-#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
+#else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
synchronize_rcu();
#endif
-#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
+#endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
}
void lttng_lock_sessions(void)
return NULL;
}
+static
+/*
+ * Find a registered counter transport by name.
+ * Caller must hold sessions_mutex (protects lttng_counter_transport_list).
+ * Returns the transport, or NULL if no transport matches.
+ */
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+{
+	struct lttng_counter_transport *transport;
+
+	list_for_each_entry(transport, &lttng_counter_transport_list, node) {
+		if (!strcmp(transport->name, name))
+			return transport;
+	}
+	return NULL;
+}
+
+/*
+ * Create a counter object backed by the named counter transport.
+ * Takes a module reference on the transport owner for the lifetime of the
+ * counter. Returns the new counter, or NULL on error (transport not found,
+ * module unavailable, allocation or backend creation failure).
+ */
+struct lttng_counter *lttng_kernel_counter_create(
+	const char *counter_transport_name,
+	size_t number_dimensions, const size_t *dimensions_sizes)
+{
+	struct lttng_counter *counter = NULL;
+	struct lttng_counter_transport *counter_transport = NULL;
+
+	counter_transport = lttng_counter_transport_find(counter_transport_name);
+	if (!counter_transport) {
+		printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
+			counter_transport_name);
+		goto notransport;
+	}
+	/* Pin the transport module so its ops stay valid while in use. */
+	if (!try_module_get(counter_transport->owner)) {
+		printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
+		goto notransport;
+	}
+
+	counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
+	if (!counter)
+		goto nomem;
+
+	/* Create event notifier error counter. */
+	counter->ops = &counter_transport->ops;
+	counter->transport = counter_transport;
+
+	counter->counter = counter->ops->counter_create(
+			number_dimensions, dimensions_sizes, 0);
+	if (!counter->counter) {
+		goto create_error;
+	}
+
+	return counter;
+
+	/* Unwind in reverse order of acquisition. */
+create_error:
+	lttng_kvfree(counter);
+nomem:
+	if (counter_transport)
+		module_put(counter_transport->owner);
+notransport:
+	return NULL;
+}
+
struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
{
struct lttng_transport *transport = NULL;
mutex_lock(&sessions_mutex);
WRITE_ONCE(session->active, 0);
list_for_each_entry(chan, &session->chan, list) {
- ret = lttng_syscalls_unregister_event(chan);
+ ret = lttng_syscalls_unregister_channel(chan);
WARN_ON(ret);
}
list_for_each_entry(event, &session->events, list) {
mutex_lock(&sessions_mutex);
- ret = lttng_syscalls_unregister_event_notifier(event_notifier_group);
+ ret = lttng_syscalls_unregister_event_notifier_group(event_notifier_group);
WARN_ON(ret);
list_for_each_entry_safe(event_notifier, tmpevent_notifier,
&event_notifier_group->event_notifiers_head, list)
_lttng_event_notifier_destroy(event_notifier);
+ if (event_notifier_group->error_counter) {
+ struct lttng_counter *error_counter = event_notifier_group->error_counter;
+
+ error_counter->ops->counter_destroy(error_counter->counter);
+ module_put(error_counter->transport->owner);
+ lttng_kvfree(error_counter);
+ event_notifier_group->error_counter = NULL;
+ }
+
event_notifier_group->ops->channel_destroy(event_notifier_group->chan);
module_put(event_notifier_group->transport->owner);
list_del(&event_notifier_group->node);
goto end;
}
switch (event->instrumentation) {
- case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
case LTTNG_KERNEL_SYSCALL:
ret = -EINVAL;
break;
- case LTTNG_KERNEL_KPROBE:
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
case LTTNG_KERNEL_UPROBE:
- case LTTNG_KERNEL_NOOP:
WRITE_ONCE(event->enabled, 1);
break;
+
case LTTNG_KERNEL_KRETPROBE:
ret = lttng_kretprobes_event_enable_state(event, 1);
break;
- case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
+
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
goto end;
}
switch (event->instrumentation) {
- case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
case LTTNG_KERNEL_SYSCALL:
ret = -EINVAL;
break;
- case LTTNG_KERNEL_KPROBE:
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
case LTTNG_KERNEL_UPROBE:
- case LTTNG_KERNEL_NOOP:
WRITE_ONCE(event->enabled, 0);
break;
+
case LTTNG_KERNEL_KRETPROBE:
+
ret = lttng_kretprobes_event_enable_state(event, 0);
break;
- case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
+
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
goto end;
}
switch (event_notifier->instrumentation) {
- case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
case LTTNG_KERNEL_SYSCALL:
ret = -EINVAL;
break;
- case LTTNG_KERNEL_KPROBE:
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
case LTTNG_KERNEL_UPROBE:
WRITE_ONCE(event_notifier->enabled, 1);
break;
- case LTTNG_KERNEL_FUNCTION:
- case LTTNG_KERNEL_NOOP:
- case LTTNG_KERNEL_KRETPROBE:
+
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
goto end;
}
switch (event_notifier->instrumentation) {
- case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
case LTTNG_KERNEL_SYSCALL:
ret = -EINVAL;
break;
- case LTTNG_KERNEL_KPROBE:
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
case LTTNG_KERNEL_UPROBE:
WRITE_ONCE(event_notifier->enabled, 0);
break;
- case LTTNG_KERNEL_FUNCTION:
- case LTTNG_KERNEL_NOOP:
- case LTTNG_KERNEL_KRETPROBE:
+
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
wake_up_interruptible(&stream->read_wait);
}
+
/*
* Supports event creation while tracing session is active.
* Needs to be called with sessions mutex held.
case LTTNG_KERNEL_TRACEPOINT:
event_name = event_desc->name;
break;
- case LTTNG_KERNEL_KPROBE:
- case LTTNG_KERNEL_UPROBE:
- case LTTNG_KERNEL_KRETPROBE:
- case LTTNG_KERNEL_NOOP:
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
+ case LTTNG_KERNEL_UPROBE: /* Fall-through */
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
case LTTNG_KERNEL_SYSCALL:
event_name = event_param->name;
break;
- case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
+
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
event->id = chan->free_event_id++;
event->instrumentation = itype;
event->evtype = LTTNG_TYPE_EVENT;
- INIT_LIST_HEAD(&event->bytecode_runtime_head);
+ INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
INIT_LIST_HEAD(&event->enablers_ref_head);
switch (itype) {
/* Populate lttng_event structure before event registration. */
smp_wmb();
break;
+
case LTTNG_KERNEL_KPROBE:
/*
* Needs to be explicitly enabled after creation, since
ret = try_module_get(event->desc->owner);
WARN_ON_ONCE(!ret);
break;
+
case LTTNG_KERNEL_KRETPROBE:
{
struct lttng_event *event_return;
event_return->enabled = 0;
event_return->registered = 1;
event_return->instrumentation = itype;
+ INIT_LIST_HEAD(&event_return->filter_bytecode_runtime_head);
+ INIT_LIST_HEAD(&event_return->enablers_ref_head);
/*
* Populate lttng_event structure before kretprobe registration.
*/
list_add(&event_return->list, &chan->session->events);
break;
}
- case LTTNG_KERNEL_NOOP:
+
case LTTNG_KERNEL_SYSCALL:
/*
* Needs to be explicitly enabled after creation, since
goto register_error;
}
break;
+
case LTTNG_KERNEL_UPROBE:
/*
* Needs to be explicitly enabled after creation, since
ret = try_module_get(event->desc->owner);
WARN_ON_ONCE(!ret);
break;
+
case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
struct lttng_event_notifier *_lttng_event_notifier_create(
const struct lttng_event_desc *event_desc,
- uint64_t token, struct lttng_event_notifier_group *event_notifier_group,
+ uint64_t token, uint64_t error_counter_index,
+ struct lttng_event_notifier_group *event_notifier_group,
struct lttng_kernel_event_notifier *event_notifier_param,
void *filter, enum lttng_kernel_instrumentation itype)
{
struct lttng_event_notifier *event_notifier;
+ struct lttng_counter *error_counter;
const char *event_name;
struct hlist_head *head;
int ret;
case LTTNG_KERNEL_TRACEPOINT:
event_name = event_desc->name;
break;
- case LTTNG_KERNEL_KPROBE:
- case LTTNG_KERNEL_UPROBE:
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
+ case LTTNG_KERNEL_UPROBE: /* Fall-through */
case LTTNG_KERNEL_SYSCALL:
event_name = event_notifier_param->event.name;
break;
- case LTTNG_KERNEL_KRETPROBE:
- case LTTNG_KERNEL_FUNCTION:
- case LTTNG_KERNEL_NOOP:
+
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
event_notifier->group = event_notifier_group;
event_notifier->user_token = token;
+ event_notifier->error_counter_index = error_counter_index;
+ event_notifier->num_captures = 0;
event_notifier->filter = filter;
event_notifier->instrumentation = itype;
event_notifier->evtype = LTTNG_TYPE_EVENT;
event_notifier->send_notification = lttng_event_notifier_notification_send;
- INIT_LIST_HEAD(&event_notifier->bytecode_runtime_head);
+ INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
+ INIT_LIST_HEAD(&event_notifier->capture_bytecode_runtime_head);
INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
switch (itype) {
/* Populate lttng_event_notifier structure before event registration. */
smp_wmb();
break;
+
case LTTNG_KERNEL_KPROBE:
/*
* Needs to be explicitly enabled after creation, since
ret = try_module_get(event_notifier->desc->owner);
WARN_ON_ONCE(!ret);
break;
- case LTTNG_KERNEL_NOOP:
+
case LTTNG_KERNEL_SYSCALL:
/*
* Needs to be explicitly enabled after creation, since
goto register_error;
}
break;
+
case LTTNG_KERNEL_UPROBE:
/*
* Needs to be explicitly enabled after creation, since
ret = try_module_get(event_notifier->desc->owner);
WARN_ON_ONCE(!ret);
break;
- case LTTNG_KERNEL_KRETPROBE:
- case LTTNG_KERNEL_FUNCTION:
+
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
ret = -EINVAL;
list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head);
hlist_add_head(&event_notifier->hlist, head);
+
+ /*
+ * Clear the error counter bucket. The sessiond keeps track of which
+ * bucket is currently in use. We trust it. The session lock
+ * synchronizes against concurrent creation of the error
+ * counter.
+ */
+ error_counter = event_notifier_group->error_counter;
+ if (error_counter) {
+ size_t dimension_index[1];
+
+ /*
+ * Check that the index is within the boundary of the counter.
+ */
+ if (event_notifier->error_counter_index >= event_notifier_group->error_counter_len) {
+ printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
+ event_notifier_group->error_counter_len, event_notifier->error_counter_index);
+ ret = -EINVAL;
+ goto register_error;
+ }
+
+ dimension_index[0] = event_notifier->error_counter_index;
+ ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
+ if (ret) {
+ printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
+ event_notifier->error_counter_index);
+ goto register_error;
+ }
+ }
+
return event_notifier;
register_error:
return ERR_PTR(ret);
}
+/*
+ * Read one counter bucket (selected by dim_indexes) for a given cpu.
+ * Thin wrapper forwarding to the backend's counter_read operation; the
+ * value, overflow and underflow flags are returned through out-parameters.
+ */
+int lttng_kernel_counter_read(struct lttng_counter *counter,
+		const size_t *dim_indexes, int32_t cpu,
+		int64_t *val, bool *overflow, bool *underflow)
+{
+	return counter->ops->counter_read(counter->counter, dim_indexes,
+			cpu, val, overflow, underflow);
+}
+
+/*
+ * Aggregate one counter bucket (selected by dim_indexes) across cpus.
+ * Thin wrapper forwarding to the backend's counter_aggregate operation.
+ */
+int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
+		const size_t *dim_indexes, int64_t *val,
+		bool *overflow, bool *underflow)
+{
+	return counter->ops->counter_aggregate(counter->counter, dim_indexes,
+			val, overflow, underflow);
+}
+
+/*
+ * Clear one counter bucket (selected by dim_indexes).
+ * Thin wrapper forwarding to the backend's counter_clear operation.
+ */
+int lttng_kernel_counter_clear(struct lttng_counter *counter,
+		const size_t *dim_indexes)
+{
+	return counter->ops->counter_clear(counter->counter, dim_indexes);
+}
+
struct lttng_event *lttng_event_create(struct lttng_channel *chan,
struct lttng_kernel_event *event_param,
void *filter,
struct lttng_event_notifier *lttng_event_notifier_create(
const struct lttng_event_desc *event_desc,
- uint64_t id, struct lttng_event_notifier_group *event_notifier_group,
+ uint64_t id, uint64_t error_counter_index,
+ struct lttng_event_notifier_group *event_notifier_group,
struct lttng_kernel_event_notifier *event_notifier_param,
void *filter, enum lttng_kernel_instrumentation itype)
{
mutex_lock(&sessions_mutex);
event_notifier = _lttng_event_notifier_create(event_desc, id,
- event_notifier_group, event_notifier_param, filter, itype);
+ error_counter_index, event_notifier_group,
+ event_notifier_param, filter, itype);
mutex_unlock(&sessions_mutex);
return event_notifier;
}
desc->probe_callback,
event);
break;
+
case LTTNG_KERNEL_SYSCALL:
ret = lttng_syscall_filter_enable_event(event->chan, event);
break;
- case LTTNG_KERNEL_KPROBE:
- case LTTNG_KERNEL_UPROBE:
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
+ case LTTNG_KERNEL_UPROBE: /* Fall-through */
case LTTNG_KERNEL_KRETPROBE:
- case LTTNG_KERNEL_NOOP:
ret = 0;
break;
+
case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
}
event->desc->probe_callback,
event);
break;
+
case LTTNG_KERNEL_KPROBE:
lttng_kprobes_unregister_event(event);
ret = 0;
break;
+
case LTTNG_KERNEL_KRETPROBE:
lttng_kretprobes_unregister(event);
ret = 0;
break;
+
case LTTNG_KERNEL_SYSCALL:
ret = lttng_syscall_filter_disable_event(event->chan, event);
break;
+
case LTTNG_KERNEL_NOOP:
ret = 0;
break;
+
case LTTNG_KERNEL_UPROBE:
lttng_uprobes_unregister_event(event);
ret = 0;
break;
+
case LTTNG_KERNEL_FUNCTION: /* Fall-through */
default:
WARN_ON_ONCE(1);
desc->event_notifier_callback,
event_notifier);
break;
+
case LTTNG_KERNEL_SYSCALL:
ret = lttng_syscall_filter_enable_event_notifier(event_notifier);
break;
- case LTTNG_KERNEL_KPROBE:
+
+ case LTTNG_KERNEL_KPROBE: /* Fall-through */
case LTTNG_KERNEL_UPROBE:
ret = 0;
break;
- case LTTNG_KERNEL_KRETPROBE:
- case LTTNG_KERNEL_FUNCTION:
- case LTTNG_KERNEL_NOOP:
+
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
}
event_notifier->desc->event_notifier_callback,
event_notifier);
break;
+
case LTTNG_KERNEL_KPROBE:
lttng_kprobes_unregister_event_notifier(event_notifier);
ret = 0;
break;
+
case LTTNG_KERNEL_UPROBE:
lttng_uprobes_unregister_event_notifier(event_notifier);
ret = 0;
break;
+
case LTTNG_KERNEL_SYSCALL:
ret = lttng_syscall_filter_disable_event_notifier(event_notifier);
break;
- case LTTNG_KERNEL_KRETPROBE:
- case LTTNG_KERNEL_FUNCTION:
- case LTTNG_KERNEL_NOOP:
+
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
}
static
void _lttng_event_destroy(struct lttng_event *event)
{
+ struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
+
switch (event->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
lttng_event_desc_put(event->desc);
break;
+
case LTTNG_KERNEL_KPROBE:
module_put(event->desc->owner);
lttng_kprobes_destroy_event_private(event);
break;
+
case LTTNG_KERNEL_KRETPROBE:
module_put(event->desc->owner);
lttng_kretprobes_destroy_private(event);
break;
- case LTTNG_KERNEL_NOOP:
+
case LTTNG_KERNEL_SYSCALL:
break;
+
case LTTNG_KERNEL_UPROBE:
module_put(event->desc->owner);
lttng_uprobes_destroy_event_private(event);
break;
+
case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
}
list_del(&event->list);
lttng_destroy_context(event->ctx);
+ lttng_free_event_filter_runtime(event);
+ /* Free event enabler refs */
+ list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
+ &event->enablers_ref_head, node)
+ kfree(enabler_ref);
kmem_cache_free(event_cache, event);
}
static
void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier)
{
+ struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
+
switch (event_notifier->instrumentation) {
case LTTNG_KERNEL_TRACEPOINT:
lttng_event_desc_put(event_notifier->desc);
break;
+
case LTTNG_KERNEL_KPROBE:
module_put(event_notifier->desc->owner);
lttng_kprobes_destroy_event_notifier_private(event_notifier);
break;
- case LTTNG_KERNEL_NOOP:
+
case LTTNG_KERNEL_SYSCALL:
break;
+
case LTTNG_KERNEL_UPROBE:
module_put(event_notifier->desc->owner);
lttng_uprobes_destroy_event_notifier_private(event_notifier);
break;
- case LTTNG_KERNEL_KRETPROBE:
- case LTTNG_KERNEL_FUNCTION:
+
+ case LTTNG_KERNEL_KRETPROBE: /* Fall-through */
+ case LTTNG_KERNEL_FUNCTION: /* Fall-through */
+ case LTTNG_KERNEL_NOOP: /* Fall-through */
default:
WARN_ON_ONCE(1);
}
list_del(&event_notifier->list);
+ lttng_free_event_notifier_filter_runtime(event_notifier);
+ /* Free event enabler refs */
+ list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
+ &event_notifier->enablers_ref_head, node)
+ kfree(enabler_ref);
kmem_cache_free(event_notifier_cache, event_notifier);
}
return -EINVAL;
}
break;
+
case LTTNG_KERNEL_SYSCALL:
desc_name = desc->name;
if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
return -EINVAL;
}
break;
+
default:
WARN_ON_ONCE(1);
return -EINVAL;
*/
event_notifier = _lttng_event_notifier_create(desc,
event_notifier_enabler->base.user_token,
+ event_notifier_enabler->error_counter_index,
event_notifier_group, NULL, NULL,
LTTNG_KERNEL_TRACEPOINT);
if (IS_ERR(event_notifier)) {
{
int ret;
- ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
+ ret = lttng_syscalls_register_event(event_enabler, NULL);
WARN_ON_ONCE(ret);
}
case LTTNG_KERNEL_TRACEPOINT:
lttng_create_tracepoint_event_if_missing(event_enabler);
break;
+
case LTTNG_KERNEL_SYSCALL:
lttng_create_syscall_event_if_missing(event_enabler);
break;
+
default:
WARN_ON_ONCE(1);
break;
struct lttng_event *event;
if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
- base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
!strcmp(base_enabler->event_param.name, "*")) {
- if (base_enabler->enabled)
- WRITE_ONCE(chan->syscall_all, 1);
- else
- WRITE_ONCE(chan->syscall_all, 0);
+ int enabled = base_enabler->enabled;
+ enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
+
+ if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
+ WRITE_ONCE(chan->syscall_all_entry, enabled);
+
+ if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
+ WRITE_ONCE(chan->syscall_all_exit, enabled);
}
/* First ensure that probe events are created for this enabler. */
*/
lttng_enabler_link_bytecode(event->desc,
lttng_static_ctx,
- &event->bytecode_runtime_head,
- lttng_event_enabler_as_enabler(event_enabler));
+ &event->filter_bytecode_runtime_head,
+		&lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
/* TODO: merge event context. */
}
case LTTNG_KERNEL_TRACEPOINT:
lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
break;
+
case LTTNG_KERNEL_SYSCALL:
lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
break;
+
default:
WARN_ON_ONCE(1);
break;
* Link filter bytecodes if not linked yet.
*/
lttng_enabler_link_bytecode(event_notifier->desc,
- lttng_static_ctx, &event_notifier->bytecode_runtime_head,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+ lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
+		&lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
+
+ /* Link capture bytecodes if not linked yet. */
+ lttng_enabler_link_bytecode(event_notifier->desc,
+ lttng_static_ctx, &event_notifier->capture_bytecode_runtime_head,
+ &event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier->num_captures = event_notifier_enabler->num_captures;
}
return 0;
}
}
static
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
- struct lttng_filter_bytecode_node *bytecode_node;
+ struct lttng_bytecode_node *bytecode_node;
uint32_t bytecode_len;
int ret;
ret = get_user(bytecode_len, &bytecode->len);
if (ret)
return ret;
- bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
+ bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
GFP_KERNEL);
if (!bytecode_node)
return -ENOMEM;
if (ret)
goto error_free;
+ bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
bytecode_node->enabler = enabler;
/* Enforce length based on allocated size */
bytecode_node->bc.len = bytecode_len;
return 0;
error_free:
- kfree(bytecode_node);
+ lttng_kvfree(bytecode_node);
return ret;
}
-int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
int ret;
- ret = lttng_enabler_attach_bytecode(
+ ret = lttng_enabler_attach_filter_bytecode(
lttng_event_enabler_as_enabler(event_enabler), bytecode);
if (ret)
goto error;
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
- struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_bytecode_node *filter_node, *tmp_filter_node;
/* Destroy filter bytecode */
list_for_each_entry_safe(filter_node, tmp_filter_node,
&enabler->filter_bytecode_head, node) {
- kfree(filter_node);
+ lttng_kvfree(filter_node);
}
}
event_notifier_enabler->base.format_type = format_type;
INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
+ INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
+ event_notifier_enabler->num_captures = 0;
memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
sizeof(event_notifier_enabler->base.event_param));
return 0;
}
-int lttng_event_notifier_enabler_attach_bytecode(
+int lttng_event_notifier_enabler_attach_filter_bytecode(
struct lttng_event_notifier_enabler *event_notifier_enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
int ret;
- ret = lttng_enabler_attach_bytecode(
+ ret = lttng_enabler_attach_filter_bytecode(
lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
bytecode);
if (ret)
return ret;
}
+/*
+ * Attach a capture bytecode program, copied from userspace, to an event
+ * notifier enabler. The bytecode node is appended to the enabler's capture
+ * list and the enabler group is synchronized so the new capture takes
+ * effect. Returns 0 on success, negative errno on allocation/get_user
+ * failure.
+ */
+int lttng_event_notifier_enabler_attach_capture_bytecode(
+		struct lttng_event_notifier_enabler *event_notifier_enabler,
+		struct lttng_kernel_capture_bytecode __user *bytecode)
+{
+	struct lttng_bytecode_node *bytecode_node;
+	struct lttng_enabler *enabler =
+			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
+	uint32_t bytecode_len;
+	int ret;
+
+	ret = get_user(bytecode_len, &bytecode->len);
+	if (ret)
+		return ret;
+
+	/* Allocation covers the header plus the variable-length program. */
+	bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
+			GFP_KERNEL);
+	if (!bytecode_node)
+		return -ENOMEM;
+
+	/*
+	 * NOTE(review): on partial copy, copy_from_user() returns the number
+	 * of bytes NOT copied, so a positive value is propagated here. This
+	 * mirrors the filter-bytecode attach path; confirm whether callers
+	 * expect -EFAULT instead.
+	 */
+	ret = copy_from_user(&bytecode_node->bc, bytecode,
+			sizeof(*bytecode) + bytecode_len);
+	if (ret)
+		goto error_free;
+
+	bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
+	bytecode_node->enabler = enabler;
+
+	/* Enforce length based on allocated size */
+	bytecode_node->bc.len = bytecode_len;
+	list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
+
+	event_notifier_enabler->num_captures++;
+
+	/* Propagate the new capture program to existing event notifiers. */
+	lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+	goto end;
+
+error_free:
+	lttng_kvfree(bytecode_node);
+end:
+	return ret;
+}
+
int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
struct lttng_kernel_event_callsite __user *callsite)
{
int enabled = 0, has_enablers_without_bytecode = 0;
switch (event->instrumentation) {
- case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
case LTTNG_KERNEL_SYSCALL:
/* Enable events */
list_for_each_entry(enabler_ref,
}
}
break;
+
default:
/* Not handled with lazy sync. */
continue;
/* Enable filters */
list_for_each_entry(runtime,
- &event->bytecode_runtime_head, node)
- lttng_filter_sync_state(runtime);
+ &event->filter_bytecode_runtime_head, node)
+ lttng_bytecode_filter_sync_state(runtime);
}
}
int enabled = 0, has_enablers_without_bytecode = 0;
switch (event_notifier->instrumentation) {
- case LTTNG_KERNEL_TRACEPOINT:
+ case LTTNG_KERNEL_TRACEPOINT: /* Fall-through */
case LTTNG_KERNEL_SYSCALL:
/* Enable event_notifiers */
list_for_each_entry(enabler_ref,
}
}
break;
+
default:
/* Not handled with sync. */
continue;
/* Enable filters */
list_for_each_entry(runtime,
- &event_notifier->bytecode_runtime_head, node)
- lttng_filter_sync_state(runtime);
+ &event_notifier->filter_bytecode_runtime_head, node)
+ lttng_bytecode_filter_sync_state(runtime);
+
+ /* Enable captures */
+ list_for_each_entry(runtime,
+ &event_notifier->capture_bytecode_runtime_head, node)
+ lttng_bytecode_capture_sync_state(runtime);
+
+ WRITE_ONCE(event_notifier->eval_capture, !!event_notifier->num_captures);
}
}
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+/*
+ * Register a counter transport (backend) so sessions can create counters
+ * through it by name. The transport list is protected by sessions_mutex.
+ */
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+{
+	/*
+	 * Make sure no page fault can be triggered by the module about to be
+	 * registered. We deal with this here so we don't have to call
+	 * vmalloc_sync_mappings() in each module's init.
+	 */
+	wrapper_vmalloc_sync_mappings();
+
+	mutex_lock(&sessions_mutex);
+	list_add_tail(&transport->node, &lttng_counter_transport_list);
+	mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
+
+/*
+ * Unregister a counter transport. Removal from the list is done under
+ * sessions_mutex, matching lttng_counter_transport_register().
+ */
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+{
+	mutex_lock(&sessions_mutex);
+	list_del(&transport->node);
+	mutex_unlock(&sessions_mutex);
+}
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
enum cpuhp_state lttng_hp_prepare;
enum cpuhp_state lttng_hp_online;
cpuhp_remove_multi_state(lttng_hp_prepare);
}
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
static int lttng_init_cpu_hotplug(void)
{
return 0;
static void lttng_exit_cpu_hotplug(void)
{
}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
static int __init lttng_events_init(void)