#include <wrapper/types.h>
#include <lttng/kernel-version.h>
#include <lttng/events.h>
+#include <lttng/lttng-bytecode.h>
#include <lttng/tracer.h>
#include <lttng/event-notifier-notification.h>
#include <lttng/abi-old.h>
static LIST_HEAD(sessions);
static LIST_HEAD(event_notifier_groups);
static LIST_HEAD(lttng_transport_list);
+static LIST_HEAD(lttng_counter_transport_list);
/*
* Protect the sessions and metadata caches.
*/
wake_up_interruptible(&stream->read_wait);
}
+static
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+{
+ struct lttng_counter_transport *transport;
+
+ list_for_each_entry(transport, &lttng_counter_transport_list, node) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ }
+ return NULL;
+}
+
/*
* Supports event creation while tracing session is active.
* Needs to be called with sessions mutex held.
event->id = chan->free_event_id++;
event->instrumentation = itype;
event->evtype = LTTNG_TYPE_EVENT;
- INIT_LIST_HEAD(&event->bytecode_runtime_head);
+ INIT_LIST_HEAD(&event->filter_bytecode_runtime_head);
INIT_LIST_HEAD(&event->enablers_ref_head);
switch (itype) {
event_notifier->group = event_notifier_group;
event_notifier->user_token = token;
+ event_notifier->num_captures = 0;
event_notifier->filter = filter;
event_notifier->instrumentation = itype;
event_notifier->evtype = LTTNG_TYPE_EVENT;
event_notifier->send_notification = lttng_event_notifier_notification_send;
- INIT_LIST_HEAD(&event_notifier->bytecode_runtime_head);
+ INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head);
+ INIT_LIST_HEAD(&event_notifier->capture_bytecode_runtime_head);
INIT_LIST_HEAD(&event_notifier->enablers_ref_head);
switch (itype) {
*/
lttng_enabler_link_bytecode(event->desc,
lttng_static_ctx,
- &event->bytecode_runtime_head,
- lttng_event_enabler_as_enabler(event_enabler));
+ &event->filter_bytecode_runtime_head,
+ &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
/* TODO: merge event context. */
}
* Link filter bytecodes if not linked yet.
*/
lttng_enabler_link_bytecode(event_notifier->desc,
- lttng_static_ctx, &event_notifier->bytecode_runtime_head,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+ lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
+ &lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
+
+ /* Link capture bytecodes if not linked yet. */
+ lttng_enabler_link_bytecode(event_notifier->desc,
+ lttng_static_ctx, &event_notifier->capture_bytecode_runtime_head,
+ &event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier->num_captures = event_notifier_enabler->num_captures;
}
return 0;
}
}
static
-int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
+int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
- struct lttng_filter_bytecode_node *bytecode_node;
+ struct lttng_bytecode_node *bytecode_node;
uint32_t bytecode_len;
int ret;
ret = get_user(bytecode_len, &bytecode->len);
if (ret)
return ret;
- bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
+ bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
GFP_KERNEL);
if (!bytecode_node)
return -ENOMEM;
if (ret)
goto error_free;
+ bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER;
bytecode_node->enabler = enabler;
/* Enforce length based on allocated size */
bytecode_node->bc.len = bytecode_len;
return 0;
error_free:
- kfree(bytecode_node);
+ lttng_kvfree(bytecode_node);
return ret;
}
-int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler,
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
int ret;
- ret = lttng_enabler_attach_bytecode(
+ ret = lttng_enabler_attach_filter_bytecode(
lttng_event_enabler_as_enabler(event_enabler), bytecode);
if (ret)
goto error;
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
- struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_bytecode_node *filter_node, *tmp_filter_node;
/* Destroy filter bytecode */
list_for_each_entry_safe(filter_node, tmp_filter_node,
&enabler->filter_bytecode_head, node) {
- kfree(filter_node);
+ lttng_kvfree(filter_node);
}
}
event_notifier_enabler->base.format_type = format_type;
INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
+ INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier_enabler->num_captures = 0;
memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event,
sizeof(event_notifier_enabler->base.event_param));
return 0;
}
-int lttng_event_notifier_enabler_attach_bytecode(
+int lttng_event_notifier_enabler_attach_filter_bytecode(
struct lttng_event_notifier_enabler *event_notifier_enabler,
struct lttng_kernel_filter_bytecode __user *bytecode)
{
int ret;
- ret = lttng_enabler_attach_bytecode(
+ ret = lttng_enabler_attach_filter_bytecode(
lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
bytecode);
if (ret)
return ret;
}
+int lttng_event_notifier_enabler_attach_capture_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_kernel_capture_bytecode __user *bytecode)
+{
+ struct lttng_bytecode_node *bytecode_node;
+ struct lttng_enabler *enabler =
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
+ uint32_t bytecode_len;
+ int ret;
+
+ ret = get_user(bytecode_len, &bytecode->len);
+ if (ret)
+ return ret;
+
+ bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
+ GFP_KERNEL);
+ if (!bytecode_node)
+ return -ENOMEM;
+
+ ret = copy_from_user(&bytecode_node->bc, bytecode,
+ sizeof(*bytecode) + bytecode_len);
+ if (ret)
+ goto error_free;
+
+ bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE;
+ bytecode_node->enabler = enabler;
+
+ /* Enforce length based on allocated size */
+ bytecode_node->bc.len = bytecode_len;
+ list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier_enabler->num_captures++;
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ goto end;
+
+error_free:
+ lttng_kvfree(bytecode_node);
+end:
+ return ret;
+}
+
int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier,
struct lttng_kernel_event_callsite __user *callsite)
{
/* Enable filters */
list_for_each_entry(runtime,
- &event->bytecode_runtime_head, node)
- lttng_filter_sync_state(runtime);
+ &event->filter_bytecode_runtime_head, node)
+ lttng_bytecode_filter_sync_state(runtime);
}
}
/* Enable filters */
list_for_each_entry(runtime,
- &event_notifier->bytecode_runtime_head, node)
- lttng_filter_sync_state(runtime);
+ &event_notifier->filter_bytecode_runtime_head, node)
+ lttng_bytecode_filter_sync_state(runtime);
+
+ /* Enable captures */
+ list_for_each_entry(runtime,
+ &event_notifier->capture_bytecode_runtime_head, node)
+ lttng_bytecode_capture_sync_state(runtime);
}
}
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+{
+ /*
+ * Make sure no page fault can be triggered by the module about to be
+ * registered. We deal with this here so we don't have to call
+ * vmalloc_sync_mappings() in each module's init.
+ */
+ wrapper_vmalloc_sync_mappings();
+
+ mutex_lock(&sessions_mutex);
+ list_add_tail(&transport->node, &lttng_counter_transport_list);
+ mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
+
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+{
+ mutex_lock(&sessions_mutex);
+ list_del(&transport->node);
+ mutex_unlock(&sessions_mutex);
+}
+EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
+
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
enum cpuhp_state lttng_hp_prepare;