X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=src%2Flttng-events.c;h=9f683db18f2274badbaeb37288d4843fc7c246e0;hb=99f52fcce5865809584c1e022bca1409702ea292;hp=34e5fe9d98e657aaba9de8cac0f250614b6eaff0;hpb=2dfda770cc6781ec372ee8dadd8eb4f6ab37375a;p=lttng-modules.git diff --git a/src/lttng-events.c b/src/lttng-events.c index 34e5fe9d..9f683db1 100644 --- a/src/lttng-events.c +++ b/src/lttng-events.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #include #include @@ -37,10 +36,13 @@ #include #include #include +#include #include +#include #include #include #include +#include #include #include #include @@ -48,20 +50,27 @@ #define METADATA_CACHE_DEFAULT_SIZE 4096 static LIST_HEAD(sessions); +static LIST_HEAD(event_notifier_groups); static LIST_HEAD(lttng_transport_list); +static LIST_HEAD(lttng_counter_transport_list); /* * Protect the sessions and metadata caches. */ static DEFINE_MUTEX(sessions_mutex); static struct kmem_cache *event_cache; +static struct kmem_cache *event_notifier_cache; static void lttng_session_lazy_sync_event_enablers(struct lttng_session *session); static void lttng_session_sync_event_enablers(struct lttng_session *session); static void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler); +static void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler); +static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group); static void _lttng_event_destroy(struct lttng_event *event); +static void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier); static void _lttng_channel_destroy(struct lttng_channel *chan); static int _lttng_event_unregister(struct lttng_event *event); +static int _lttng_event_notifier_unregister(struct lttng_event_notifier *event_notifier); static int _lttng_event_metadata_statedump(struct lttng_session *session, struct lttng_channel *chan, @@ -108,6 +117,17 @@ void lttng_unlock_sessions(void) mutex_unlock(&sessions_mutex); } +static struct lttng_transport *lttng_transport_find(const char *name) +{ + struct lttng_transport *transport; + + list_for_each_entry(transport, <tng_transport_list, node) { + if (!strcmp(transport->name, name)) + return transport; + } + return NULL; +} + /* * Called with sessions lock held. */ @@ -178,6 +198,126 @@ err: return NULL; } +static +struct lttng_counter_transport *lttng_counter_transport_find(const char *name) +{ + struct lttng_counter_transport *transport; + + list_for_each_entry(transport, <tng_counter_transport_list, node) { + if (!strcmp(transport->name, name)) + return transport; + } + return NULL; +} + +struct lttng_counter *lttng_kernel_counter_create( + const char *counter_transport_name, + size_t number_dimensions, const size_t *dimensions_sizes) +{ + struct lttng_counter *counter = NULL; + struct lttng_counter_transport *counter_transport = NULL; + + counter_transport = lttng_counter_transport_find(counter_transport_name); + if (!counter_transport) { + printk(KERN_WARNING "LTTng: counter transport %s not found.\n", + counter_transport_name); + goto notransport; + } + if (!try_module_get(counter_transport->owner)) { + printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n"); + goto notransport; + } + + counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL); + if (!counter) + goto nomem; + + /* Create event notifier error counter. 
*/ + counter->ops = &counter_transport->ops; + counter->transport = counter_transport; + + counter->counter = counter->ops->counter_create( + number_dimensions, dimensions_sizes, 0); + if (!counter->counter) { + goto create_error; + } + + return counter; + +create_error: + lttng_kvfree(counter); +nomem: + if (counter_transport) + module_put(counter_transport->owner); +notransport: + return NULL; +} + +struct lttng_event_notifier_group *lttng_event_notifier_group_create(void) +{ + struct lttng_transport *transport = NULL; + struct lttng_event_notifier_group *event_notifier_group; + const char *transport_name = "relay-event-notifier"; + size_t subbuf_size = 4096; //TODO + size_t num_subbuf = 16; //TODO + unsigned int switch_timer_interval = 0; + unsigned int read_timer_interval = 0; + int i; + + mutex_lock(&sessions_mutex); + + transport = lttng_transport_find(transport_name); + if (!transport) { + printk(KERN_WARNING "LTTng: transport %s not found\n", + transport_name); + goto notransport; + } + if (!try_module_get(transport->owner)) { + printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n", + transport_name); + goto notransport; + } + + event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group), + GFP_KERNEL); + if (!event_notifier_group) + goto nomem; + + /* + * Initialize the ring buffer used to store event notifier + * notifications. + */ + event_notifier_group->ops = &transport->ops; + event_notifier_group->chan = transport->ops.channel_create( + transport_name, event_notifier_group, NULL, + subbuf_size, num_subbuf, switch_timer_interval, + read_timer_interval); + if (!event_notifier_group->chan) + goto create_error; + + event_notifier_group->transport = transport; + + INIT_LIST_HEAD(&event_notifier_group->enablers_head); + INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head); + for (i = 0; i < LTTNG_EVENT_NOTIFIER_HT_SIZE; i++) + INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]); + + list_add(&event_notifier_group->node, &event_notifier_groups); + + mutex_unlock(&sessions_mutex); + + return event_notifier_group; + +create_error: + lttng_kvfree(event_notifier_group); +nomem: + if (transport) + module_put(transport->owner); +notransport: + mutex_unlock(&sessions_mutex); + return NULL; +} + void metadata_cache_destroy(struct kref *kref) { struct lttng_metadata_cache *cache = @@ -197,7 +337,7 @@ void lttng_session_destroy(struct lttng_session *session) mutex_lock(&sessions_mutex); WRITE_ONCE(session->active, 0); list_for_each_entry(chan, &session->chan, list) { - ret = lttng_syscalls_unregister(chan); + ret = lttng_syscalls_unregister_event(chan); WARN_ON(ret); } list_for_each_entry(event, &session->events, list) { @@ -206,7 +346,7 @@ void lttng_session_destroy(struct lttng_session *session) } synchronize_trace(); /* Wait for in-flight events to complete */ list_for_each_entry(chan, &session->chan, list) { - ret = lttng_syscalls_destroy(chan); + ret = lttng_syscalls_destroy_event(chan); WARN_ON(ret); } list_for_each_entry_safe(event_enabler, tmp_event_enabler, @@ -234,6 +374,58 @@ void lttng_session_destroy(struct lttng_session *session) lttng_kvfree(session); } +void lttng_event_notifier_group_destroy( + struct lttng_event_notifier_group *event_notifier_group) +{ + struct lttng_event_notifier_enabler *event_notifier_enabler, *tmp_event_notifier_enabler; + struct lttng_event_notifier *event_notifier, *tmpevent_notifier; + int ret; + + if (!event_notifier_group) + return; + + mutex_lock(&sessions_mutex); + + ret = 
lttng_syscalls_unregister_event_notifier(event_notifier_group); + WARN_ON(ret); + + list_for_each_entry_safe(event_notifier, tmpevent_notifier, + &event_notifier_group->event_notifiers_head, list) { + ret = _lttng_event_notifier_unregister(event_notifier); + WARN_ON(ret); + } + + /* Wait for in-flight event notifier to complete */ + synchronize_trace(); + + irq_work_sync(&event_notifier_group->wakeup_pending); + + kfree(event_notifier_group->sc_filter); + + list_for_each_entry_safe(event_notifier_enabler, tmp_event_notifier_enabler, + &event_notifier_group->enablers_head, node) + lttng_event_notifier_enabler_destroy(event_notifier_enabler); + + list_for_each_entry_safe(event_notifier, tmpevent_notifier, + &event_notifier_group->event_notifiers_head, list) + _lttng_event_notifier_destroy(event_notifier); + + if (event_notifier_group->error_counter) { + struct lttng_counter *error_counter = event_notifier_group->error_counter; + error_counter->ops->counter_destroy(error_counter->counter); + module_put(error_counter->transport->owner); + lttng_kvfree(error_counter); + event_notifier_group->error_counter = NULL; + } + + event_notifier_group->ops->channel_destroy(event_notifier_group->chan); + module_put(event_notifier_group->transport->owner); + list_del(&event_notifier_group->node); + + mutex_unlock(&sessions_mutex); + lttng_kvfree(event_notifier_group); +} + int lttng_session_statedump(struct lttng_session *session) { int ret; @@ -479,15 +671,64 @@ end: return ret; } -static struct lttng_transport *lttng_transport_find(const char *name) +int lttng_event_notifier_enable(struct lttng_event_notifier *event_notifier) { - struct lttng_transport *transport; + int ret = 0; - list_for_each_entry(transport, <tng_transport_list, node) { - if (!strcmp(transport->name, name)) - return transport; + mutex_lock(&sessions_mutex); + if (event_notifier->enabled) { + ret = -EEXIST; + goto end; } - return NULL; + switch (event_notifier->instrumentation) { + case LTTNG_KERNEL_TRACEPOINT: + case LTTNG_KERNEL_SYSCALL: + ret = -EINVAL; + break; + case LTTNG_KERNEL_KPROBE: + case LTTNG_KERNEL_UPROBE: + WRITE_ONCE(event_notifier->enabled, 1); + break; + case LTTNG_KERNEL_FUNCTION: + case LTTNG_KERNEL_NOOP: + case LTTNG_KERNEL_KRETPROBE: + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + } +end: + mutex_unlock(&sessions_mutex); + return ret; +} + +int lttng_event_notifier_disable(struct lttng_event_notifier *event_notifier) +{ + int ret = 0; + + mutex_lock(&sessions_mutex); + if (!event_notifier->enabled) { + ret = -EEXIST; + goto end; + } + switch (event_notifier->instrumentation) { + case LTTNG_KERNEL_TRACEPOINT: + case LTTNG_KERNEL_SYSCALL: + ret = -EINVAL; + break; + case LTTNG_KERNEL_KPROBE: + case LTTNG_KERNEL_UPROBE: + WRITE_ONCE(event_notifier->enabled, 0); + break; + case LTTNG_KERNEL_FUNCTION: + case LTTNG_KERNEL_NOOP: + case LTTNG_KERNEL_KRETPROBE: + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + } +end: + mutex_unlock(&sessions_mutex); + return ret; } struct lttng_channel *lttng_channel_create(struct lttng_session *session, @@ -582,6 +823,7 @@ void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream) wake_up_interruptible(&stream->read_wait); } + /* * Supports event creation while tracing session is active. * Needs to be called with sessions mutex held. 
@@ -596,8 +838,6 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan, struct lttng_event *event; const char *event_name; struct hlist_head *head; - size_t name_len; - uint32_t hash; int ret; if (chan->free_event_id == -1U) { @@ -622,9 +862,9 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan, ret = -EINVAL; goto type_error; } - name_len = strlen(event_name); - hash = jhash(event_name, name_len, 0); - head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)]; + + head = utils_borrow_hash_table_bucket(session->events_ht.table, + LTTNG_EVENT_HT_SIZE, event_name); lttng_hlist_for_each_entry(event, head, hlist) { WARN_ON_ONCE(!event->desc); if (!strncmp(event->desc->name, event_name, @@ -645,7 +885,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan, event->id = chan->free_event_id++; event->instrumentation = itype; event->evtype = LTTNG_TYPE_EVENT; - INIT_LIST_HEAD(&event->bytecode_runtime_head); + INIT_LIST_HEAD(&event->filter_bytecode_runtime_head); INIT_LIST_HEAD(&event->enablers_ref_head); switch (itype) { @@ -653,7 +893,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan, /* Event will be enabled by enabler sync. */ event->enabled = 0; event->registered = 0; - event->desc = lttng_event_get(event_name); + event->desc = lttng_event_desc_get(event_name); if (!event->desc) { ret = -ENOENT; goto register_error; @@ -673,7 +913,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan, * registration. */ smp_wmb(); - ret = lttng_kprobes_register(event_name, + ret = lttng_kprobes_register_event(event_name, event_param->u.kprobe.symbol_name, event_param->u.kprobe.offset, event_param->u.kprobe.addr, @@ -789,7 +1029,7 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan, */ smp_wmb(); - ret = lttng_uprobes_register(event_param->name, + ret = lttng_uprobes_register_event(event_param->name, event_param->u.uprobe.fd, event); if (ret) @@ -823,6 +1063,235 @@ full: return ERR_PTR(ret); } +struct lttng_event_notifier *_lttng_event_notifier_create( + const struct lttng_event_desc *event_desc, + uint64_t token, uint64_t error_counter_index, + struct lttng_event_notifier_group *event_notifier_group, + struct lttng_kernel_event_notifier *event_notifier_param, + void *filter, enum lttng_kernel_instrumentation itype) +{ + struct lttng_event_notifier *event_notifier; + const char *event_name; + struct hlist_head *head; + int ret; + + switch (itype) { + case LTTNG_KERNEL_TRACEPOINT: + event_name = event_desc->name; + break; + case LTTNG_KERNEL_KPROBE: + case LTTNG_KERNEL_UPROBE: + case LTTNG_KERNEL_SYSCALL: + event_name = event_notifier_param->event.name; + break; + case LTTNG_KERNEL_KRETPROBE: + case LTTNG_KERNEL_FUNCTION: + case LTTNG_KERNEL_NOOP: + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + goto type_error; + } + + head = utils_borrow_hash_table_bucket(event_notifier_group->event_notifiers_ht.table, + LTTNG_EVENT_NOTIFIER_HT_SIZE, event_name); + lttng_hlist_for_each_entry(event_notifier, head, hlist) { + WARN_ON_ONCE(!event_notifier->desc); + if (!strncmp(event_notifier->desc->name, event_name, + LTTNG_KERNEL_SYM_NAME_LEN - 1) + && event_notifier_group == event_notifier->group + && token == event_notifier->user_token) { + ret = -EEXIST; + goto exist; + } + } + + event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL); + if (!event_notifier) { + ret = -ENOMEM; + goto cache_error; + } + + event_notifier->group = event_notifier_group; + event_notifier->user_token = token; + 
event_notifier->error_counter_index = error_counter_index; + event_notifier->num_captures = 0; + event_notifier->filter = filter; + event_notifier->instrumentation = itype; + event_notifier->evtype = LTTNG_TYPE_EVENT; + event_notifier->send_notification = lttng_event_notifier_notification_send; + INIT_LIST_HEAD(&event_notifier->filter_bytecode_runtime_head); + INIT_LIST_HEAD(&event_notifier->capture_bytecode_runtime_head); + INIT_LIST_HEAD(&event_notifier->enablers_ref_head); + + switch (itype) { + case LTTNG_KERNEL_TRACEPOINT: + /* Event will be enabled by enabler sync. */ + event_notifier->enabled = 0; + event_notifier->registered = 0; + event_notifier->desc = lttng_event_desc_get(event_name); + if (!event_notifier->desc) { + ret = -ENOENT; + goto register_error; + } + /* Populate lttng_event_notifier structure before event registration. */ + smp_wmb(); + break; + case LTTNG_KERNEL_KPROBE: + /* + * Needs to be explicitly enabled after creation, since + * we may want to apply filters. + */ + event_notifier->enabled = 0; + event_notifier->registered = 1; + /* + * Populate lttng_event_notifier structure before event + * registration. + */ + smp_wmb(); + ret = lttng_kprobes_register_event_notifier( + event_notifier_param->event.u.kprobe.symbol_name, + event_notifier_param->event.u.kprobe.offset, + event_notifier_param->event.u.kprobe.addr, + event_notifier); + if (ret) { + ret = -EINVAL; + goto register_error; + } + ret = try_module_get(event_notifier->desc->owner); + WARN_ON_ONCE(!ret); + break; + case LTTNG_KERNEL_NOOP: + case LTTNG_KERNEL_SYSCALL: + /* + * Needs to be explicitly enabled after creation, since + * we may want to apply filters. + */ + event_notifier->enabled = 0; + event_notifier->registered = 0; + event_notifier->desc = event_desc; + switch (event_notifier_param->event.u.syscall.entryexit) { + case LTTNG_KERNEL_SYSCALL_ENTRYEXIT: + ret = -EINVAL; + goto register_error; + case LTTNG_KERNEL_SYSCALL_ENTRY: + event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY; + break; + case LTTNG_KERNEL_SYSCALL_EXIT: + event_notifier->u.syscall.entryexit = LTTNG_SYSCALL_EXIT; + break; + } + switch (event_notifier_param->event.u.syscall.abi) { + case LTTNG_KERNEL_SYSCALL_ABI_ALL: + ret = -EINVAL; + goto register_error; + case LTTNG_KERNEL_SYSCALL_ABI_NATIVE: + event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE; + break; + case LTTNG_KERNEL_SYSCALL_ABI_COMPAT: + event_notifier->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT; + break; + } + + if (!event_notifier->desc) { + ret = -EINVAL; + goto register_error; + } + break; + case LTTNG_KERNEL_UPROBE: + /* + * Needs to be explicitly enabled after creation, since + * we may want to apply filters. + */ + event_notifier->enabled = 0; + event_notifier->registered = 1; + + /* + * Populate lttng_event_notifier structure before + * event_notifier registration. + */ + smp_wmb(); + + ret = lttng_uprobes_register_event_notifier( + event_notifier_param->event.name, + event_notifier_param->event.u.uprobe.fd, + event_notifier); + if (ret) + goto register_error; + ret = try_module_get(event_notifier->desc->owner); + WARN_ON_ONCE(!ret); + break; + case LTTNG_KERNEL_KRETPROBE: + case LTTNG_KERNEL_FUNCTION: + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + goto register_error; + } + + list_add(&event_notifier->list, &event_notifier_group->event_notifiers_head); + hlist_add_head(&event_notifier->hlist, head); + + /* + * Clear the error counter bucket. The sessiond keeps track of which + * bucket is currently in use. We trust it. 
+ */ + if (event_notifier_group->error_counter) { + size_t dimension_index[1]; + + /* + * Check that the index is within the boundary of the counter. + */ + if (event_notifier->error_counter_index >= event_notifier_group->error_counter_len) { + printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n", + event_notifier_group->error_counter_len, event_notifier->error_counter_index); + ret = -EINVAL; + goto register_error; + } + + dimension_index[0] = event_notifier->error_counter_index; + ret = event_notifier_group->error_counter->ops->counter_clear( + event_notifier_group->error_counter->counter, + dimension_index); + if (ret) { + printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n", + event_notifier->error_counter_index); + goto register_error; + } + } + + return event_notifier; + +register_error: + kmem_cache_free(event_notifier_cache, event_notifier); +cache_error: +exist: +type_error: + return ERR_PTR(ret); +} + +int lttng_kernel_counter_read(struct lttng_counter *counter, + const size_t *dim_indexes, int32_t cpu, + int64_t *val, bool *overflow, bool *underflow) +{ + return counter->ops->counter_read(counter->counter, dim_indexes, + cpu, val, overflow, underflow); +} + +int lttng_kernel_counter_aggregate(struct lttng_counter *counter, + const size_t *dim_indexes, int64_t *val, + bool *overflow, bool *underflow) +{ + return counter->ops->counter_aggregate(counter->counter, dim_indexes, + val, overflow, underflow); +} + +int lttng_kernel_counter_clear(struct lttng_counter *counter, + const size_t *dim_indexes) +{ + return counter->ops->counter_clear(counter->counter, dim_indexes); +} + struct lttng_event *lttng_event_create(struct lttng_channel *chan, struct lttng_kernel_event *event_param, void *filter, @@ -838,6 +1307,23 @@ struct lttng_event *lttng_event_create(struct lttng_channel *chan, return event; } +struct lttng_event_notifier *lttng_event_notifier_create( + const struct lttng_event_desc *event_desc, + uint64_t id, uint64_t error_counter_index, + struct lttng_event_notifier_group *event_notifier_group, + struct lttng_kernel_event_notifier *event_notifier_param, + void *filter, enum lttng_kernel_instrumentation itype) +{ + struct lttng_event_notifier *event_notifier; + + mutex_lock(&sessions_mutex); + event_notifier = _lttng_event_notifier_create(event_desc, id, + error_counter_index, event_notifier_group, + event_notifier_param, filter, itype); + mutex_unlock(&sessions_mutex); + return event_notifier; +} + /* Only used for tracepoints for now. 
*/ static void register_event(struct lttng_event *event) @@ -856,7 +1342,7 @@ void register_event(struct lttng_event *event) event); break; case LTTNG_KERNEL_SYSCALL: - ret = lttng_syscall_filter_enable(event->chan, event); + ret = lttng_syscall_filter_enable_event(event->chan, event); break; case LTTNG_KERNEL_KPROBE: case LTTNG_KERNEL_UPROBE: @@ -891,7 +1377,7 @@ int _lttng_event_unregister(struct lttng_event *event) event); break; case LTTNG_KERNEL_KPROBE: - lttng_kprobes_unregister(event); + lttng_kprobes_unregister_event(event); ret = 0; break; case LTTNG_KERNEL_KRETPROBE: @@ -899,13 +1385,13 @@ int _lttng_event_unregister(struct lttng_event *event) ret = 0; break; case LTTNG_KERNEL_SYSCALL: - ret = lttng_syscall_filter_disable(event->chan, event); + ret = lttng_syscall_filter_disable_event(event->chan, event); break; case LTTNG_KERNEL_NOOP: ret = 0; break; case LTTNG_KERNEL_UPROBE: - lttng_uprobes_unregister(event); + lttng_uprobes_unregister_event(event); ret = 0; break; case LTTNG_KERNEL_FUNCTION: /* Fall-through */ @@ -917,6 +1403,79 @@ int _lttng_event_unregister(struct lttng_event *event) return ret; } +/* Only used for tracepoints for now. */ +static +void register_event_notifier(struct lttng_event_notifier *event_notifier) +{ + const struct lttng_event_desc *desc; + int ret = -EINVAL; + + if (event_notifier->registered) + return; + + desc = event_notifier->desc; + switch (event_notifier->instrumentation) { + case LTTNG_KERNEL_TRACEPOINT: + ret = lttng_wrapper_tracepoint_probe_register(desc->kname, + desc->event_notifier_callback, + event_notifier); + break; + case LTTNG_KERNEL_SYSCALL: + ret = lttng_syscall_filter_enable_event_notifier(event_notifier); + break; + case LTTNG_KERNEL_KPROBE: + case LTTNG_KERNEL_UPROBE: + ret = 0; + break; + case LTTNG_KERNEL_KRETPROBE: + case LTTNG_KERNEL_FUNCTION: + case LTTNG_KERNEL_NOOP: + default: + WARN_ON_ONCE(1); + } + if (!ret) + event_notifier->registered = 1; +} + +static +int _lttng_event_notifier_unregister( + struct lttng_event_notifier *event_notifier) +{ + const struct lttng_event_desc *desc; + int ret = -EINVAL; + + if (!event_notifier->registered) + return 0; + + desc = event_notifier->desc; + switch (event_notifier->instrumentation) { + case LTTNG_KERNEL_TRACEPOINT: + ret = lttng_wrapper_tracepoint_probe_unregister(event_notifier->desc->kname, + event_notifier->desc->event_notifier_callback, + event_notifier); + break; + case LTTNG_KERNEL_KPROBE: + lttng_kprobes_unregister_event_notifier(event_notifier); + ret = 0; + break; + case LTTNG_KERNEL_UPROBE: + lttng_uprobes_unregister_event_notifier(event_notifier); + ret = 0; + break; + case LTTNG_KERNEL_SYSCALL: + ret = lttng_syscall_filter_disable_event_notifier(event_notifier); + break; + case LTTNG_KERNEL_KRETPROBE: + case LTTNG_KERNEL_FUNCTION: + case LTTNG_KERNEL_NOOP: + default: + WARN_ON_ONCE(1); + } + if (!ret) + event_notifier->registered = 0; + return ret; +} + /* * Only used internally at session destruction. 
*/ @@ -925,11 +1484,11 @@ void _lttng_event_destroy(struct lttng_event *event) { switch (event->instrumentation) { case LTTNG_KERNEL_TRACEPOINT: - lttng_event_put(event->desc); + lttng_event_desc_put(event->desc); break; case LTTNG_KERNEL_KPROBE: module_put(event->desc->owner); - lttng_kprobes_destroy_private(event); + lttng_kprobes_destroy_event_private(event); break; case LTTNG_KERNEL_KRETPROBE: module_put(event->desc->owner); @@ -940,7 +1499,7 @@ void _lttng_event_destroy(struct lttng_event *event) break; case LTTNG_KERNEL_UPROBE: module_put(event->desc->owner); - lttng_uprobes_destroy_private(event); + lttng_uprobes_destroy_event_private(event); break; case LTTNG_KERNEL_FUNCTION: /* Fall-through */ default: @@ -951,6 +1510,36 @@ void _lttng_event_destroy(struct lttng_event *event) kmem_cache_free(event_cache, event); } +/* + * Only used internally at session destruction. + */ +static +void _lttng_event_notifier_destroy(struct lttng_event_notifier *event_notifier) +{ + switch (event_notifier->instrumentation) { + case LTTNG_KERNEL_TRACEPOINT: + lttng_event_desc_put(event_notifier->desc); + break; + case LTTNG_KERNEL_KPROBE: + module_put(event_notifier->desc->owner); + lttng_kprobes_destroy_event_notifier_private(event_notifier); + break; + case LTTNG_KERNEL_NOOP: + case LTTNG_KERNEL_SYSCALL: + break; + case LTTNG_KERNEL_UPROBE: + module_put(event_notifier->desc->owner); + lttng_uprobes_destroy_event_notifier_private(event_notifier); + break; + case LTTNG_KERNEL_KRETPROBE: + case LTTNG_KERNEL_FUNCTION: + default: + WARN_ON_ONCE(1); + } + list_del(&event_notifier->list); + kmem_cache_free(event_notifier_cache, event_notifier); +} + struct lttng_id_tracker *get_tracker(struct lttng_session *session, enum tracker_type tracker_type) { @@ -1226,7 +1815,6 @@ int lttng_match_enabler_name(const char *desc_name, return 1; } -static int lttng_desc_match_enabler(const struct lttng_event_desc *desc, struct lttng_enabler *enabler) { @@ -1292,7 +1880,7 @@ int lttng_desc_match_enabler(const struct lttng_event_desc *desc, return -EINVAL; } switch (enabler->event_param.u.syscall.match) { - case LTTNG_SYSCALL_MATCH_NAME: + case LTTNG_KERNEL_SYSCALL_MATCH_NAME: switch (enabler->format_type) { case LTTNG_ENABLER_FORMAT_STAR_GLOB: return lttng_match_enabler_star_glob(desc_name, enabler_name); @@ -1302,7 +1890,7 @@ int lttng_desc_match_enabler(const struct lttng_event_desc *desc, return -EINVAL; } break; - case LTTNG_SYSCALL_MATCH_NR: + case LTTNG_KERNEL_SYSCALL_MATCH_NR: return -EINVAL; /* Not implemented. 
*/ default: return -EINVAL; @@ -1330,6 +1918,23 @@ int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler, return 0; } +static +int lttng_event_notifier_enabler_match_event_notifier(struct lttng_event_notifier_enabler *event_notifier_enabler, + struct lttng_event_notifier *event_notifier) +{ + struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler( + event_notifier_enabler); + + if (base_enabler->event_param.instrumentation != event_notifier->instrumentation) + return 0; + if (lttng_desc_match_enabler(event_notifier->desc, base_enabler) + && event_notifier->group == event_notifier_enabler->group + && event_notifier->user_token == event_notifier_enabler->base.user_token) + return 1; + else + return 0; +} + static struct lttng_enabler_ref *lttng_enabler_ref( struct list_head *enablers_ref_list, @@ -1341,13 +1946,68 @@ struct lttng_enabler_ref *lttng_enabler_ref( if (enabler_ref->ref == enabler) return enabler_ref; } - return NULL; + return NULL; +} + +static +void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler) +{ + struct lttng_session *session = event_enabler->chan->session; + struct lttng_probe_desc *probe_desc; + const struct lttng_event_desc *desc; + int i; + struct list_head *probe_list; + + probe_list = lttng_get_probe_list_head(); + /* + * For each probe event, if we find that a probe event matches + * our enabler, create an associated lttng_event if not + * already present. + */ + list_for_each_entry(probe_desc, probe_list, head) { + for (i = 0; i < probe_desc->nr_events; i++) { + int found = 0; + struct hlist_head *head; + struct lttng_event *event; + + desc = probe_desc->event_desc[i]; + if (!lttng_desc_match_enabler(desc, + lttng_event_enabler_as_enabler(event_enabler))) + continue; + + /* + * Check if already created. + */ + head = utils_borrow_hash_table_bucket( + session->events_ht.table, LTTNG_EVENT_HT_SIZE, + desc->name); + lttng_hlist_for_each_entry(event, head, hlist) { + if (event->desc == desc + && event->chan == event_enabler->chan) + found = 1; + } + if (found) + continue; + + /* + * We need to create an event for this + * event probe. + */ + event = _lttng_event_create(event_enabler->chan, + NULL, NULL, desc, + LTTNG_KERNEL_TRACEPOINT); + if (!event) { + printk(KERN_INFO "LTTng: Unable to create event %s\n", + probe_desc->event_desc[i]->name); + } + } + } } static -void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_enabler) +void lttng_create_tracepoint_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler) { - struct lttng_session *session = event_enabler->chan->session; + struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group; struct lttng_probe_desc *probe_desc; const struct lttng_event_desc *desc; int i; @@ -1356,47 +2016,44 @@ void lttng_create_tracepoint_event_if_missing(struct lttng_event_enabler *event_ probe_list = lttng_get_probe_list_head(); /* * For each probe event, if we find that a probe event matches - * our enabler, create an associated lttng_event if not + * our enabler, create an associated lttng_event_notifier if not * already present. 
	 */
 	list_for_each_entry(probe_desc, probe_list, head) {
 		for (i = 0; i < probe_desc->nr_events; i++) {
 			int found = 0;
 			struct hlist_head *head;
-			const char *event_name;
-			size_t name_len;
-			uint32_t hash;
-			struct lttng_event *event;
+			struct lttng_event_notifier *event_notifier;
 
 			desc = probe_desc->event_desc[i];
 			if (!lttng_desc_match_enabler(desc,
-					lttng_event_enabler_as_enabler(event_enabler)))
+					lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
 				continue;
-			event_name = desc->name;
-			name_len = strlen(event_name);
 
 			/*
 			 * Check if already created.
 			 */
-			hash = jhash(event_name, name_len, 0);
-			head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
-			lttng_hlist_for_each_entry(event, head, hlist) {
-				if (event->desc == desc
-					&& event->chan == event_enabler->chan)
+			head = utils_borrow_hash_table_bucket(
+				event_notifier_group->event_notifiers_ht.table,
+				LTTNG_EVENT_NOTIFIER_HT_SIZE, desc->name);
+			lttng_hlist_for_each_entry(event_notifier, head, hlist) {
+				if (event_notifier->desc == desc
+					&& event_notifier->user_token == event_notifier_enabler->base.user_token)
 					found = 1;
 			}
 			if (found)
 				continue;
 
 			/*
-			 * We need to create an event for this
-			 * event probe.
+			 * We need to create a event_notifier for this event probe.
 			 */
-			event = _lttng_event_create(event_enabler->chan,
-					NULL, NULL, desc,
-					LTTNG_KERNEL_TRACEPOINT);
-			if (!event) {
-				printk(KERN_INFO "LTTng: Unable to create event %s\n",
+			event_notifier = _lttng_event_notifier_create(desc,
+				event_notifier_enabler->base.user_token,
+				event_notifier_enabler->error_counter_index,
+				event_notifier_group, NULL, NULL,
+				LTTNG_KERNEL_TRACEPOINT);
+			if (IS_ERR(event_notifier)) {
+				printk(KERN_INFO "Unable to create event_notifier %s\n",
 					probe_desc->event_desc[i]->name);
 			}
 		}
@@ -1408,7 +2065,18 @@ void lttng_create_syscall_event_if_missing(struct lttng_event_enabler *event_ena
 {
 	int ret;
 
-	ret = lttng_syscalls_register(event_enabler->chan, NULL);
+	ret = lttng_syscalls_register_event(event_enabler->chan, NULL);
+	WARN_ON_ONCE(ret);
+}
+
+static
+void lttng_create_syscall_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+	int ret;
+
+	ret = lttng_syscalls_register_event_notifier(event_notifier_enabler, NULL);
+	WARN_ON_ONCE(ret);
+	ret = lttng_syscals_create_matching_event_notifiers(event_notifier_enabler, NULL);
 	WARN_ON_ONCE(ret);
 }
 
@@ -1449,7 +2117,7 @@ int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
 	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
 			base_enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
 			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
-			base_enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
+			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
 			!strcmp(base_enabler->event_param.name, "*")) {
 		if (base_enabler->enabled)
 			WRITE_ONCE(chan->syscall_all, 1);
@@ -1486,14 +2154,106 @@ int lttng_event_enabler_ref_events(struct lttng_event_enabler *event_enabler)
 		 */
 		lttng_enabler_link_bytecode(event->desc,
 			lttng_static_ctx,
-			&event->bytecode_runtime_head,
-			lttng_event_enabler_as_enabler(event_enabler));
+			&event->filter_bytecode_runtime_head,
+			&lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
 
 		/* TODO: merge event context. */
 	}
 	return 0;
 }
 
+/*
+ * Create struct lttng_event_notifier if it is missing and present in the list of
+ * tracepoint probes.
+ * Should be called with sessions mutex held.
+ */
+static
+void lttng_create_event_notifier_if_missing(struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+	switch (event_notifier_enabler->base.event_param.instrumentation) {
+	case LTTNG_KERNEL_TRACEPOINT:
+		lttng_create_tracepoint_event_notifier_if_missing(event_notifier_enabler);
+		break;
+	case LTTNG_KERNEL_SYSCALL:
+		lttng_create_syscall_event_notifier_if_missing(event_notifier_enabler);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+}
+
+/*
+ * Create event_notifiers associated with a event_notifier enabler (if not already present).
+ */
+static
+int lttng_event_notifier_enabler_ref_event_notifiers(
+		struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+	struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
+	struct lttng_enabler *base_enabler = lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
+	struct lttng_event_notifier *event_notifier;
+
+	if (base_enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
+			base_enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
+			base_enabler->event_param.u.syscall.match == LTTNG_KERNEL_SYSCALL_MATCH_NAME &&
+			!strcmp(base_enabler->event_param.name, "*")) {
+
+		int enabled = base_enabler->enabled;
+		enum lttng_kernel_syscall_entryexit entryexit = base_enabler->event_param.u.syscall.entryexit;
+
+		if (entryexit == LTTNG_KERNEL_SYSCALL_ENTRY || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
+			WRITE_ONCE(event_notifier_group->syscall_all_entry, enabled);
+
+		if (entryexit == LTTNG_KERNEL_SYSCALL_EXIT || entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT)
+			WRITE_ONCE(event_notifier_group->syscall_all_exit, enabled);
+
+	}
+
+	/* First ensure that probe event_notifiers are created for this enabler. */
+	lttng_create_event_notifier_if_missing(event_notifier_enabler);
+
+	/* Link the created event_notifier with its associated enabler. */
+	list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) {
+		struct lttng_enabler_ref *enabler_ref;
+
+		if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier))
+			continue;
+
+		enabler_ref = lttng_enabler_ref(&event_notifier->enablers_ref_head,
+			lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+		if (!enabler_ref) {
+			/*
+			 * If no backward ref, create it.
+			 * Add backward ref from event_notifier to enabler.
+			 */
+			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
+			if (!enabler_ref)
+				return -ENOMEM;
+
+			enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
+				event_notifier_enabler);
+			list_add(&enabler_ref->node,
+				&event_notifier->enablers_ref_head);
+		}
+
+		/*
+		 * Link filter bytecodes if not linked yet.
+		 */
+		lttng_enabler_link_bytecode(event_notifier->desc,
+			lttng_static_ctx, &event_notifier->filter_bytecode_runtime_head,
+			&lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
+
+		/* Link capture bytecodes if not linked yet. */
+		lttng_enabler_link_bytecode(event_notifier->desc,
+			lttng_static_ctx, &event_notifier->capture_bytecode_runtime_head,
+			&event_notifier_enabler->capture_bytecode_head);
+
+		event_notifier->num_captures = event_notifier_enabler->num_captures;
+	}
+	return 0;
+}
+
 /*
  * Called at module load: connect the probe on all enablers matching
  * this event.
@@ -1508,6 +2268,39 @@ int lttng_fix_pending_events(void) return 0; } +static bool lttng_event_notifier_group_has_active_event_notifiers( + struct lttng_event_notifier_group *event_notifier_group) +{ + struct lttng_event_notifier_enabler *event_notifier_enabler; + + list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, + node) { + if (event_notifier_enabler->base.enabled) + return true; + } + return false; +} + +bool lttng_event_notifier_active(void) +{ + struct lttng_event_notifier_group *event_notifier_group; + + list_for_each_entry(event_notifier_group, &event_notifier_groups, node) { + if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group)) + return true; + } + return false; +} + +int lttng_fix_pending_event_notifiers(void) +{ + struct lttng_event_notifier_group *event_notifier_group; + + list_for_each_entry(event_notifier_group, &event_notifier_groups, node) + lttng_event_notifier_group_sync_enablers(event_notifier_group); + return 0; +} + struct lttng_event_enabler *lttng_event_enabler_create( enum lttng_enabler_format_type format_type, struct lttng_kernel_event *event_param, @@ -1552,17 +2345,17 @@ int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler) } static -int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler, +int lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler, struct lttng_kernel_filter_bytecode __user *bytecode) { - struct lttng_filter_bytecode_node *bytecode_node; + struct lttng_bytecode_node *bytecode_node; uint32_t bytecode_len; int ret; ret = get_user(bytecode_len, &bytecode->len); if (ret) return ret; - bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len, + bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len, GFP_KERNEL); if (!bytecode_node) return -ENOMEM; @@ -1571,6 +2364,7 @@ int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler, if (ret) goto error_free; + bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_FILTER; bytecode_node->enabler = enabler; /* Enforce length based on allocated size */ bytecode_node->bc.len = bytecode_len; @@ -1579,15 +2373,15 @@ int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler, return 0; error_free: - kfree(bytecode_node); + lttng_kvfree(bytecode_node); return ret; } -int lttng_event_enabler_attach_bytecode(struct lttng_event_enabler *event_enabler, +int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler, struct lttng_kernel_filter_bytecode __user *bytecode) { int ret; - ret = lttng_enabler_attach_bytecode( + ret = lttng_enabler_attach_filter_bytecode( lttng_event_enabler_as_enabler(event_enabler), bytecode); if (ret) goto error; @@ -1605,7 +2399,7 @@ int lttng_event_add_callsite(struct lttng_event *event, switch (event->instrumentation) { case LTTNG_KERNEL_UPROBE: - return lttng_uprobes_add_callsite(event, callsite); + return lttng_uprobes_event_add_callsite(event, callsite); default: return -EINVAL; } @@ -1620,12 +2414,12 @@ int lttng_event_enabler_attach_context(struct lttng_event_enabler *event_enabler static void lttng_enabler_destroy(struct lttng_enabler *enabler) { - struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node; + struct lttng_bytecode_node *filter_node, *tmp_filter_node; /* Destroy filter bytecode */ list_for_each_entry_safe(filter_node, tmp_filter_node, &enabler->filter_bytecode_head, node) { - kfree(filter_node); + lttng_kvfree(filter_node); } } @@ -1641,6 +2435,156 @@ void lttng_event_enabler_destroy(struct lttng_event_enabler 
*event_enabler) kfree(event_enabler); } +struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create( + struct lttng_event_notifier_group *event_notifier_group, + enum lttng_enabler_format_type format_type, + struct lttng_kernel_event_notifier *event_notifier_param) +{ + struct lttng_event_notifier_enabler *event_notifier_enabler; + + event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL); + if (!event_notifier_enabler) + return NULL; + + event_notifier_enabler->base.format_type = format_type; + INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head); + INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head); + + event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index; + event_notifier_enabler->num_captures = 0; + + memcpy(&event_notifier_enabler->base.event_param, &event_notifier_param->event, + sizeof(event_notifier_enabler->base.event_param)); + event_notifier_enabler->base.evtype = LTTNG_TYPE_ENABLER; + + event_notifier_enabler->base.enabled = 0; + event_notifier_enabler->base.user_token = event_notifier_param->event.token; + event_notifier_enabler->group = event_notifier_group; + + mutex_lock(&sessions_mutex); + list_add(&event_notifier_enabler->node, &event_notifier_enabler->group->enablers_head); + lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group); + + mutex_unlock(&sessions_mutex); + + return event_notifier_enabler; +} + +int lttng_event_notifier_enabler_enable( + struct lttng_event_notifier_enabler *event_notifier_enabler) +{ + mutex_lock(&sessions_mutex); + lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1; + lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group); + mutex_unlock(&sessions_mutex); + return 0; +} + +int lttng_event_notifier_enabler_disable( + struct lttng_event_notifier_enabler *event_notifier_enabler) +{ + mutex_lock(&sessions_mutex); + lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0; + lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group); + mutex_unlock(&sessions_mutex); + return 0; +} + +int lttng_event_notifier_enabler_attach_filter_bytecode( + struct lttng_event_notifier_enabler *event_notifier_enabler, + struct lttng_kernel_filter_bytecode __user *bytecode) +{ + int ret; + + ret = lttng_enabler_attach_filter_bytecode( + lttng_event_notifier_enabler_as_enabler(event_notifier_enabler), + bytecode); + if (ret) + goto error; + + lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group); + return 0; + +error: + return ret; +} + +int lttng_event_notifier_enabler_attach_capture_bytecode( + struct lttng_event_notifier_enabler *event_notifier_enabler, + struct lttng_kernel_capture_bytecode __user *bytecode) +{ + struct lttng_bytecode_node *bytecode_node; + struct lttng_enabler *enabler = + lttng_event_notifier_enabler_as_enabler(event_notifier_enabler); + uint32_t bytecode_len; + int ret; + + ret = get_user(bytecode_len, &bytecode->len); + if (ret) + return ret; + + bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len, + GFP_KERNEL); + if (!bytecode_node) + return -ENOMEM; + + ret = copy_from_user(&bytecode_node->bc, bytecode, + sizeof(*bytecode) + bytecode_len); + if (ret) + goto error_free; + + bytecode_node->type = LTTNG_BYTECODE_NODE_TYPE_CAPTURE; + bytecode_node->enabler = enabler; + + /* Enforce length based on allocated size */ + bytecode_node->bc.len = bytecode_len; + list_add_tail(&bytecode_node->node, 
&event_notifier_enabler->capture_bytecode_head); + + event_notifier_enabler->num_captures++; + + lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group); + goto end; + +error_free: + lttng_kvfree(bytecode_node); +end: + return ret; +} + +int lttng_event_notifier_add_callsite(struct lttng_event_notifier *event_notifier, + struct lttng_kernel_event_callsite __user *callsite) +{ + + switch (event_notifier->instrumentation) { + case LTTNG_KERNEL_UPROBE: + return lttng_uprobes_event_notifier_add_callsite(event_notifier, + callsite); + default: + return -EINVAL; + } +} + +int lttng_event_notifier_enabler_attach_context( + struct lttng_event_notifier_enabler *event_notifier_enabler, + struct lttng_kernel_context *context_param) +{ + return -ENOSYS; +} + +static +void lttng_event_notifier_enabler_destroy( + struct lttng_event_notifier_enabler *event_notifier_enabler) +{ + if (!event_notifier_enabler) { + return; + } + + list_del(&event_notifier_enabler->node); + + lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)); + kfree(event_notifier_enabler); +} + /* * lttng_session_sync_event_enablers should be called just before starting a * session. @@ -1712,8 +2656,8 @@ void lttng_session_sync_event_enablers(struct lttng_session *session) /* Enable filters */ list_for_each_entry(runtime, - &event->bytecode_runtime_head, node) - lttng_filter_sync_state(runtime); + &event->filter_bytecode_runtime_head, node) + lttng_bytecode_filter_sync_state(runtime); } } @@ -1733,6 +2677,78 @@ void lttng_session_lazy_sync_event_enablers(struct lttng_session *session) lttng_session_sync_event_enablers(session); } +static +void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group) +{ + struct lttng_event_notifier_enabler *event_notifier_enabler; + struct lttng_event_notifier *event_notifier; + + list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node) + lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler); + + /* + * For each event_notifier, if at least one of its enablers is enabled, + * we enable the event_notifier, else we disable it. + */ + list_for_each_entry(event_notifier, &event_notifier_group->event_notifiers_head, list) { + struct lttng_enabler_ref *enabler_ref; + struct lttng_bytecode_runtime *runtime; + int enabled = 0, has_enablers_without_bytecode = 0; + + switch (event_notifier->instrumentation) { + case LTTNG_KERNEL_TRACEPOINT: + case LTTNG_KERNEL_SYSCALL: + /* Enable event_notifiers */ + list_for_each_entry(enabler_ref, + &event_notifier->enablers_ref_head, node) { + if (enabler_ref->ref->enabled) { + enabled = 1; + break; + } + } + break; + default: + /* Not handled with sync. */ + continue; + } + + WRITE_ONCE(event_notifier->enabled, enabled); + /* + * Sync tracepoint registration with event_notifier enabled + * state. 
+ */ + if (enabled) { + if (!event_notifier->registered) + register_event_notifier(event_notifier); + } else { + if (event_notifier->registered) + _lttng_event_notifier_unregister(event_notifier); + } + + /* Check if has enablers without bytecode enabled */ + list_for_each_entry(enabler_ref, + &event_notifier->enablers_ref_head, node) { + if (enabler_ref->ref->enabled + && list_empty(&enabler_ref->ref->filter_bytecode_head)) { + has_enablers_without_bytecode = 1; + break; + } + } + event_notifier->has_enablers_without_bytecode = + has_enablers_without_bytecode; + + /* Enable filters */ + list_for_each_entry(runtime, + &event_notifier->filter_bytecode_runtime_head, node) + lttng_bytecode_filter_sync_state(runtime); + + /* Enable captures */ + list_for_each_entry(runtime, + &event_notifier->capture_bytecode_runtime_head, node) + lttng_bytecode_capture_sync_state(runtime); + } +} + /* * Serialize at most one packet worth of metadata into a metadata * channel. @@ -2984,6 +4000,29 @@ void lttng_transport_unregister(struct lttng_transport *transport) } EXPORT_SYMBOL_GPL(lttng_transport_unregister); +void lttng_counter_transport_register(struct lttng_counter_transport *transport) +{ + /* + * Make sure no page fault can be triggered by the module about to be + * registered. We deal with this here so we don't have to call + * vmalloc_sync_mappings() in each module's init. + */ + wrapper_vmalloc_sync_mappings(); + + mutex_lock(&sessions_mutex); + list_add_tail(&transport->node, <tng_counter_transport_list); + mutex_unlock(&sessions_mutex); +} +EXPORT_SYMBOL_GPL(lttng_counter_transport_register); + +void lttng_counter_transport_unregister(struct lttng_counter_transport *transport) +{ + mutex_lock(&sessions_mutex); + list_del(&transport->node); + mutex_unlock(&sessions_mutex); +} +EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister); + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) enum cpuhp_state lttng_hp_prepare; @@ -3136,7 +4175,12 @@ static int __init lttng_events_init(void) event_cache = KMEM_CACHE(lttng_event, 0); if (!event_cache) { ret = -ENOMEM; - goto error_kmem; + goto error_kmem_event; + } + event_notifier_cache = KMEM_CACHE(lttng_event_notifier, 0); + if (!event_notifier_cache) { + ret = -ENOMEM; + goto error_kmem_event_notifier; } ret = lttng_abi_init(); if (ret) @@ -3170,8 +4214,10 @@ error_hotplug: error_logger: lttng_abi_exit(); error_abi: + kmem_cache_destroy(event_notifier_cache); +error_kmem_event_notifier: kmem_cache_destroy(event_cache); -error_kmem: +error_kmem_event: lttng_tracepoint_exit(); error_tp: lttng_context_exit(); @@ -3206,6 +4252,7 @@ static void __exit lttng_events_exit(void) list_for_each_entry_safe(session, tmpsession, &sessions, list) lttng_session_destroy(session); kmem_cache_destroy(event_cache); + kmem_cache_destroy(event_notifier_cache); lttng_tracepoint_exit(); lttng_context_exit(); printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",