X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=liblttng-ust%2Fltt-events.c;h=ce4c2bb01719baab3b8bbf0f52fded018881bfef;hb=67e5f39161d80ea3d972a37ecf76f45a260c480d;hp=b67998c3ab310ca68a5ffae5dbb2ccb4cb2387ca;hpb=35897f8b2d311b756b81657dad9c53ef1c0fad8a;p=lttng-ust.git diff --git a/liblttng-ust/ltt-events.c b/liblttng-ust/ltt-events.c index b67998c3..ce4c2bb0 100644 --- a/liblttng-ust/ltt-events.c +++ b/liblttng-ust/ltt-events.c @@ -14,25 +14,37 @@ #include #include #include -#include -#include -#include #include -#include #include #include #include +#include +#include +#include +#include +#include +#include "clock.h" + +#include +#include +#include +#include + +#include #include -#include + +#include +#include +#include "error.h" + +#include "tracepoint-internal.h" #include "ltt-tracer.h" #include "ltt-tracer-core.h" #include "wait.h" #include "../libringbuffer/shm.h" -#include -#include -#include #include "jhash.h" -#include + +#define PROCNAME_LEN 17 /* * The sessions mutex is the centralized mutex across UST tracing @@ -53,6 +65,12 @@ void ust_unlock(void) static CDS_LIST_HEAD(sessions); +/* + * Wildcard list, containing the active wildcards. + * Protected by ust lock. + */ +static CDS_LIST_HEAD(wildcard_list); + /* * Pending probes hash table, containing the registered ltt events for * which tracepoint probes are still missing. Protected by the sessions @@ -65,10 +83,13 @@ static struct cds_hlist_head pending_probe_table[PENDING_PROBE_HASH_SIZE]; struct ust_pending_probe { struct ltt_event *event; struct cds_hlist_node node; + enum lttng_ust_loglevel_type loglevel_type; + int loglevel; char name[]; }; static void _ltt_event_destroy(struct ltt_event *event); +static void _ltt_wildcard_destroy(struct session_wildcard *sw); static void _ltt_channel_destroy(struct ltt_channel *chan); static int _ltt_event_unregister(struct ltt_event *event); static @@ -78,23 +99,92 @@ int _ltt_event_metadata_statedump(struct ltt_session *session, static int _ltt_session_metadata_statedump(struct ltt_session *session); +int ltt_loglevel_match(const struct lttng_event_desc *desc, + enum lttng_ust_loglevel_type req_type, + int req_loglevel) +{ + int ev_loglevel; + + if (req_type == LTTNG_UST_LOGLEVEL_ALL) + return 1; + if (!desc->loglevel) + ev_loglevel = TRACE_DEFAULT; + else + ev_loglevel = *(*desc->loglevel); + switch (req_type) { + case LTTNG_UST_LOGLEVEL_RANGE: + if (ev_loglevel <= req_loglevel || req_loglevel == -1) + return 1; + else + return 0; + case LTTNG_UST_LOGLEVEL_SINGLE: + if (ev_loglevel == req_loglevel || req_loglevel == -1) + return 1; + else + return 0; + case LTTNG_UST_LOGLEVEL_ALL: + default: + return 1; + } +} + +/* + * Return wildcard for a given event name if the event name match the + * one of the wildcards. + * Must be called with ust lock held. + * Returns NULL if not present. + */ +static +struct wildcard_entry *match_wildcard(const struct lttng_event_desc *desc) +{ + struct wildcard_entry *e; + + cds_list_for_each_entry(e, &wildcard_list, list) { + /* If only contain '*' */ + if (strlen(e->name) == 1) + goto possible_match; + /* Compare excluding final '*' */ + if (!strncmp(desc->name, e->name, strlen(e->name) - 1)) + goto possible_match; + continue; /* goto next, no match */ + possible_match: + if (ltt_loglevel_match(desc, + e->loglevel_type, + e->loglevel)) { + return e; + } + /* no match, loop to next */ + } + return NULL; +} + /* * called at event creation if probe is missing. * called with session mutex held. 
*/ static -int add_pending_probe(struct ltt_event *event, const char *name) +int add_pending_probe(struct ltt_event *event, const char *name, + enum lttng_ust_loglevel_type loglevel_type, + int loglevel) { struct cds_hlist_head *head; struct ust_pending_probe *e; - size_t name_len = strlen(name) + 1; - uint32_t hash = jhash(name, name_len - 1, 0); + size_t name_len = strlen(name); + uint32_t hash; + if (name_len > LTTNG_UST_SYM_NAME_LEN - 1) { + WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_SYM_NAME_LEN - 1); + name_len = LTTNG_UST_SYM_NAME_LEN - 1; + } + hash = jhash(name, name_len, 0); head = &pending_probe_table[hash & (PENDING_PROBE_HASH_SIZE - 1)]; e = zmalloc(sizeof(struct ust_pending_probe) + name_len); if (!e) return -ENOMEM; - memcpy(&e->name[0], name, name_len); + memcpy(&e->name[0], name, name_len + 1); + e->name[name_len] = '\0'; + e->loglevel_type = loglevel_type; + e->loglevel = loglevel; cds_hlist_add_head(&e->node, head); e->event = event; event->pending_probe = e; @@ -126,17 +216,61 @@ int pending_probe_fix_events(const struct lttng_event_desc *desc) struct cds_hlist_node *node, *p; struct ust_pending_probe *e; const char *name = desc->name; - size_t name_len = strlen(name) + 1; - uint32_t hash = jhash(name, name_len - 1, 0); int ret = 0; + struct lttng_ust_event event_param; + size_t name_len = strlen(name); + uint32_t hash; + + /* Wildcard */ + { + struct wildcard_entry *wildcard; + + wildcard = match_wildcard(desc); + if (strcmp(desc->name, "lttng_ust:metadata") && wildcard) { + struct session_wildcard *sw; + + cds_list_for_each_entry(sw, &wildcard->session_list, + session_list) { + struct ltt_event *ev; + int ret; + + memcpy(&event_param, &sw->event_param, + sizeof(event_param)); + memcpy(event_param.name, + desc->name, + sizeof(event_param.name)); + /* create event */ + ret = ltt_event_create(sw->chan, + &event_param, NULL, + &ev); + if (ret) { + DBG("Error creating event"); + continue; + } + cds_list_add(&ev->wildcard_list, + &sw->events); + } + } + } + if (name_len > LTTNG_UST_SYM_NAME_LEN - 1) { + WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_SYM_NAME_LEN - 1); + name_len = LTTNG_UST_SYM_NAME_LEN - 1; + } + hash = jhash(name, name_len, 0); head = &pending_probe_table[hash & (PENDING_PROBE_HASH_SIZE - 1)]; cds_hlist_for_each_entry_safe(e, node, p, head, node) { struct ltt_event *event; struct ltt_channel *chan; - if (strcmp(name, e->name)) + if (!ltt_loglevel_match(desc, + e->loglevel_type, + e->loglevel)) { + continue; + } + if (strncmp(name, e->name, LTTNG_UST_SYM_NAME_LEN - 1)) { continue; + } event = e->event; chan = event->chan; assert(!event->desc); @@ -145,7 +279,7 @@ int pending_probe_fix_events(const struct lttng_event_desc *desc) remove_pending_probe(e); ret |= __tracepoint_probe_register(name, event->desc->probe_callback, - event); + event, event->desc->signature); if (ret) continue; event->id = chan->free_event_id++; @@ -169,6 +303,7 @@ struct ltt_session *ltt_session_create(void) return NULL; CDS_INIT_LIST_HEAD(&session->chan); CDS_INIT_LIST_HEAD(&session->events); + CDS_INIT_LIST_HEAD(&session->wildcards); uuid_generate(session->uuid); cds_list_add(&session->list, &sessions); return session; @@ -178,6 +313,7 @@ void ltt_session_destroy(struct ltt_session *session) { struct ltt_channel *chan, *tmpchan; struct ltt_event *event, *tmpevent; + struct session_wildcard *wildcard, *tmpwildcard; int ret; CMM_ACCESS_ONCE(session->active) = 0; @@ -186,6 +322,8 @@ void 
ltt_session_destroy(struct ltt_session *session) WARN_ON(ret); } synchronize_trace(); /* Wait for in-flight events to complete */ + cds_list_for_each_entry_safe(wildcard, tmpwildcard, &session->wildcards, list) + _ltt_wildcard_destroy(wildcard); cds_list_for_each_entry_safe(event, tmpevent, &session->events, list) _ltt_event_destroy(event); cds_list_for_each_entry_safe(chan, tmpchan, &session->chan, list) @@ -293,8 +431,8 @@ struct ltt_channel *ltt_channel_create(struct ltt_session *session, size_t subbuf_size, size_t num_subbuf, unsigned int switch_timer_interval, unsigned int read_timer_interval, - int *shm_fd, int *wait_fd, - uint64_t *memory_map_size, + int **shm_fd, int **wait_fd, + uint64_t **memory_map_size, struct ltt_channel *chan_priv_init) { struct ltt_channel *chan = NULL; @@ -346,25 +484,55 @@ void _ltt_channel_destroy(struct ltt_channel *chan) /* * Supports event creation while tracing session is active. */ -struct ltt_event *ltt_event_create(struct ltt_channel *chan, - struct lttng_ust_event *event_param, - void *filter) +int ltt_event_create(struct ltt_channel *chan, + struct lttng_ust_event *event_param, + void *filter, + struct ltt_event **_event) { + const struct lttng_event_desc *desc = NULL; /* silence gcc */ struct ltt_event *event; - int ret; + int ret = 0; - if (chan->used_event_id == -1UL) + if (chan->used_event_id == -1U) { + ret = -ENOMEM; goto full; + } /* * This is O(n^2) (for each event, the loop is called at event * creation). Might require a hash if we have lots of events. */ - cds_list_for_each_entry(event, &chan->session->events, list) - if (event->desc && !strcmp(event->desc->name, event_param->name)) + cds_list_for_each_entry(event, &chan->session->events, list) { + if (event->desc && !strncmp(event->desc->name, + event_param->name, + LTTNG_UST_SYM_NAME_LEN - 1)) { + ret = -EEXIST; goto exist; + } + } + + /* + * Check if loglevel match. Refuse to connect event if not. + */ + if (event_param->instrumentation == LTTNG_UST_TRACEPOINT) { + desc = ltt_event_get(event_param->name); + if (desc) { + if (!ltt_loglevel_match(desc, + event_param->loglevel_type, + event_param->loglevel)) { + ret = -EPERM; + goto no_loglevel_match; + } + } + /* + * If descriptor is not there, it will be added to + * pending probes. + */ + } event = zmalloc(sizeof(struct ltt_event)); - if (!event) + if (!event) { + ret = -ENOMEM; goto cache_error; + } event->chan = chan; event->filter = filter; /* @@ -378,11 +546,11 @@ struct ltt_event *ltt_event_create(struct ltt_channel *chan, cmm_smp_wmb(); switch (event_param->instrumentation) { case LTTNG_UST_TRACEPOINT: - event->desc = ltt_event_get(event_param->name); + event->desc = desc; if (event->desc) { ret = __tracepoint_probe_register(event_param->name, event->desc->probe_callback, - event); + event, event->desc->signature); if (ret) goto register_error; event->id = chan->free_event_id++; @@ -392,7 +560,9 @@ struct ltt_event *ltt_event_create(struct ltt_channel *chan, * waiting for the probe to register, and the event->id * stays unallocated. 
*/ - ret = add_pending_probe(event, event_param->name); + ret = add_pending_probe(event, event_param->name, + event_param->loglevel_type, + event_param->loglevel); if (ret) goto add_pending_error; } @@ -406,7 +576,8 @@ struct ltt_event *ltt_event_create(struct ltt_channel *chan, goto statedump_error; } cds_list_add(&event->list, &chan->session->events); - return event; + *_event = event; + return 0; statedump_error: if (event->desc) { @@ -419,9 +590,10 @@ add_pending_error: register_error: free(event); cache_error: +no_loglevel_match: exist: full: - return NULL; + return ret; } /* @@ -706,6 +878,7 @@ int _ltt_event_metadata_statedump(struct ltt_session *session, struct ltt_event *event) { int ret = 0; + int loglevel = TRACE_DEFAULT; if (event->metadata_dumped || !CMM_ACCESS_ONCE(session->active)) return 0; @@ -728,6 +901,15 @@ int _ltt_event_metadata_statedump(struct ltt_session *session, if (ret) goto end; + if (event->desc->loglevel) + loglevel = *(*event->desc->loglevel); + + ret = lttng_metadata_printf(session, + " loglevel = %d;\n", + loglevel); + if (ret) + goto end; + if (event->ctx) { ret = lttng_metadata_printf(session, " context := struct {\n"); @@ -822,8 +1004,8 @@ int _ltt_stream_packet_context_declare(struct ltt_session *session) { return lttng_metadata_printf(session, "struct packet_context {\n" - " uint64_t timestamp_begin;\n" - " uint64_t timestamp_end;\n" + " uint64_clock_monotonic_t timestamp_begin;\n" + " uint64_clock_monotonic_t timestamp_end;\n" " uint32_t events_discarded;\n" " uint32_t content_size;\n" " uint32_t packet_size;\n" @@ -849,11 +1031,11 @@ int _ltt_event_header_declare(struct ltt_session *session) " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n" " variant {\n" " struct {\n" - " uint27_t timestamp;\n" + " uint27_clock_monotonic_t timestamp;\n" " } compact;\n" " struct {\n" " uint32_t id;\n" - " uint64_t timestamp;\n" + " uint64_clock_monotonic_t timestamp;\n" " } extended;\n" " } v;\n" "} align(%u);\n" @@ -862,11 +1044,11 @@ int _ltt_event_header_declare(struct ltt_session *session) " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n" " variant {\n" " struct {\n" - " uint32_t timestamp;\n" + " uint32_clock_monotonic_t timestamp;\n" " } compact;\n" " struct {\n" " uint32_t id;\n" - " uint64_t timestamp;\n" + " uint64_clock_monotonic_t timestamp;\n" " } extended;\n" " } v;\n" "} align(%u);\n\n", @@ -875,6 +1057,31 @@ int _ltt_event_header_declare(struct ltt_session *session) ); } +/* + * Approximation of NTP time of day to clock monotonic correlation, + * taken at start of trace. + * Yes, this is only an approximation. Yes, we can (and will) do better + * in future versions. + */ +static +uint64_t measure_clock_offset(void) +{ + uint64_t offset, monotonic[2], realtime; + struct timespec rts = { 0, 0 }; + int ret; + + monotonic[0] = trace_clock_read64(); + ret = clock_gettime(CLOCK_REALTIME, &rts); + if (ret < 0) + return 0; + monotonic[1] = trace_clock_read64(); + offset = (monotonic[0] + monotonic[1]) >> 1; + realtime = rts.tv_sec * 1000000000ULL; + realtime += rts.tv_nsec; + offset = realtime - offset; + return offset; +} + /* * Output metadata into this session's metadata buffers. 
*/ @@ -882,10 +1089,11 @@ static int _ltt_session_metadata_statedump(struct ltt_session *session) { unsigned char *uuid_c = session->uuid; - char uuid_s[37]; + char uuid_s[37], clock_uuid_s[CLOCK_UUID_LEN]; struct ltt_channel *chan; struct ltt_event *event; int ret = 0; + char procname[PROCNAME_LEN] = ""; if (!CMM_ACCESS_ONCE(session->active)) return 0; @@ -926,8 +1134,8 @@ int _ltt_session_metadata_statedump(struct ltt_session *session) lttng_alignof(uint16_t) * CHAR_BIT, lttng_alignof(uint32_t) * CHAR_BIT, lttng_alignof(uint64_t) * CHAR_BIT, - CTF_VERSION_MAJOR, - CTF_VERSION_MINOR, + CTF_SPEC_MAJOR, + CTF_SPEC_MINOR, uuid_s, #if (BYTE_ORDER == BIG_ENDIAN) "be" @@ -938,6 +1146,78 @@ int _ltt_session_metadata_statedump(struct ltt_session *session) if (ret) goto end; + /* ignore error, just use empty string if error. */ + (void) prctl(PR_GET_NAME, (unsigned long) procname, 0, 0, 0); + procname[PROCNAME_LEN - 1] = '\0'; + ret = lttng_metadata_printf(session, + "env {\n" + " vpid = %d;\n" + " procname = \"%s\";\n" + " domain = \"ust\";\n" + " tracer_name = \"lttng-ust\";\n" + " tracer_major = %u;\n" + " tracer_minor = %u;\n" + " tracer_patchlevel = %u;\n" + "};\n\n", + (int) getpid(), + procname, + LTTNG_UST_MAJOR_VERSION, + LTTNG_UST_MINOR_VERSION, + LTTNG_UST_PATCHLEVEL_VERSION + ); + if (ret) + goto end; + + ret = lttng_metadata_printf(session, + "clock {\n" + " name = %s;\n", + "monotonic" + ); + if (ret) + goto end; + + if (!trace_clock_uuid(clock_uuid_s)) { + ret = lttng_metadata_printf(session, + " uuid = \"%s\";\n", + clock_uuid_s + ); + if (ret) + goto end; + } + + ret = lttng_metadata_printf(session, + " description = \"Monotonic Clock\";\n" + " freq = %" PRIu64 "; /* Frequency, in Hz */\n" + " /* clock value offset from Epoch is: offset * (1/freq) */\n" + " offset = %" PRIu64 ";\n" + "};\n\n", + trace_clock_freq(), + measure_clock_offset() + ); + if (ret) + goto end; + + ret = lttng_metadata_printf(session, + "typealias integer {\n" + " size = 27; align = 1; signed = false;\n" + " map = clock.monotonic.value;\n" + "} := uint27_clock_monotonic_t;\n" + "\n" + "typealias integer {\n" + " size = 32; align = %u; signed = false;\n" + " map = clock.monotonic.value;\n" + "} := uint32_clock_monotonic_t;\n" + "\n" + "typealias integer {\n" + " size = 64; align = %u; signed = false;\n" + " map = clock.monotonic.value;\n" + "} := uint64_clock_monotonic_t;\n\n", + lttng_alignof(uint32_t) * CHAR_BIT, + lttng_alignof(uint64_t) * CHAR_BIT + ); + if (ret) + goto end; + ret = _ltt_stream_packet_context_declare(session); if (ret) goto end; @@ -970,3 +1250,221 @@ void lttng_ust_events_exit(void) cds_list_for_each_entry_safe(session, tmpsession, &sessions, list) ltt_session_destroy(session); } + +/* WILDCARDS */ + +static +int wildcard_same_loglevel(struct wildcard_entry *e, + enum lttng_ust_loglevel_type loglevel_type, + int loglevel) +{ + if (e->loglevel_type == loglevel_type && e->loglevel == loglevel) + return 1; + else + return 0; +} + +#if 0 +static +int wildcard_is_within(struct wildcard_entry *e, + enum lttng_ust_loglevel_type loglevel_type, + int loglevel) +{ + if (e->loglevel_type == LTTNG_UST_LOGLEVEL_ALL + || e->loglevel == -1) + return 1; + switch (e->loglevel_type) { + case LTTNG_UST_LOGLEVEL_RANGE: + switch (loglevel_type) { + case LTTNG_UST_LOGLEVEL_RANGE: + if (e->loglevel >= loglevel) + return 1; + else + return 0; + case LTTNG_UST_LOGLEVEL_SINGLE: + if (e->loglevel <= 0 && loglevel == 0) + return 1; + else + return 0; + } + case LTTNG_UST_LOGLEVEL_SINGLE: + switch (loglevel_type) { 
+ case LTTNG_UST_LOGLEVEL_RANGE: + if (loglevel <= 0) + return 1; + else + return 0; + case LTTNG_UST_LOGLEVEL_SINGLE: + if (e->loglevel == loglevel) + return 1; + else + return 0; + } + } +} +#endif + +/* + * Add the wildcard to the wildcard list. Must be called with + * ust lock held. + */ +static +struct session_wildcard *add_wildcard(struct ltt_channel *chan, + struct lttng_ust_event *event_param) +{ + struct wildcard_entry *e; + struct session_wildcard *sw; + size_t name_len = strlen(event_param->name) + 1; + int found = 0; + + /* + * Try to find global wildcard entry. Given that this is shared + * across all sessions, we need to check for exact loglevel + * match, not just whether contained within the existing ones. + */ + cds_list_for_each_entry(e, &wildcard_list, list) { + if (!strncmp(event_param->name, e->name, + LTTNG_UST_SYM_NAME_LEN - 1)) { + if (wildcard_same_loglevel(e, + event_param->loglevel_type, + event_param->loglevel)) { + found = 1; + break; + } + } + } + + if (!found) { + /* + * Create global wildcard entry if not found. Using + * zmalloc here to allocate a variable length element. + * Could cause some memory fragmentation if overused. + */ + e = zmalloc(sizeof(struct wildcard_entry) + name_len); + if (!e) + return ERR_PTR(-ENOMEM); + memcpy(&e->name[0], event_param->name, name_len); + cds_list_add(&e->list, &wildcard_list); + CDS_INIT_LIST_HEAD(&e->session_list); + } + + /* session wildcard */ + cds_list_for_each_entry(sw, &e->session_list, session_list) { + if (chan == sw->chan) { + DBG("wildcard %s busy for this channel", + event_param->name); + return ERR_PTR(-EEXIST); /* Already there */ + } + } + sw = zmalloc(sizeof(struct session_wildcard)); + if (!sw) + return ERR_PTR(-ENOMEM); + sw->chan = chan; + sw->enabled = 1; + memcpy(&sw->event_param, event_param, sizeof(sw->event_param)); + sw->event_param.instrumentation = LTTNG_UST_TRACEPOINT; + sw->event_param.loglevel_type = event_param->loglevel_type; + sw->event_param.loglevel = event_param->loglevel; + CDS_INIT_LIST_HEAD(&sw->events); + cds_list_add(&sw->list, &chan->session->wildcards); + cds_list_add(&sw->session_list, &e->session_list); + sw->entry = e; + ltt_probes_create_wildcard_events(e, sw); + return sw; +} + +/* + * Remove the wildcard from the wildcard list. Must be called with + * ust_lock held. Only called at session teardown. + */ +static +void _remove_wildcard(struct session_wildcard *wildcard) +{ + struct ltt_event *ev, *tmp; + + /* + * Just remove the events owned (for enable/disable) by this + * wildcard from the list. The session teardown will take care + * of freeing the event memory. 
+ */ + cds_list_for_each_entry_safe(ev, tmp, &wildcard->events, + wildcard_list) { + cds_list_del(&ev->wildcard_list); + } + cds_list_del(&wildcard->session_list); + cds_list_del(&wildcard->list); + if (cds_list_empty(&wildcard->entry->session_list)) { + cds_list_del(&wildcard->entry->list); + free(wildcard->entry); + } + free(wildcard); +} + +int ltt_wildcard_create(struct ltt_channel *chan, + struct lttng_ust_event *event_param, + struct session_wildcard **_sw) +{ + struct session_wildcard *sw; + + sw = add_wildcard(chan, event_param); + if (!sw || IS_ERR(sw)) { + return PTR_ERR(sw); + } + *_sw = sw; + return 0; +} + +static +void _ltt_wildcard_destroy(struct session_wildcard *sw) +{ + _remove_wildcard(sw); +} + +int ltt_wildcard_enable(struct session_wildcard *wildcard) +{ + struct ltt_event *ev; + int ret; + + if (wildcard->enabled) + return -EEXIST; + cds_list_for_each_entry(ev, &wildcard->events, wildcard_list) { + ret = ltt_event_enable(ev); + if (ret) { + DBG("Error: enable error.\n"); + return ret; + } + } + wildcard->enabled = 1; + return 0; +} + +int ltt_wildcard_disable(struct session_wildcard *wildcard) +{ + struct ltt_event *ev; + int ret; + + if (!wildcard->enabled) + return -EEXIST; + cds_list_for_each_entry(ev, &wildcard->events, wildcard_list) { + ret = ltt_event_disable(ev); + if (ret) { + DBG("Error: disable error.\n"); + return ret; + } + } + wildcard->enabled = 0; + return 0; +} + +/* + * Take the TLS "fault" in libuuid if dlopen'd, which can take the + * dynamic linker mutex, outside of the UST lock, since the UST lock is + * taken in constructors, which are called with dynamic linker mutex + * held. + */ +void lttng_fixup_event_tls(void) +{ + unsigned char uuid[37]; + + (void) uuid_generate(uuid); +}
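
Illustrative note (not part of the patch above): the name-matching rule added in match_wildcard() is a plain prefix comparison. An entry consisting of a lone "*" matches every event name; any other entry matches when the event name begins with the entry name minus its trailing '*'. Loglevel filtering is then applied separately through ltt_loglevel_match(). The standalone sketch below, using a hypothetical helper matches_wildcard() (not a function from the patch), reproduces only that name rule, assuming wildcard entries always end in '*' as they do in the wildcard list:

/*
 * Standalone sketch of the wildcard name-matching rule, for illustration
 * only. Assumes every wildcard entry ends with '*', as in wildcard_list.
 */
#include <stdio.h>
#include <string.h>

static int matches_wildcard(const char *event_name, const char *wildcard)
{
	size_t len = strlen(wildcard);

	/* A lone "*" matches every event name. */
	if (len == 1 && wildcard[0] == '*')
		return 1;
	/* Otherwise compare the prefix, excluding the final '*'. */
	return strncmp(event_name, wildcard, len - 1) == 0;
}

int main(void)
{
	/* Prints 1: event name starts with "ust_tests_hello:". */
	printf("%d\n", matches_wildcard("ust_tests_hello:tptest", "ust_tests_hello:*"));
	/* Prints 0: "lttng_ust:metadata" does not start with "ust_". */
	printf("%d\n", matches_wildcard("lttng_ust:metadata", "ust_*"));
	/* Prints 1: lone "*" matches everything. */
	printf("%d\n", matches_wildcard("anything", "*"));
	return 0;
}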