#include <usterr-signal-safe.h>
#include <helper.h>
-#include <ust-ctl.h>
+#include <lttng/ust-ctl.h>
#include <ust-comm.h>
#include "error.h"
#include "compat.h"
#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
+#include "lttng-ust-baddr.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"
/*
- * The sessions mutex is the centralized mutex across UST tracing
- * control and probe registration. All operations within this file are
- * called by the communication thread, under ust_lock protection.
+ * All operations within this file are called by the communication
+ * thread, under ust_lock protection.
*/
-static pthread_mutex_t sessions_mutex = PTHREAD_MUTEX_INITIALIZER;
-void ust_lock(void)
-{
- pthread_mutex_lock(&sessions_mutex);
-}
+static CDS_LIST_HEAD(sessions);
-void ust_unlock(void)
+struct cds_list_head *_lttng_get_sessions(void)
{
- pthread_mutex_unlock(&sessions_mutex);
+ return &sessions;
}
-static CDS_LIST_HEAD(sessions);
-
static void _lttng_event_destroy(struct lttng_event *event);
-static int _lttng_event_unregister(struct lttng_event *event);
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session);
enum lttng_ust_loglevel_type req_type,
int req_loglevel)
{
- if (req_type == LTTNG_UST_LOGLEVEL_ALL)
- return 1;
if (!has_loglevel)
loglevel = TRACE_DEFAULT;
switch (req_type) {
case LTTNG_UST_LOGLEVEL_RANGE:
- if (loglevel <= req_loglevel || req_loglevel == -1)
+ if (loglevel <= req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
return 1;
else
return 0;
case LTTNG_UST_LOGLEVEL_SINGLE:
- if (loglevel == req_loglevel || req_loglevel == -1)
+ if (loglevel == req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
return 1;
else
return 0;
case LTTNG_UST_LOGLEVEL_ALL:
default:
- return 1;
+ if (loglevel <= TRACE_DEBUG)
+ return 1;
+ else
+ return 0;
}
}
channel_destroy(chan, handle, 0);
}
+static
+void register_event(struct lttng_event *event)
+{
+ int ret;
+ const struct lttng_event_desc *desc;
+
+ assert(event->registered == 0);
+ desc = event->desc;
+ ret = __tracepoint_probe_register(desc->name,
+ desc->probe_callback,
+ event, desc->signature);
+ WARN_ON_ONCE(ret);
+ if (!ret)
+ event->registered = 1;
+}
+
+static
+void unregister_event(struct lttng_event *event)
+{
+ int ret;
+ const struct lttng_event_desc *desc;
+
+ assert(event->registered == 1);
+ desc = event->desc;
+ ret = __tracepoint_probe_unregister(desc->name,
+ desc->probe_callback,
+ event);
+ WARN_ON_ONCE(ret);
+ if (!ret)
+ event->registered = 0;
+}
+
+/*
+ * Only used internally at session destruction.
+ */
+static
+void _lttng_event_unregister(struct lttng_event *event)
+{
+ if (event->registered)
+ unregister_event(event);
+}
+
void lttng_session_destroy(struct lttng_session *session)
{
struct lttng_channel *chan, *tmpchan;
struct lttng_event *event, *tmpevent;
struct lttng_enabler *enabler, *tmpenabler;
- int ret;
CMM_ACCESS_ONCE(session->active) = 0;
cds_list_for_each_entry(event, &session->events_head, node) {
- ret = _lttng_event_unregister(event);
- WARN_ON(ret);
+ _lttng_event_unregister(event);
}
synchronize_trace(); /* Wait for in-flight events to complete */
cds_list_for_each_entry_safe(enabler, tmpenabler,
if (notify_socket < 0)
return notify_socket;
- /* We need to sync enablers with session before activation. */
- lttng_session_sync_enablers(session);
+ /* Set transient enabler state to "enabled" */
+ session->tstate = 1;
/*
* Snapshot the number of events per channel to know the type of header
*/
cds_list_for_each_entry(chan, &session->chan_head, node) {
const struct lttng_ctx *ctx;
- const struct lttng_event_field *fields = NULL;
+ const struct lttng_ctx_field *fields = NULL;
size_t nr_fields = 0;
+ uint32_t chan_id;
/* don't change it if session stop/restart */
if (chan->header_type)
ctx = chan->ctx;
if (ctx) {
nr_fields = ctx->nr_fields;
- fields = &ctx->fields->event_field;
+ fields = ctx->fields;
}
ret = ustcomm_register_channel(notify_socket,
session->objd,
chan->objd,
nr_fields,
fields,
- &chan->id,
+ &chan_id,
&chan->header_type);
if (ret) {
DBG("Error (%d) registering channel to sessiond", ret);
return ret;
}
+ if (chan_id != chan->id) {
+ DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
+ chan_id, chan->id);
+ return -EINVAL;
+ }
}
+ /* We need to sync enablers with session before activation. */
+ lttng_session_sync_enablers(session);
+
+ /* Set atomically the state to "active" */
CMM_ACCESS_ONCE(session->active) = 1;
CMM_ACCESS_ONCE(session->been_active) = 1;
+
+ session->statedump_pending = 1;
+ lttng_ust_sockinfo_session_enabled(session->owner);
end:
return ret;
}
ret = -EBUSY;
goto end;
}
+ /* Set atomically the state to "inactive" */
CMM_ACCESS_ONCE(session->active) = 0;
+
+ /* Set transient enabler state to "disabled" */
+ session->tstate = 0;
+ lttng_session_sync_enablers(session);
end:
return ret;
}
int lttng_channel_enable(struct lttng_channel *channel)
{
- int old;
+ int ret = 0;
- old = uatomic_xchg(&channel->enabled, 1);
- if (old)
- return -EEXIST;
- return 0;
+ if (channel->enabled) {
+ ret = -EBUSY;
+ goto end;
+ }
+ /* Set transient enabler state to "enabled" */
+ channel->tstate = 1;
+ lttng_session_sync_enablers(channel->session);
+ /* Set atomically the state to "enabled" */
+ CMM_ACCESS_ONCE(channel->enabled) = 1;
+end:
+ return ret;
}
int lttng_channel_disable(struct lttng_channel *channel)
{
- int old;
+ int ret = 0;
- old = uatomic_xchg(&channel->enabled, 0);
- if (!old)
- return -EEXIST;
- return 0;
+ if (!channel->enabled) {
+ ret = -EBUSY;
+ goto end;
+ }
+ /* Set atomically the state to "disabled" */
+ CMM_ACCESS_ONCE(channel->enabled) = 0;
+	/* Set transient enabler state to "disabled" */
+ channel->tstate = 0;
+ lttng_session_sync_enablers(channel->session);
+end:
+ return ret;
}
/*
head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
cds_hlist_for_each_entry(event, node, head, hlist) {
assert(event->desc);
- if (!strncmp(event->desc->name,
- desc->name,
- LTTNG_UST_SYM_NAME_LEN - 1)) {
+ if (!strncmp(event->desc->name, desc->name,
+ LTTNG_UST_SYM_NAME_LEN - 1)
+ && chan == event->chan) {
ret = -EEXIST;
goto exist;
}
}
event->chan = chan;
- event->enabled = 1;
+ /* Event will be enabled by enabler sync. */
+ event->enabled = 0;
+ event->registered = 0;
CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
event->desc = desc;
/* Populate lttng_event structure before tracepoint registration. */
cmm_smp_wmb();
- ret = __tracepoint_probe_register(event_name,
- desc->probe_callback,
- event, desc->signature);
- if (ret)
- goto tracepoint_register_error;
-
cds_list_add(&event->node, &chan->session->events_head);
cds_hlist_add_head(&event->hlist, head);
return 0;
-tracepoint_register_error:
sessiond_register_error:
free(event);
cache_error:
struct lttng_enabler *enabler)
{
int loglevel = 0;
- unsigned int has_loglevel;
+ unsigned int has_loglevel = 0;
assert(enabler->type == LTTNG_ENABLER_WILDCARD);
/* Compare excluding final '*' */
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
struct lttng_enabler *enabler)
{
+ struct lttng_ust_excluder_node *excluder;
+
+ /* If event matches with an excluder, return 'does not match' */
+ cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
+ int count;
+
+ for (count = 0; count < excluder->excluder.count; count++) {
+ int found, len;
+ char *excluder_name;
+
+ excluder_name = (char *) (excluder->excluder.names)
+ + count * LTTNG_UST_SYM_NAME_LEN;
+ len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
+ if (len > 0 && excluder_name[len - 1] == '*') {
+ found = !strncmp(desc->name, excluder_name,
+ len - 1);
+ } else {
+ found = !strncmp(desc->name, excluder_name,
+ LTTNG_UST_SYM_NAME_LEN - 1);
+ }
+ if (found) {
+ return 0;
+ }
+ }
+ }
switch (enabler->type) {
case LTTNG_ENABLER_WILDCARD:
return lttng_desc_match_wildcard_enabler(desc, enabler);
int lttng_event_match_enabler(struct lttng_event *event,
struct lttng_enabler *enabler)
{
- return lttng_desc_match_enabler(event->desc, enabler);
+ if (lttng_desc_match_enabler(event->desc, enabler)
+ && event->chan == enabler->chan)
+ return 1;
+ else
+ return 0;
}
static
hash = jhash(event_name, name_len, 0);
head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
cds_hlist_for_each_entry(event, node, head, hlist) {
- if (event->desc == desc)
+ if (event->desc == desc
+ && event->chan == enabler->chan)
found = 1;
}
if (found)
/*
* Called at library load: connect the probe on all enablers matching
* this event.
- * called with session mutex held.
- * TODO: currently, for each desc added, we iterate on all event desc
- * (inefficient). We should create specific code that only target the
- * added desc.
+ * Called with session mutex held.
*/
-int lttng_fix_pending_event_desc(const struct lttng_event_desc *desc)
+int lttng_fix_pending_events(void)
{
struct lttng_session *session;
}
/*
- * Only used internally at session destruction.
+ * For each session of the owner thread, execute pending statedump.
+ * Only dump state for the sessions owned by the caller thread, because
+ * we don't keep ust_lock across the entire iteration.
*/
-int _lttng_event_unregister(struct lttng_event *event)
+void lttng_handle_pending_statedump(void *owner)
{
- return __tracepoint_probe_unregister(event->desc->name,
- event->desc->probe_callback,
- event);
+ struct lttng_session *session;
+
+ /* Execute state dump */
+ lttng_ust_baddr_statedump(owner);
+
+ /* Clear pending state dump */
+ if (ust_lock()) {
+ goto end;
+ }
+ cds_list_for_each_entry(session, &sessions, node) {
+ if (session->owner != owner)
+ continue;
+ if (!session->statedump_pending)
+ continue;
+ session->statedump_pending = 0;
+ }
+end:
+ ust_unlock();
+ return;
}
/*
return NULL;
enabler->type = type;
CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&enabler->excluder_head);
memcpy(&enabler->event_param, event_param,
sizeof(enabler->event_param));
enabler->chan = chan;
/* ctx left NULL */
- enabler->enabled = 1;
+ /*
+ * The "disable" event create comm field has been added to fix a
+ * race between event creation (of a started trace) and enabling
+ * filtering. New session daemon always set the "disable" field
+ * to 1, and are aware that they need to explicitly enable the
+ * event. Older session daemon (within same ABI) leave it at 0,
+ * and therefore we need to enable it here, keeping the original
+ * racy behavior.
+ */
+ enabler->enabled = !event_param->disabled;
cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
lttng_session_lazy_sync_enablers(enabler->chan->session);
return enabler;
return 0;
}
+int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
+ struct lttng_ust_excluder_node *excluder)
+{
+ excluder->enabler = enabler;
+ cds_list_add_tail(&excluder->node, &enabler->excluder_head);
+ lttng_session_lazy_sync_enablers(enabler->chan->session);
+ return 0;
+}
+
int lttng_attach_context(struct lttng_ust_context *context_param,
struct lttng_ctx **ctx, struct lttng_session *session)
{
switch (context_param->ctx) {
case LTTNG_UST_CONTEXT_PTHREAD_ID:
return lttng_add_pthread_id_to_ctx(ctx);
+ case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
+ {
+ struct lttng_ust_perf_counter_ctx *perf_ctx_param;
+
+ perf_ctx_param = &context_param->u.perf_counter;
+ return lttng_add_perf_counter_to_ctx(
+ perf_ctx_param->type,
+ perf_ctx_param->config,
+ perf_ctx_param->name,
+ ctx);
+ }
case LTTNG_UST_CONTEXT_VTID:
return lttng_add_vtid_to_ctx(ctx);
case LTTNG_UST_CONTEXT_VPID:
return lttng_add_vpid_to_ctx(ctx);
case LTTNG_UST_CONTEXT_PROCNAME:
return lttng_add_procname_to_ctx(ctx);
+ case LTTNG_UST_CONTEXT_IP:
+ return lttng_add_ip_to_ctx(ctx);
default:
return -EINVAL;
}
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
/* Destroy filter bytecode */
cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
free(filter_node);
}
+ /* Destroy excluders */
+ cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
+ &enabler->excluder_head, node) {
+ free(excluder_node);
+ }
+
/* Destroy contexts */
lttng_destroy_context(enabler->ctx);
lttng_enabler_ref_events(enabler);
/*
* For each event, if at least one of its enablers is enabled,
- * we enable the event, else we disable it.
+ * and its channel and session transient states are enabled, we
+ * enable the event, else we disable it.
*/
cds_list_for_each_entry(event, &session->events_head, node) {
struct lttng_enabler_ref *enabler_ref;
break;
}
}
- event->enabled = enabled;
+ /*
+ * Enabled state is based on union of enablers, with
+	 * intersection of session and channel transient enable
+ * states.
+ */
+ enabled = enabled && session->tstate && event->chan->tstate;
+
+ CMM_STORE_SHARED(event->enabled, enabled);
+ /*
+ * Sync tracepoint registration with event enabled
+ * state.
+ */
+ if (enabled) {
+ if (!event->registered)
+ register_event(event);
+ } else {
+ if (event->registered)
+ unregister_event(event);
+ }
/* Check if has enablers without bytecode enabled */
cds_list_for_each_entry(enabler_ref,