/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
* Copyright (C) 2008-2011 Mathieu Desnoyers
* Copyright (C) 2009 Pierre-Marc Fournier
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
* Ported to userspace by Pierre-Marc Fournier.
*/
#include <urcu/system.h>
#include <lttng/tracepoint.h>
-#include <lttng/ust-abi.h> /* for LTTNG_UST_SYM_NAME_LEN */
+#include <lttng/ust-abi.h> /* for LTTNG_UST_ABI_SYM_NAME_LEN */
#include <usterr-signal-safe.h>
-#include <helper.h>
+#include <ust-helper.h>
#include "tracepoint-internal.h"
#include "lttng-tracer-core.h"
*/
static int tracepoint_destructors_state = 1;
-/*
- * Expose the now deprecated symbol __tracepoints__disable_destructors for
- * backward compatibility of applications built against old versions of
- * lttng-ust. We need to keep __tracepoints__disable_destructors up to date
- * within the new destructor disabling API because old applications read this
- * symbol directly.
- */
-int __tracepoints__disable_destructors __attribute__((weak));
-
static void (*new_tracepoint_cb)(struct lttng_ust_tracepoint *);
/*
	bool tp_entry_callsite_ref; /* Whether a tp_entry took a ref on this callsite */
};
-static int tracepoint_v1_api_used;
-static void (*lttng_ust_liburcu_bp_synchronize_rcu)(void);
-static void (*lttng_ust_liburcu_bp_rcu_read_lock)(void);
-static void (*lttng_ust_liburcu_bp_rcu_read_unlock)(void);
-void (*lttng_ust_liburcu_bp_before_fork)(void);
-void (*lttng_ust_liburcu_bp_after_fork_parent)(void);
-void (*lttng_ust_liburcu_bp_after_fork_child)(void);
-
-static bool lttng_ust_tracepoint_v1_used(void)
-{
- return uatomic_read(&tracepoint_v1_api_used);
-}
-
-static void lttng_ust_tracepoint_set_v1_used(void)
-{
- if (!lttng_ust_tracepoint_v1_used()) {
- /*
- * Perform dlsym here rather than lazily on first use to
- * eliminate nesting of dynamic loader lock (used within
- * dlsym) inside the ust lock.
- */
- if (!lttng_ust_liburcu_bp_synchronize_rcu) {
- lttng_ust_liburcu_bp_synchronize_rcu = URCU_FORCE_CAST(void (*)(void),
- dlsym(RTLD_DEFAULT, "synchronize_rcu_bp"));
- if (!lttng_ust_liburcu_bp_synchronize_rcu)
- abort();
- }
- if (!lttng_ust_liburcu_bp_before_fork) {
- lttng_ust_liburcu_bp_before_fork = URCU_FORCE_CAST(void (*)(void),
- dlsym(RTLD_DEFAULT, "rcu_bp_before_fork"));
- if (!lttng_ust_liburcu_bp_before_fork)
- abort();
- }
- if (!lttng_ust_liburcu_bp_after_fork_parent) {
- lttng_ust_liburcu_bp_after_fork_parent = URCU_FORCE_CAST(void (*)(void),
- dlsym(RTLD_DEFAULT, "rcu_bp_after_fork_parent"));
- if (!lttng_ust_liburcu_bp_after_fork_parent)
- abort();
- }
- if (!lttng_ust_liburcu_bp_after_fork_child) {
- lttng_ust_liburcu_bp_after_fork_child = URCU_FORCE_CAST(void (*)(void),
- dlsym(RTLD_DEFAULT, "rcu_bp_after_fork_child"));
- if (!lttng_ust_liburcu_bp_after_fork_child)
- abort();
- }
- if (!lttng_ust_liburcu_bp_rcu_read_lock) {
- lttng_ust_liburcu_bp_rcu_read_lock = URCU_FORCE_CAST(void (*)(void),
- dlsym(RTLD_DEFAULT, "rcu_read_lock_bp"));
- if (!lttng_ust_liburcu_bp_rcu_read_lock)
- abort();
- }
- if (!lttng_ust_liburcu_bp_rcu_read_unlock) {
- lttng_ust_liburcu_bp_rcu_read_unlock = URCU_FORCE_CAST(void (*)(void),
- dlsym(RTLD_DEFAULT, "rcu_read_unlock_bp"));
- if (!lttng_ust_liburcu_bp_rcu_read_unlock)
- abort();
- }
-
- /* Fixup URCU bp TLS. */
- lttng_ust_liburcu_bp_rcu_read_lock();
- lttng_ust_liburcu_bp_rcu_read_unlock();
-
- uatomic_set(&tracepoint_v1_api_used, 1);
- }
-}
-
/* coverity[+alloc] */
static void *allocate_probes(int count)
{
if (old) {
struct tp_probes *tp_probes = caa_container_of(old,
struct tp_probes, probes[0]);
- lttng_ust_synchronize_trace();
+ lttng_ust_urcu_synchronize_rcu();
free(tp_probes);
}
}
size_t name_len = strlen(name);
uint32_t hash;
- if (name_len > LTTNG_UST_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_SYM_NAME_LEN - 1;
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
}
hash = jhash(name, name_len, 0);
head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strncmp(name, e->name, LTTNG_UST_SYM_NAME_LEN - 1))
+ if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
return e;
}
return NULL;
size_t sig_off, name_off;
uint32_t hash;
- if (name_len > LTTNG_UST_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_SYM_NAME_LEN - 1;
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
}
hash = jhash(name, name_len, 0);
head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strncmp(name, e->name, LTTNG_UST_SYM_NAME_LEN - 1)) {
+ if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1)) {
DBG("tracepoint %s busy", name);
return ERR_PTR(-EEXIST); /* Already there */
}
static void set_tracepoint(struct tracepoint_entry **entry,
struct lttng_ust_tracepoint *elem, int active)
{
- WARN_ON(strncmp((*entry)->name, elem->name, LTTNG_UST_SYM_NAME_LEN - 1) != 0);
+ WARN_ON(strncmp((*entry)->name, elem->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1) != 0);
/*
* Check that signatures match before connecting a probe to a
* tracepoint. Warn the user if they don't.
uint32_t hash;
struct tracepoint_entry *tp_entry;
- if (name_len > LTTNG_UST_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_SYM_NAME_LEN - 1;
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
}
hash = jhash(name, name_len, 0);
head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
struct tracepoint_entry *tp_entry;
tp_entry = get_tracepoint(name);
- if (name_len > LTTNG_UST_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_SYM_NAME_LEN - 1;
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
}
hash = jhash(name, name_len, 0);
head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
cds_hlist_for_each_entry(e, node, head, hlist) {
struct lttng_ust_tracepoint *tp = e->tp;
- if (strncmp(name, tp->name, LTTNG_UST_SYM_NAME_LEN - 1))
+ if (strncmp(name, tp->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
continue;
if (tp_entry) {
if (!e->tp_entry_callsite_ref) {
/*
 * Caller needs to invoke lttng_ust_tp_probe_prune_release_queue() after
- * calling __tracepoint_probe_register_queue_release() one or multiple
+ * calling lttng_ust_tp_probe_register_queue_release() one or multiple
* times to ensure it does not leak memory.
*/
-int __tracepoint_probe_register_queue_release(const char *name,
+int lttng_ust_tp_probe_register_queue_release(const char *name,
void (*probe)(void), void *data, const char *signature)
{
void *old;
/*
 * Caller needs to invoke lttng_ust_tp_probe_prune_release_queue() after
- * calling __tracepoint_probe_unregister_queue_release() one or multiple
+ * calling lttng_ust_tp_probe_unregister_queue_release() one or multiple
* times to ensure it does not leak memory.
*/
-int __tracepoint_probe_unregister_queue_release(const char *name,
+int lttng_ust_tp_probe_unregister_queue_release(const char *name,
void (*probe)(void), void *data)
{
void *old;
return ret;
}
-void __tracepoint_probe_prune_release_queue(void)
+void lttng_ust_tp_probe_prune_release_queue(void)
{
CDS_LIST_HEAD(release_probes);
struct tp_probes *pos, *next;
release_queue_need_update = 0;
/* Wait for grace period between all sync_callsites and free. */
- lttng_ust_synchronize_trace();
+ lttng_ust_urcu_synchronize_rcu();
cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
cds_list_del(&pos->u.list);
tracepoint_update_probes();
/* Wait for grace period between update_probes and free. */
- lttng_ust_synchronize_trace();
+ lttng_ust_urcu_synchronize_rcu();
cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
cds_list_del(&pos->u.list);
free(pos);
pthread_mutex_unlock(&tracepoint_mutex);
}
-void tracepoint_set_new_tracepoint_cb(void (*cb)(struct lttng_ust_tracepoint *))
-{
- new_tracepoint_cb = cb;
-}
-
static void new_tracepoints(struct lttng_ust_tracepoint * const *start,
struct lttng_ust_tracepoint * const *end)
{
* against recent liblttng-ust headers require a recent liblttng-ust
* runtime for those tracepoints to be taken into account.
*/
-int tracepoint_register_lib2(struct lttng_ust_tracepoint * const *tracepoints_start,
+int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
+ int tracepoints_count);
+int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
int tracepoints_count)
{
struct tracepoint_lib *pl, *iter;
- init_tracepoint();
+ lttng_ust_tp_init();
pl = (struct tracepoint_lib *) zmalloc(sizeof(struct tracepoint_lib));
if (!pl) {
DBG("just registered a tracepoints section from %p and having %d tracepoints",
tracepoints_start, tracepoints_count);
- if (ust_debug()) {
+ if (ust_err_debug_enabled()) {
int i;
for (i = 0; i < tracepoints_count; i++) {
return 0;
}
-/* Exposed for backward compatibility with old instrumented applications. */
-int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
- int tracepoints_count)
-{
- lttng_ust_tracepoint_set_v1_used();
- return tracepoint_register_lib2(tracepoints_start, tracepoints_count);
-}
-
-int tracepoint_unregister_lib2(struct lttng_ust_tracepoint * const *tracepoints_start)
+int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start);
+int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start)
{
struct tracepoint_lib *lib;
return 0;
}
-/* Exposed for backward compatibility with old instrumented applications. */
-int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start)
-{
- lttng_ust_tracepoint_set_v1_used();
- return tracepoint_unregister_lib2(tracepoints_start);
-}
-
/*
* Report in debug message whether the compiler correctly supports weak
* hidden symbols. This test checks that the address associated with two
"DIFFERENT addresses");
}
-void init_tracepoint(void)
+void lttng_ust_tp_init(void)
{
if (uatomic_xchg(&initialized, 1) == 1)
return;
- init_usterr();
+ ust_err_init();
check_weak_hidden();
}
-void exit_tracepoint(void)
+void lttng_ust_tp_exit(void)
{
initialized = 0;
}
#undef tp_rcu_read_unlock
#undef tp_rcu_dereference
+void tp_rcu_read_lock(void);
void tp_rcu_read_lock(void)
{
lttng_ust_urcu_read_lock();
}
+void tp_rcu_read_unlock(void);
void tp_rcu_read_unlock(void)
{
lttng_ust_urcu_read_unlock();
}
+void *tp_rcu_dereference_sym(void *p);
void *tp_rcu_dereference_sym(void *p)
{
return lttng_ust_rcu_dereference(p);
* dlopen(3) and dlsym(3) to get an handle on the
* tp_disable_destructors and tp_get_destructors_state symbols below.
*/
+void tp_disable_destructors(void);
void tp_disable_destructors(void)
{
uatomic_set(&tracepoint_destructors_state, 0);
* Returns 1 if the destructors are enabled and should be executed.
* Returns 0 if the destructors are disabled.
*/
+int tp_get_destructors_state(void);
int tp_get_destructors_state(void)
{
return uatomic_read(&tracepoint_destructors_state);
}
-
-void lttng_ust_synchronize_trace(void)
-{
- lttng_ust_urcu_synchronize_rcu();
- /*
- * For legacy tracepoint instrumentation, also wait for urcu-bp
- * grace period.
- */
- if (lttng_ust_liburcu_bp_synchronize_rcu)
- lttng_ust_liburcu_bp_synchronize_rcu();
-}
-
-/*
- * Create the wrapper symbols for legacy v1 API.
- */
-void tp_rcu_read_lock_bp(void)
-{
- lttng_ust_urcu_read_lock();
-}
-
-void tp_rcu_read_unlock_bp(void)
-{
- lttng_ust_urcu_read_unlock();
-}
-
-void *tp_rcu_dereference_sym_bp(void *p)
-{
- return lttng_ust_rcu_dereference(p);
-}