lttng_ust_init_thread: initialise cached context values
diff --git a/src/lib/lttng-ust/lttng-context-perf-counters.c b/src/lib/lttng-ust/lttng-context-perf-counters.c
index e1ce489030b688b3c529af2eb21c068cd7ce9756..52371a0dc6eb13cb600055cf85eba36077748cf5 100644
--- a/src/lib/lttng-ust/lttng-context-perf-counters.c
+++ b/src/lib/lttng-ust/lttng-context-perf-counters.c
@@ -21,7 +21,8 @@
 #include <lttng/ust-arch.h>
 #include <lttng/ust-events.h>
 #include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
+#include <lttng/ust-ringbuffer-context.h>
+#include <lttng/ust-cancelstate.h>
 #include <urcu/system.h>
 #include <urcu/arch.h>
 #include <urcu/rculist.h>
@@ -71,18 +72,12 @@ static pthread_key_t perf_counter_key;
  * lttng_perf_lock - Protect lttng-ust perf counter data structures
  *
  * Nests within the ust_lock, and therefore within the libc dl lock.
- * Therefore, we need to fixup the TLS before nesting into this lock.
+ * Therefore, we need to allocate the TLS before nesting into this lock.
  * Nests inside RCU bp read-side lock. Protects against concurrent
  * fork.
  */
 static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-/*
- * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
- * restored on unlock. Protected by ust_perf_mutex.
- */
-static int ust_perf_saved_cancelstate;
-
 /*
  * Track whether we are tracing from a signal handler nested on an
  * application thread.
@@ -90,21 +85,21 @@ static int ust_perf_saved_cancelstate;
 static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
 
 /*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ * Force a read (imply TLS allocation for dlopen) of TLS variables.
  */
-void lttng_ust_fixup_perf_counter_tls(void)
+void lttng_ust_perf_counter_init_thread(int flags)
 {
        asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
+       (void)flags;
 }
 
 void lttng_perf_lock(void)
 {
        sigset_t sig_all_blocked, orig_mask;
-       int ret, oldstate;
+       int ret;
 
-       ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
-       if (ret) {
-               ERR("pthread_setcancelstate: %s", strerror(ret));
+       if (lttng_ust_cancelstate_disable_push()) {
+               ERR("lttng_ust_cancelstate_disable_push");
        }
        sigfillset(&sig_all_blocked);
        ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
@@ -118,7 +113,6 @@ void lttng_perf_lock(void)
                 */
                cmm_barrier();
                pthread_mutex_lock(&ust_perf_mutex);
-               ust_perf_saved_cancelstate = oldstate;
        }
        ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
        if (ret) {
@@ -129,8 +123,7 @@ void lttng_perf_lock(void)
 void lttng_perf_unlock(void)
 {
        sigset_t sig_all_blocked, orig_mask;
-       int ret, newstate, oldstate;
-       bool restore_cancel = false;
+       int ret;
 
        sigfillset(&sig_all_blocked);
        ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
@@ -143,24 +136,20 @@ void lttng_perf_unlock(void)
         */
        cmm_barrier();
        if (!--URCU_TLS(ust_perf_mutex_nest)) {
-               newstate = ust_perf_saved_cancelstate;
-               restore_cancel = true;
                pthread_mutex_unlock(&ust_perf_mutex);
        }
        ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
        if (ret) {
                ERR("pthread_sigmask: %s", strerror(ret));
        }
-       if (restore_cancel) {
-               ret = pthread_setcancelstate(newstate, &oldstate);
-               if (ret) {
-                       ERR("pthread_setcancelstate: %s", strerror(ret));
-               }
+       if (lttng_ust_cancelstate_disable_pop()) {
+               ERR("lttng_ust_cancelstate_disable_pop");
        }
 }
 
 static
 size_t perf_counter_get_size(void *priv __attribute__((unused)),
+               struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
                size_t offset)
 {
        size_t size = 0;
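
The lttng_perf_lock()/lttng_perf_unlock() changes above drop the locally saved ust_perf_saved_cancelstate and delegate to the lttng_ust_cancelstate_disable_push()/lttng_ust_cancelstate_disable_pop() helpers pulled in through <lttng/ust-cancelstate.h>. As a rough illustration of that push/pop pattern (hypothetical names and a per-thread nesting counter; this is a sketch, not the actual lttng-ust helper), only the outermost push disables pthread cancellation and only the matching outermost pop restores the saved state:

/*
 * Illustrative sketch only -- not the lttng-ust implementation.
 * A per-thread nesting counter: the outermost push disables
 * cancellation and remembers the previous state, the matching
 * outermost pop restores it.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct cancelstate {
	int nesting;	/* push/pop depth for this thread */
	int oldstate;	/* cancel state saved by the outermost push */
};

static __thread struct cancelstate thread_cancelstate;

static int cancelstate_disable_push(void)
{
	struct cancelstate *cs = &thread_cancelstate;
	int ret, oldstate;

	if (cs->nesting++)
		return 0;	/* already disabled by an outer push */
	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		fprintf(stderr, "pthread_setcancelstate: %s\n", strerror(ret));
		return -1;
	}
	cs->oldstate = oldstate;
	return 0;
}

static int cancelstate_disable_pop(void)
{
	struct cancelstate *cs = &thread_cancelstate;
	int ret, oldstate;

	if (!cs->nesting)
		return -1;	/* unbalanced pop */
	if (--cs->nesting)
		return 0;	/* still nested, keep cancellation disabled */
	ret = pthread_setcancelstate(cs->oldstate, &oldstate);
	if (ret) {
		fprintf(stderr, "pthread_setcancelstate: %s\n", strerror(ret));
		return -1;
	}
	return 0;
}

Because the counter and saved state live in thread-local storage, nested lock sections on the same thread stay balanced without a file-local global, which is what the removal of ust_perf_saved_cancelstate relies on.
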
@@ -262,7 +251,7 @@ uint64_t arch_read_perf_counter(
 }
 
 static
-int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field __attribute__((unused)))
 {
        return 1;
 }
@@ -442,8 +431,9 @@ uint64_t wrapper_perf_counter_read(void *priv)
 
 static
 void perf_counter_record(void *priv,
-                struct lttng_ust_ring_buffer_ctx *ctx,
-                struct lttng_ust_channel_buffer *chan)
+               struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
+               struct lttng_ust_ring_buffer_ctx *ctx,
+               struct lttng_ust_channel_buffer *chan)
 {
        uint64_t value;
 
@@ -453,9 +443,10 @@ void perf_counter_record(void *priv,
 
 static
 void perf_counter_get_value(void *priv,
+               struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
                struct lttng_ust_ctx_value *value)
 {
-       value->u.s64 = wrapper_perf_counter_read(priv);
+       value->u.u64 = wrapper_perf_counter_read(priv);
 }
 
 /* Called with perf lock held */
@@ -531,7 +522,7 @@ static const struct lttng_ust_type_common *ust_type =
        lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
                        lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
                        lttng_ust_is_signed_type(uint64_t),
-                       BYTE_ORDER, 10);
+                       LTTNG_UST_BYTE_ORDER, 10);
 
 /* Called with UST lock held */
 int lttng_add_perf_counter_to_ctx(uint32_t type,
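
The rename from lttng_ust_fixup_perf_counter_tls() to lttng_ust_perf_counter_init_thread(int flags) ties this context into the per-thread initialisation path named in the commit subject. A usage sketch, assuming lttng_ust_init_thread() is the public entry point declared in <lttng/ust-thread.h> (lttng-ust 2.13+): a thread that may be traced from nested signal handlers calls it once, early, so TLS and cached context values are allocated up front rather than in async-signal context.

/*
 * Usage sketch (assumes <lttng/ust-thread.h> declares
 * lttng_ust_init_thread()): initialise lttng-ust per-thread state
 * before any tracing can happen on this thread.
 */
#include <pthread.h>
#include <stddef.h>
#include <lttng/ust-thread.h>

static void *worker(void *arg)
{
	(void)arg;

	/* Allocate TLS and cached context values up front. */
	lttng_ust_init_thread();

	/* ... work that may be interrupted by instrumented signal handlers ... */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, worker, NULL))
		return 1;
	pthread_join(tid, NULL);
	return 0;
}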