Privatize headers
[ust.git] / libust / type-serializer.c
index bf1c4966a67630bd3aee064e57b5cab34255ccdd..070542c65e2a938a158e00e88aad320f867a55b2 100644
 
 /* This file contains functions for tracepoint custom probes support. */
 
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include <urcu/rculist.h>
-#include <ust/type-serializer.h>
 #include <ust/core.h>
 #include <ust/clock.h>
+#include <urcu-bp.h>
 #include "tracer.h"
+#include "type-serializer.h"
 
 notrace
-void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
+void _ltt_specialized_trace(const struct ust_marker *mdata, void *probe_data,
                void *serialize_private, unsigned int data_size,
                unsigned int largest_align)
 {
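
For context: the new <urcu-bp.h> include selects liburcu's "bulletproof" flavor, whose read side needs no explicit rcu_register_thread() call. A minimal reader built on that flavor (names here are invented for illustration, not taken from the tree) might look like:

#define _LGPL_SOURCE
#include <urcu-bp.h>

static int *shared_value;

static int read_shared(void)
{
        int *p, v = -1;

        rcu_read_lock();        /* urcu-bp registers the calling thread lazily */
        p = rcu_dereference(shared_value);
        if (p)
                v = *p;
        rcu_read_unlock();
        return v;
}
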
@@ -43,7 +46,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
        cpu = ust_get_cpu();
 
        /* Force volatile access. */
-       STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+       CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
 
        /*
         * asm volatile and "memory" clobber prevent the compiler from moving
@@ -52,7 +55,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
         * traps, divisions by 0, ...) are triggered within the incremented
         * nesting count section.
         */
-       barrier();
+       cmm_barrier();
        eID = mdata->event_id;
        chan_index = mdata->channel_id;
 
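
For context: the renamed primitives are liburcu's memory-model helpers (CMM_LOAD_SHARED/CMM_STORE_SHARED from urcu/system.h, cmm_barrier() from urcu/compiler.h). A stripped-down sketch of the nesting-count pattern used above, with the variable and callback names invented for illustration:

#include <urcu/system.h>        /* CMM_LOAD_SHARED, CMM_STORE_SHARED */
#include <urcu/compiler.h>      /* cmm_barrier */

static __thread int nesting;

static void run_in_nested_section(void (*do_work)(void))
{
        /* Force volatile access to the nesting count. */
        CMM_STORE_SHARED(nesting, CMM_LOAD_SHARED(nesting) + 1);
        cmm_barrier();          /* keep do_work() inside the raised count */
        do_work();
        cmm_barrier();
        CMM_STORE_SHARED(nesting, CMM_LOAD_SHARED(nesting) - 1);
}
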
@@ -60,7 +63,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
         * Iterate on each trace, typically small number of active traces,
         * list iteration with prefetch is usually slower.
         */
-       list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+       cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
                if (unlikely(!trace->active))
                        continue;
 //ust//                if (unlikely(!ltt_run_filter(trace, eID)))
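
For context: cds_list_for_each_entry_rcu() is the renamed RCU list iterator from urcu/rculist.h, and the walk must stay between rcu_read_lock() and rcu_read_unlock(), as it does in the function above. A self-contained sketch with hypothetical types:

#define _LGPL_SOURCE
#include <urcu-bp.h>
#include <urcu/list.h>
#include <urcu/rculist.h>

struct trace_entry {
        int active;
        struct cds_list_head list;
};

static CDS_LIST_HEAD(trace_list);

static int count_active_traces(void)
{
        struct trace_entry *t;
        int n = 0;

        rcu_read_lock();
        cds_list_for_each_entry_rcu(t, &trace_list, list) {
                if (t->active)
                        n++;
        }
        rcu_read_unlock();
        return n;
}
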
@@ -82,6 +85,14 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
                if (!chan->active)
                        continue;
 
+               /* If a new CPU was plugged in after the trace was started, it
+                * was not added to the trace, and therefore we write the event
+                * to cpu 0.
+                */
+               if (cpu >= chan->n_cpus) {
+                       cpu = 0;
+               }
+
                /* reserve space : header and data */
                ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
                                       cpu, &buf, &slot_size, &buf_offset, &tsc,
@@ -109,7 +120,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
         * traps, divisions by 0, ...) are triggered within the incremented
         * nesting count section.
         */
-       barrier();
-       STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+       cmm_barrier();
+       CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
        rcu_read_unlock();
 }