X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libust%2Ftype-serializer.c;h=dcaea1ef029664bbb22218b84ea663378af0c91d;hb=8d6300d3b3cb0219e1109e931a2219dbd812b24d;hp=3ee54ebab37eedcd41d101017bc8db3dfe53e0f0;hpb=12e81b07455a1aef2e2bcc73004f14a7b73596fa;p=ust.git

diff --git a/libust/type-serializer.c b/libust/type-serializer.c
index 3ee54eb..dcaea1e 100644
--- a/libust/type-serializer.c
+++ b/libust/type-serializer.c
@@ -7,9 +7,13 @@
  *
  * Dual LGPL v2.1/GPL v2 license.
  */
+
+/* This file contains functions for tracepoint custom probes support. */
+
 #include
 #include
 #include
+#include
 #include "tracer.h"
 
 notrace
@@ -39,7 +43,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 	cpu = ust_get_cpu();
 
 	/* Force volatile access. */
-	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
 
 	/*
 	 * asm volatile and "memory" clobber prevent the compiler from moving
@@ -48,7 +52,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 	 * traps, divisions by 0, ...) are triggered within the incremented
 	 * nesting count section.
 	 */
-	barrier();
+	cmm_barrier();
 
 	eID = mdata->event_id;
 	chan_index = mdata->channel_id;
@@ -56,7 +60,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 	 * Iterate on each trace, typically small number of active traces,
 	 * list iteration with prefetch is usually slower.
 	 */
-	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+	cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
 		if (unlikely(!trace->active))
 			continue;
 //ust//	if (unlikely(!ltt_run_filter(trace, eID)))
@@ -78,6 +82,14 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 		if (!chan->active)
 			continue;
 
+		/* If a new cpu was plugged since the trace was started, we did
+		 * not add it to the trace, and therefore we write the event to
+		 * cpu 0.
+		 */
+		if(cpu >= chan->n_cpus) {
+			cpu = 0;
+		}
+
 		/* reserve space : header and data */
 		ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
 					cpu, &buf, &slot_size, &buf_offset, &tsc,
@@ -105,7 +117,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 	 * traps, divisions by 0, ...) are triggered within the incremented
 	 * nesting count section.
 	 */
-	barrier();
-	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+	cmm_barrier();
+	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
 	rcu_read_unlock();
 }
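
The hunks above amount to two changes: the liburcu primitives gain their namespaced spellings (barrier() -> cmm_barrier(), STORE_SHARED()/LOAD_SHARED() -> CMM_STORE_SHARED()/CMM_LOAD_SHARED(), list_for_each_entry_rcu() -> cds_list_for_each_entry_rcu()), and an event hitting a CPU that was hotplugged after the trace started is redirected to the buffers of cpu 0. What follows is a minimal, self-contained sketch, not part of the patch, of the same nesting-count-plus-RCU-list pattern using the renamed primitives. The header paths assume a liburcu contemporary with this commit; probe_entry, probe_list and my_probe_nesting are hypothetical names used only for illustration.

/*
 * Standalone sketch (not from ust.git): bump a per-thread nesting count
 * with volatile accesses, keep potentially faulting work inside the
 * counted section with a compiler barrier, and walk an RCU-protected
 * list, all with the cmm_/cds_ prefixed liburcu API this patch adopts.
 * probe_entry, probe_list and my_probe_nesting are made-up names.
 */
#include <urcu-bp.h>		/* rcu_read_lock()/rcu_read_unlock(), bulletproof flavor */
#include <urcu/compiler.h>	/* cmm_barrier() */
#include <urcu/system.h>	/* CMM_LOAD_SHARED()/CMM_STORE_SHARED() */
#include <urcu/rculist.h>	/* cds_list_for_each_entry_rcu() */

struct probe_entry {
	int active;
	struct cds_list_head list;
};

static CDS_LIST_HEAD(probe_list);	/* RCU-protected list of probes */
static __thread int my_probe_nesting;	/* per-thread nesting counter */

static void visit_probes(void)
{
	struct probe_entry *entry;

	rcu_read_lock();
	/* Force volatile access, as in the patched code. */
	CMM_STORE_SHARED(my_probe_nesting,
			 CMM_LOAD_SHARED(my_probe_nesting) + 1);
	/*
	 * Compiler barrier: keep instructions that could trap inside the
	 * incremented nesting count section (same rationale as the patch).
	 */
	cmm_barrier();

	cds_list_for_each_entry_rcu(entry, &probe_list, list) {
		if (!entry->active)
			continue;
		/* per-entry work would go here */
	}

	cmm_barrier();
	CMM_STORE_SHARED(my_probe_nesting,
			 CMM_LOAD_SHARED(my_probe_nesting) - 1);
	rcu_read_unlock();
}

Build against the bulletproof flavor (e.g. -lurcu-bp). The cmm_/cds_ prefixes were introduced in liburcu so that its memory-model and list helpers no longer collide with identically named kernel-style macros in applications, which is the collision this patch resolves for ust.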