/*
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <urcu/rculist.h>
#include <ust/type-serializer.h>
16 void _ltt_specialized_trace(const struct marker
*mdata
, void *probe_data
,
17 void *serialize_private
, unsigned int data_size
,
18 unsigned int largest_align
)
23 unsigned int chan_index
;
24 struct ust_buffer
*buf
;
25 struct ust_channel
*chan
;
26 struct ust_trace
*trace
;
33 * If we get here, it's probably because we have useful work to do.
35 if (unlikely(ltt_traces
.num_active_traces
== 0))
41 /* Force volatile access. */
42 STORE_SHARED(ltt_nesting
, LOAD_SHARED(ltt_nesting
) + 1);
45 * asm volatile and "memory" clobber prevent the compiler from moving
46 * instructions out of the ltt nesting count. This is required to ensure
47 * that probe side-effects which can cause recursion (e.g. unforeseen
48 * traps, divisions by 0, ...) are triggered within the incremented
49 * nesting count section.
52 eID
= mdata
->event_id
;
53 chan_index
= mdata
->channel_id
;
56 * Iterate on each trace, typically small number of active traces,
57 * list iteration with prefetch is usually slower.
59 list_for_each_entry_rcu(trace
, <t_traces
.head
, list
) {
60 if (unlikely(!trace
->active
))
62 //ust// if (unlikely(!ltt_run_filter(trace, eID)))
64 #ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
65 rflags
= LTT_RFLAG_ID_SIZE
;
67 if (unlikely(eID
>= LTT_FREE_EVENTS
))
68 rflags
= LTT_RFLAG_ID
;
73 * Skip channels added after trace creation.
75 if (unlikely(chan_index
>= trace
->nr_channels
))
77 chan
= &trace
->channels
[chan_index
];
81 /* reserve space : header and data */
82 ret
= ltt_reserve_slot(chan
, trace
, data_size
, largest_align
,
83 cpu
, &buf
, &slot_size
, &buf_offset
, &tsc
,
85 if (unlikely(ret
< 0))
86 continue; /* buffer full */
88 /* Out-of-order write : header and data */
89 buf_offset
= ltt_write_event_header(chan
, buf
,
90 buf_offset
, eID
, data_size
,
93 buf_offset
+= ltt_align(buf_offset
, largest_align
);
94 ust_buffers_write(buf
, buf_offset
,
95 serialize_private
, data_size
);
96 buf_offset
+= data_size
;
98 /* Out-of-order commit */
99 ltt_commit_slot(chan
, buf
, buf_offset
, data_size
, slot_size
);
102 * asm volatile and "memory" clobber prevent the compiler from moving
103 * instructions out of the ltt nesting count. This is required to ensure
104 * that probe side-effects which can cause recursion (e.g. unforeseen
105 * traps, divisions by 0, ...) are triggered within the incremented
106 * nesting count section.
109 STORE_SHARED(ltt_nesting
, LOAD_SHARED(ltt_nesting
) - 1);