/**
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

/* This file contains functions for tracepoint custom probes support. */

#include <urcu/rculist.h>
#include <ust/type-serializer.h>
#include <ust/core.h>
#include <ust/clock.h>
#include "tracer.h"

19 | notrace | |
20 | void _ltt_specialized_trace(const struct marker *mdata, void *probe_data, | |
21 | void *serialize_private, unsigned int data_size, | |
22 | unsigned int largest_align) | |
23 | { | |
24 | int ret; | |
25 | uint16_t eID; | |
26 | size_t slot_size; | |
27 | unsigned int chan_index; | |
28 | struct ust_buffer *buf; | |
29 | struct ust_channel *chan; | |
30 | struct ust_trace *trace; | |
31 | u64 tsc; | |
32 | long buf_offset; | |
33 | int cpu; | |
34 | unsigned int rflags; | |
35 | ||
36 | /* | |
37 | * If we get here, it's probably because we have useful work to do. | |
38 | */ | |
39 | if (unlikely(ltt_traces.num_active_traces == 0)) | |
40 | return; | |
41 | ||
42 | rcu_read_lock(); | |
43 | cpu = ust_get_cpu(); | |
44 | ||
45 | /* Force volatile access. */ | |
0222e121 | 46 | CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1); |
12e81b07 PMF |
47 | |
48 | /* | |
49 | * asm volatile and "memory" clobber prevent the compiler from moving | |
50 | * instructions out of the ltt nesting count. This is required to ensure | |
51 | * that probe side-effects which can cause recursion (e.g. unforeseen | |
52 | * traps, divisions by 0, ...) are triggered within the incremented | |
53 | * nesting count section. | |
54 | */ | |
0222e121 | 55 | cmm_barrier(); |
12e81b07 PMF |
56 | eID = mdata->event_id; |
57 | chan_index = mdata->channel_id; | |
58 | ||
59 | /* | |
60 | * Iterate on each trace, typically small number of active traces, | |
61 | * list iteration with prefetch is usually slower. | |
62 | */ | |
0222e121 | 63 | cds_list_for_each_entry_rcu(trace, <t_traces.head, list) { |
12e81b07 PMF |
64 | if (unlikely(!trace->active)) |
65 | continue; | |
66 | //ust// if (unlikely(!ltt_run_filter(trace, eID))) | |
67 | //ust// continue; | |
68 | #ifdef CONFIG_LTT_DEBUG_EVENT_SIZE | |
69 | rflags = LTT_RFLAG_ID_SIZE; | |
70 | #else | |
71 | if (unlikely(eID >= LTT_FREE_EVENTS)) | |
72 | rflags = LTT_RFLAG_ID; | |
73 | else | |
74 | rflags = 0; | |
75 | #endif | |
76 | /* | |
77 | * Skip channels added after trace creation. | |
78 | */ | |
79 | if (unlikely(chan_index >= trace->nr_channels)) | |
80 | continue; | |
81 | chan = &trace->channels[chan_index]; | |
82 | if (!chan->active) | |
83 | continue; | |
84 | ||
93c2f023 MD |
85 | /* If a new cpu was plugged since the trace was started, we did |
86 | * not add it to the trace, and therefore we write the event to | |
87 | * cpu 0. | |
88 | */ | |
89 | if(cpu >= chan->n_cpus) { | |
90 | cpu = 0; | |
91 | } | |
92 | ||
12e81b07 PMF |
93 | /* reserve space : header and data */ |
94 | ret = ltt_reserve_slot(chan, trace, data_size, largest_align, | |
95 | cpu, &buf, &slot_size, &buf_offset, &tsc, | |
96 | &rflags); | |
97 | if (unlikely(ret < 0)) | |
98 | continue; /* buffer full */ | |
99 | ||
100 | /* Out-of-order write : header and data */ | |
101 | buf_offset = ltt_write_event_header(chan, buf, | |
102 | buf_offset, eID, data_size, | |
103 | tsc, rflags); | |
104 | if (data_size) { | |
105 | buf_offset += ltt_align(buf_offset, largest_align); | |
106 | ust_buffers_write(buf, buf_offset, | |
107 | serialize_private, data_size); | |
108 | buf_offset += data_size; | |
109 | } | |
110 | /* Out-of-order commit */ | |
111 | ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size); | |
112 | } | |
113 | /* | |
114 | * asm volatile and "memory" clobber prevent the compiler from moving | |
115 | * instructions out of the ltt nesting count. This is required to ensure | |
116 | * that probe side-effects which can cause recursion (e.g. unforeseen | |
117 | * traps, divisions by 0, ...) are triggered within the incremented | |
118 | * nesting count section. | |
119 | */ | |
0222e121 MD |
120 | cmm_barrier(); |
121 | CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1); | |
12e81b07 PMF |
122 | rcu_read_unlock(); |
123 | } |