/*
 * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 * Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This contains the definitions for the Linux Trace Toolkit tracer.
 *
 * Ported to userspace by Pierre-Marc Fournier.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _LTT_TRACER_H
#define _LTT_TRACER_H

#include <sys/types.h>
#include <stdarg.h>
#include <ust/kernelcompat.h>
#include "channels.h"
#include "tracercore.h"
#include "tracerconst.h"
#include <ust/marker.h>
#include <ust/probe.h>
#include "buffers.h"

/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE 32L

/* Interval (in jiffies) at which the LTT per-CPU timer fires */
#define LTT_PERCPU_TIMER_INTERVAL 1

#ifndef LTT_ARCH_TYPE
#define LTT_ARCH_TYPE LTT_ARCH_TYPE_UNDEFINED
#endif

#ifndef LTT_ARCH_VARIANT
#define LTT_ARCH_VARIANT LTT_ARCH_VARIANT_NONE
#endif

struct ltt_active_marker;

/* Maximum number of callbacks per marker */
#define LTT_NR_CALLBACKS 10

struct ltt_serialize_closure;
struct ltt_probe_private_data;

struct ltt_serialize_closure {
        ltt_serialize_cb *callbacks;
        long cb_args[LTT_NR_CALLBACKS];
        unsigned int cb_idx;
};

extern size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
                struct ltt_serialize_closure *closure,
                void *serialize_private,
                int *largest_align, const char *fmt, va_list *args);

struct ltt_probe_private_data {
        struct ust_trace *trace;        /* Target trace, for metadata
                                           or statedump. */
        ltt_serialize_cb serializer;    /* Serialization function override. */
        void *serialize_private;        /* Private data for serialization
                                           functions. */
};

enum ltt_channels {
        LTT_CHANNEL_METADATA,
        LTT_CHANNEL_UST,
};

struct ltt_active_marker {
        struct list_head node;          /* active markers list */
        const char *channel;
        const char *name;
        const char *format;
        struct ltt_available_probe *probe;
};

struct marker; //ust//
extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
        struct registers *regs, void *call_data, const char *fmt, va_list *args);
extern void ltt_trace(const struct marker *mdata, void *probe_data,
        struct registers *regs, void *call_data, const char *fmt, ...);

/*
 * Unique ID assigned to each registered probe.
 */
enum marker_id {
        MARKER_ID_SET_MARKER_ID = 0,    /* Static IDs available (range 0-7) */
        MARKER_ID_SET_MARKER_FORMAT,
        MARKER_ID_COMPACT,              /* Compact IDs (range: 8-127) */
        MARKER_ID_DYNAMIC,              /* Dynamic IDs (range: 128-65535) */
};

/* static ids 0-1 reserved for internal use. */
#define MARKER_CORE_IDS 2
static __inline__ enum marker_id marker_id_type(uint16_t id)
{
        if (id < MARKER_CORE_IDS)
                return (enum marker_id)id;
        else
                return MARKER_ID_DYNAMIC;
}
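
/*
 * Illustrative note (not part of the original header): the mapping performed
 * by marker_id_type() above, given MARKER_CORE_IDS == 2:
 *
 *      marker_id_type(0)  == MARKER_ID_SET_MARKER_ID
 *      marker_id_type(1)  == MARKER_ID_SET_MARKER_FORMAT
 *      marker_id_type(42) == MARKER_ID_DYNAMIC
 */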

struct user_dbg_data {
        unsigned long avail_size;
        unsigned long write;
        unsigned long read;
};

struct ltt_trace_ops {
        /* First 32 bytes cache-hot cacheline */
        void (*wakeup_channel) (struct ust_channel *channel);
        int (*user_blocking) (struct ust_trace *trace,
                unsigned int index, size_t data_size,
                struct user_dbg_data *dbg);
        /* End of first 32 bytes cacheline */
        int (*create_dirs) (struct ust_trace *new_trace);
        void (*remove_dirs) (struct ust_trace *new_trace);
        int (*create_channel) (const char *trace_name,
                struct ust_trace *trace,
                const char *channel_name,
                struct ust_channel *channel,
                unsigned int subbuf_size,
                unsigned int n_subbufs, int overwrite);
        void (*finish_channel) (struct ust_channel *channel);
        void (*remove_channel) (struct ust_channel *channel);
        void (*user_errors) (struct ust_trace *trace,
                unsigned int index, size_t data_size,
                struct user_dbg_data *dbg, unsigned int cpu);
};

struct ltt_transport {
        char *name;
        struct module *owner;
        struct list_head node;
        struct ltt_trace_ops ops;
};

enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };

#define CHANNEL_FLAG_ENABLE (1U<<0)
#define CHANNEL_FLAG_OVERWRITE (1U<<1)

/* Per-trace information - each trace/flight recorder represented by one */
struct ust_trace {
        /* First 32 bytes cache-hot cacheline */
        struct list_head list;
        struct ltt_trace_ops *ops;
        int active;
        /* Second 32 bytes cache-hot cacheline */
        struct ust_channel *channels;
        unsigned int nr_channels;
        u32 freq_scale;
        u64 start_freq;
        u64 start_tsc;
        unsigned long long start_monotonic;
        struct timeval start_time;
        struct ltt_channel_setting *settings;
        struct {
                struct dentry *trace_root;
        } dentry;
        struct kref kref;       /* Each channel has a kref of the trace struct */
        struct ltt_transport *transport;
        struct kref ltt_transport_kref;
        char trace_name[NAME_MAX];
} ____cacheline_aligned;

/*
 * We use the cpu_khz/HZ variables from asm/timex.h here: we may have to deal
 * specifically with CPU frequency scaling someday, so interpolating between
 * the values at the start and end of a buffer is not flexible enough. Storing
 * an immediate frequency value lets times be computed directly, even for the
 * parts of a buffer written before a frequency change.
 *
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. The packed attribute is not
 * used because gcc generates poor code on at least powerpc and mips. Don't
 * ever let gcc add padding between the structure elements.
 */
struct ltt_subbuffer_header {
        uint64_t cycle_count_begin;     /* Cycle count at subbuffer start */
        uint64_t cycle_count_end;       /* Cycle count at subbuffer end */
        uint32_t magic_number;          /* Trace magic number;
                                           contains endianness information. */
        uint8_t major_version;
        uint8_t minor_version;
        uint8_t arch_size;              /* Architecture pointer size */
        uint8_t alignment;              /* LTT data alignment */
        uint64_t start_time_sec;        /* NTP-corrected start time */
        uint64_t start_time_usec;
        uint64_t start_freq;            /* Frequency at trace start,
                                           used all along the trace. */
        uint32_t freq_scale;            /* Frequency scaling (divisor) */
        uint32_t data_size;             /* Size of data in subbuffer */
        uint32_t sb_size;               /* Subbuffer size (including padding) */
        uint32_t events_lost;           /* Events lost in this subbuffer since
                                           the beginning of the trace
                                           (may overflow). */
        uint32_t subbuf_corrupt;        /* Corrupted (lost) subbuffers since
                                           the beginning of the trace
                                           (may overflow). */
        uint8_t header_end[0];          /* End of header */
};

/**
 * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static __inline__ size_t ltt_subbuffer_header_size(void)
{
        return offsetof(struct ltt_subbuffer_header, header_end);
}

extern size_t ltt_write_event_header_slow(struct ust_trace *trace,
                struct ust_channel *channel,
                struct ust_buffer *buf, long buf_offset,
                u16 eID, u32 event_size,
                u64 tsc, unsigned int rflags);


/*
 * ltt_write_event_header
 *
 * Writes the event header at the given offset (already aligned on 32 bits).
 *
 * @trace : trace to write to.
 * @chan : pointer to the channel structure.
 * @buf : buffer to write to.
 * @buf_offset : buffer offset to write to (aligned on 32 bits).
 * @eID : event ID.
 * @event_size : size of the event, excluding the event header.
 * @tsc : time stamp counter.
 * @rflags : reservation flags.
 *
 * Returns the offset where the event data must be written.
 */
static __inline__ size_t ltt_write_event_header(struct ust_trace *trace,
                struct ust_channel *chan,
                struct ust_buffer *buf, long buf_offset,
                u16 eID, u32 event_size,
                u64 tsc, unsigned int rflags)
{
        struct ltt_event_header header;

        if (unlikely(rflags))
                goto slow_path;

        header.id_time = eID << LTT_TSC_BITS;
        header.id_time |= (u32)tsc & LTT_TSC_MASK;
        ust_buffers_write(buf, buf_offset, &header, sizeof(header));
        buf_offset += sizeof(header);

        return buf_offset;

slow_path:
        return ltt_write_event_header_slow(trace, chan, buf, buf_offset,
                        eID, event_size, tsc, rflags);
}
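
/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * caller that has already reserved space for the header plus the payload
 * would typically do
 *
 *      buf_offset = ltt_write_event_header(trace, chan, buf, buf_offset,
 *                                          eID, event_size, tsc, rflags);
 *      ust_buffers_write(buf, buf_offset, payload, event_size);
 *      buf_offset += event_size;
 *
 * where "payload" is a hypothetical pointer to the serialized event data.
 */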

/*
 * Control channels :
 * control/metadata
 * control/interrupts
 * control/...
 *
 * cpu channel :
 * cpu
 */

#define LTT_METADATA_CHANNEL "metadata_state"
#define LTT_UST_CHANNEL "ust"

#define LTT_FLIGHT_PREFIX "flight-"

/* Tracer properties */
//#define LTT_DEFAULT_SUBBUF_SIZE_LOW 134217728
#define LTT_DEFAULT_SUBBUF_SIZE_LOW 65536
//#define LTT_DEFAULT_SUBBUF_SIZE_LOW 4096
#define LTT_DEFAULT_N_SUBBUFS_LOW 2
//#define LTT_DEFAULT_SUBBUF_SIZE_MED 134217728
#define LTT_DEFAULT_SUBBUF_SIZE_MED 262144
//#define LTT_DEFAULT_SUBBUF_SIZE_MED 4096
#define LTT_DEFAULT_N_SUBBUFS_MED 2
//#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 134217728
#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 1048576
//#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 4096
#define LTT_DEFAULT_N_SUBBUFS_HIGH 2
#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 2
#define LTT_TRACER_VERSION_MINOR 6

/**
 * ltt_write_trace_header - Write the trace header
 * @trace: Trace information
 * @header: Memory address where the information must be written to
 */
static __inline__ void ltt_write_trace_header(struct ust_trace *trace,
                struct ltt_subbuffer_header *header)
{
        header->magic_number = LTT_TRACER_MAGIC_NUMBER;
        header->major_version = LTT_TRACER_VERSION_MAJOR;
        header->minor_version = LTT_TRACER_VERSION_MINOR;
        header->arch_size = sizeof(void *);
        header->alignment = ltt_get_alignment();
        header->start_time_sec = trace->start_time.tv_sec;
        header->start_time_usec = trace->start_time.tv_usec;
        header->start_freq = trace->start_freq;
        header->freq_scale = trace->freq_scale;
}
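
/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * trace reader can use the magic number written above to detect the
 * producer's byte order, e.g. with bswap_32() from <byteswap.h>:
 *
 *      if (header->magic_number == LTT_TRACER_MAGIC_NUMBER)
 *              reader_same_endianness = 1;
 *      else if (header->magic_number == bswap_32(LTT_TRACER_MAGIC_NUMBER))
 *              reader_same_endianness = 0;     // byte-swap fields on read
 *
 * "reader_same_endianness" is a hypothetical reader-side flag.
 */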


/*
 * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
 * nearly full buffer. User space won't use this last amount of space when in
 * blocking mode. This space also includes the event header that would be
 * written by this user space event.
 */
#define LTT_RESERVE_CRITICAL 4096

/* Register and unregister function pointers */

enum ltt_module_function {
        LTT_FUNCTION_RUN_FILTER,
        LTT_FUNCTION_FILTER_CONTROL,
        LTT_FUNCTION_STATEDUMP
};

extern void ltt_transport_register(struct ltt_transport *transport);
extern void ltt_transport_unregister(struct ltt_transport *transport);
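
/*
 * Illustrative sketch (an assumption, not part of the original header): a
 * transport fills in struct ltt_trace_ops and registers itself by name.
 * The callback names below are hypothetical.
 *
 *      static struct ltt_transport my_transport = {
 *              .name = "my-transport",
 *              .ops = {
 *                      .create_channel = my_create_channel,
 *                      .remove_channel = my_remove_channel,
 *                      // ... remaining ltt_trace_ops callbacks ...
 *              },
 *      };
 *
 *      ltt_transport_register(&my_transport);
 */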

/* Exported control function */

union ltt_control_args {
        struct {
                enum trace_mode mode;
                unsigned int subbuf_size_low;
                unsigned int n_subbufs_low;
                unsigned int subbuf_size_med;
                unsigned int n_subbufs_med;
                unsigned int subbuf_size_high;
                unsigned int n_subbufs_high;
        } new_trace;
};

extern int _ltt_trace_setup(const char *trace_name);
extern int ltt_trace_setup(const char *trace_name);
extern struct ust_trace *_ltt_trace_find_setup(const char *trace_name);
extern int ltt_trace_set_type(const char *trace_name, const char *trace_type);
extern int ltt_trace_set_channel_subbufsize(const char *trace_name,
                const char *channel_name, unsigned int size);
extern int ltt_trace_set_channel_subbufcount(const char *trace_name,
                const char *channel_name, unsigned int cnt);
extern int ltt_trace_set_channel_enable(const char *trace_name,
                const char *channel_name, unsigned int enable);
extern int ltt_trace_set_channel_overwrite(const char *trace_name,
                const char *channel_name, unsigned int overwrite);
extern int ltt_trace_alloc(const char *trace_name);
extern int ltt_trace_destroy(const char *trace_name, int drop);
extern int ltt_trace_start(const char *trace_name);
extern int ltt_trace_stop(const char *trace_name);
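
/*
 * Illustrative sketch (an assumption, not part of the original header): the
 * usual lifecycle of a trace driven through the control functions above.
 * The trace name "auto" and transport type "my-transport" are hypothetical
 * placeholders, and error handling of the int return values is omitted.
 *
 *      ltt_trace_setup("auto");
 *      ltt_trace_set_type("auto", "my-transport");
 *      ltt_trace_set_channel_subbufsize("auto", LTT_UST_CHANNEL,
 *                                       LTT_DEFAULT_SUBBUF_SIZE_MED);
 *      ltt_trace_set_channel_subbufcount("auto", LTT_UST_CHANNEL,
 *                                        LTT_DEFAULT_N_SUBBUFS_MED);
 *      ltt_trace_set_channel_enable("auto", LTT_UST_CHANNEL, 1);
 *      ltt_trace_alloc("auto");
 *      ltt_trace_start("auto");
 *      // ... tracing runs ...
 *      ltt_trace_stop("auto");
 *      ltt_trace_destroy("auto", 0);
 */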

enum ltt_filter_control_msg {
        LTT_FILTER_DEFAULT_ACCEPT,
        LTT_FILTER_DEFAULT_REJECT
};

extern int ltt_filter_control(enum ltt_filter_control_msg msg,
                const char *trace_name);

extern struct dentry *get_filter_root(void);

extern void ltt_write_trace_header(struct ust_trace *trace,
                struct ltt_subbuffer_header *header);
extern void ltt_buffer_destroy(struct ust_channel *ltt_chan);

extern void ltt_core_register(int (*function)(u8, void *));

extern void ltt_core_unregister(void);

extern void ltt_release_trace(struct kref *kref);
extern void ltt_release_transport(struct kref *kref);

extern void ltt_dump_marker_state(struct ust_trace *trace);

extern void ltt_lock_traces(void);
extern void ltt_unlock_traces(void);

extern struct ust_trace *_ltt_trace_find(const char *trace_name);

#endif /* _LTT_TRACER_H */