/*
 * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 * Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This contains the definitions for the Linux Trace Toolkit tracer.
 *
 * Ported to userspace by Pierre-Marc Fournier.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _LTT_TRACER_H
#define _LTT_TRACER_H

#include <sys/types.h>
#include <stdarg.h>
//#include "list.h"
#include <ust/kernelcompat.h>
#include "buffer.h"
#include "relay.h"
#include "channels.h"
#include "tracercore.h"
#include <ust/marker.h>

/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE 32L

/* Interval (in jiffies) at which the LTT per-CPU timer fires */
#define LTT_PERCPU_TIMER_INTERVAL 1

#ifndef LTT_ARCH_TYPE
#define LTT_ARCH_TYPE LTT_ARCH_TYPE_UNDEFINED
#endif

#ifndef LTT_ARCH_VARIANT
#define LTT_ARCH_VARIANT LTT_ARCH_VARIANT_NONE
#endif

struct ltt_active_marker;

/* Maximum number of callbacks per marker */
#define LTT_NR_CALLBACKS 10

struct ltt_serialize_closure;
struct ltt_probe_private_data;

/* Serialization callback '%k' */
typedef size_t (*ltt_serialize_cb)(struct rchan_buf *buf, size_t buf_offset,
                struct ltt_serialize_closure *closure,
                void *serialize_private, int *largest_align,
                const char *fmt, va_list *args);

struct ltt_serialize_closure {
        ltt_serialize_cb *callbacks;
        long cb_args[LTT_NR_CALLBACKS];
        unsigned int cb_idx;
};

extern size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
                struct ltt_serialize_closure *closure,
                void *serialize_private,
                int *largest_align, const char *fmt, va_list *args);

struct ltt_available_probe {
        const char *name;               /* probe name */
        const char *format;
        marker_probe_func *probe_func;
        ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
        struct list_head node;          /* registered probes list */
};

struct ltt_probe_private_data {
        struct ltt_trace_struct *trace; /*
                                         * Target trace, for metadata
                                         * or statedump.
                                         */
        ltt_serialize_cb serializer;    /*
                                         * Serialization function override.
                                         */
        void *serialize_private;        /*
                                         * Private data for serialization
                                         * functions.
                                         */
};

enum ltt_channels {
        LTT_CHANNEL_METADATA,
        LTT_CHANNEL_UST,
};

struct ltt_active_marker {
        struct list_head node;          /* active markers list */
        const char *channel;
        const char *name;
        const char *format;
        struct ltt_available_probe *probe;
};

struct marker; //ust//
extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
                void *call_data, const char *fmt, va_list *args);
extern void ltt_trace(const struct marker *mdata, void *probe_data,
                void *call_data, const char *fmt, ...);

/*
 * Unique ID assigned to each registered probe.
 */
enum marker_id {
        MARKER_ID_SET_MARKER_ID = 0,    /* Static IDs available (range 0-7) */
        MARKER_ID_SET_MARKER_FORMAT,
        MARKER_ID_COMPACT,              /* Compact IDs (range: 8-127) */
        MARKER_ID_DYNAMIC,              /* Dynamic IDs (range: 128-65535) */
};

/* static ids 0-1 reserved for internal use. */
#define MARKER_CORE_IDS 2
static inline enum marker_id marker_id_type(uint16_t id)
{
        if (id < MARKER_CORE_IDS)
                return (enum marker_id)id;
        else
                return MARKER_ID_DYNAMIC;
}
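
/*
 * Example, derived directly from the helper above: ids below MARKER_CORE_IDS
 * keep their reserved meaning, any other id is classified as dynamic.
 *
 *   marker_id_type(0)  == MARKER_ID_SET_MARKER_ID
 *   marker_id_type(1)  == MARKER_ID_SET_MARKER_FORMAT
 *   marker_id_type(42) == MARKER_ID_DYNAMIC
 */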

struct user_dbg_data {
        unsigned long avail_size;
        unsigned long write;
        unsigned long read;
};

struct ltt_trace_ops {
        /* First 32 bytes cache-hot cacheline */
        int (*reserve_slot) (struct ltt_trace_struct *trace,
                        struct ltt_channel_struct *channel,
                        void **transport_data, size_t data_size,
                        size_t *slot_size, long *buf_offset, u64 *tsc,
                        unsigned int *rflags,
                        int largest_align);
//ust//        void (*commit_slot) (struct ltt_channel_struct *channel,
//ust//                        void **transport_data, long buf_offset,
//ust//                        size_t slot_size);
        void (*wakeup_channel) (struct ltt_channel_struct *ltt_channel);
        int (*user_blocking) (struct ltt_trace_struct *trace,
                        unsigned int index, size_t data_size,
                        struct user_dbg_data *dbg);
        /* End of first 32 bytes cacheline */
        int (*create_dirs) (struct ltt_trace_struct *new_trace);
        void (*remove_dirs) (struct ltt_trace_struct *new_trace);
        int (*create_channel) (const char *trace_name,
                        struct ltt_trace_struct *trace,
                        struct dentry *dir, const char *channel_name,
                        struct ltt_channel_struct *ltt_chan,
                        unsigned int subbuf_size,
                        unsigned int n_subbufs, int overwrite);
        void (*finish_channel) (struct ltt_channel_struct *channel);
        void (*remove_channel) (struct ltt_channel_struct *channel);
        void (*user_errors) (struct ltt_trace_struct *trace,
                        unsigned int index, size_t data_size,
                        struct user_dbg_data *dbg);
} ____cacheline_aligned;

struct ltt_transport {
        char *name;
        struct module *owner;
        struct list_head node;
        struct ltt_trace_ops ops;
};

enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };

#define CHANNEL_FLAG_ENABLE (1U<<0)
#define CHANNEL_FLAG_OVERWRITE (1U<<1)

/* Per-trace information - each trace/flight recorder is represented by one */
struct ltt_trace_struct {
        /* First 32 bytes cache-hot cacheline */
        struct list_head list;
        struct ltt_trace_ops *ops;
        int active;
        /* Second 32 bytes cache-hot cacheline */
        struct ltt_channel_struct *channels;
        unsigned int nr_channels;
        u32 freq_scale;
        u64 start_freq;
        u64 start_tsc;
        unsigned long long start_monotonic;
        struct timeval start_time;
        struct ltt_channel_setting *settings;
        struct {
                struct dentry *trace_root;
        } dentry;
        struct kref kref;               /* Each channel has a kref of the trace struct */
        struct ltt_transport *transport;
        struct kref ltt_transport_kref;
        char trace_name[NAME_MAX];
} ____cacheline_aligned;

/* Hardcoded event headers
 *
 * Event header for a trace with an active heartbeat: 27-bit timestamps.
 *
 * Headers are 32-bit aligned. To ensure such alignment, a dynamic per-trace
 * alignment value must be used.
 *
 * Remember that the C compiler aligns each member on a boundary equal to its
 * own size.
 *
 * As relay subbuffers are page-aligned, we know they are 4- and 8-byte
 * aligned, so the buffer header and trace header are aligned.
 *
 * Event headers are aligned depending on the trace alignment option.
 *
 * Note using C structure bitfields for cross-endianness and portability
 * concerns.
 */

#define LTT_RESERVED_EVENTS 3
#define LTT_EVENT_BITS 5
#define LTT_FREE_EVENTS ((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
#define LTT_TSC_BITS 27
#define LTT_TSC_MASK ((1 << LTT_TSC_BITS) - 1)

struct ltt_event_header {
        u32 id_time;            /* 5 bits event id (MSB); 27 bits time (LSB) */
};
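
/*
 * Worked example of the id_time packing above (see ltt_write_event_header()
 * below for the authoritative code): a compact event with id 3 and timestamp
 * tsc is written as
 *
 *   id_time = (3 << LTT_TSC_BITS) | (tsc & LTT_TSC_MASK)
 *
 * i.e. the event id occupies the top LTT_EVENT_BITS (5) bits and the low
 * LTT_TSC_BITS (27) bits hold the truncated timestamp. The top
 * LTT_RESERVED_EVENTS (3) id values, 29-31, are escape codes for extended
 * headers (see the LTT_RFLAG_* reservation flags below).
 */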

/* Reservation flags */
#define LTT_RFLAG_ID (1 << 0)
#define LTT_RFLAG_ID_SIZE (1 << 1)
#define LTT_RFLAG_ID_SIZE_TSC (1 << 2)

/*
 * We use the cpu_khz/HZ variables from asm/timex.h here: we may have to deal
 * specifically with CPU frequency scaling someday, so interpolating between
 * the start-of-buffer and end-of-buffer values is not flexible enough.
 * Recording an immediate frequency value lets us compute directly the times
 * for the parts of a buffer written before a frequency change.
 *
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. The packed attribute is not
 * used because gcc generates poor code on at least powerpc and mips. Don't
 * ever let gcc add padding between the structure elements.
 */
struct ltt_subbuffer_header {
        uint64_t cycle_count_begin;     /* Cycle count at subbuffer start */
        uint64_t cycle_count_end;       /* Cycle count at subbuffer end */
        uint32_t magic_number;          /*
                                         * Trace magic number.
                                         * Contains endianness information.
                                         */
        uint8_t major_version;
        uint8_t minor_version;
        uint8_t arch_size;              /* Architecture pointer size */
        uint8_t alignment;              /* LTT data alignment */
        uint64_t start_time_sec;        /* NTP-corrected start time */
        uint64_t start_time_usec;
        uint64_t start_freq;            /*
                                         * Frequency at trace start,
                                         * used all along the trace.
                                         */
        uint32_t freq_scale;            /* Frequency scaling (divisor) */
        uint32_t lost_size;             /* Size unused at end of subbuffer */
        uint32_t buf_size;              /* Size of this subbuffer */
        uint32_t events_lost;           /*
                                         * Events lost in this subbuffer since
                                         * the beginning of the trace.
                                         * (may overflow)
                                         */
        uint32_t subbuf_corrupt;        /*
                                         * Corrupted (lost) subbuffers since
                                         * the beginning of the trace.
                                         * (may overflow)
                                         */
        uint8_t header_end[0];          /* End of header */
};

/**
 * ltt_subbuffer_header_size - called on buffer-switch to a new sub-buffer
 *
 * Returns the header size without padding after the structure. A packed
 * structure is not used because gcc generates inefficient code on some
 * architectures (powerpc, mips..).
 */
static inline size_t ltt_subbuffer_header_size(void)
{
        return offsetof(struct ltt_subbuffer_header, header_end);
}

/*
 * ltt_get_header_size
 *
 * Calculate the alignment offset to 32 bits. This is the alignment offset of
 * the event header.
 *
 * Important note:
 * The event header must be 32 bits. The total offset calculated here is:
 *
 *   Alignment of header struct on 32 bits (min arch size, header size)
 *   + sizeof(header struct) (32 bits)
 *   + (opt) u16 (ext. event id)
 *   + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
 *   + (opt) u32 (ext. event size)
 *   + (opt) u64 full TSC (aligned on min(64 bits, arch size))
 *
 * The payload must itself determine its own alignment from the biggest type
 * it contains.
 */
static inline unsigned char ltt_get_header_size(
                struct ltt_channel_struct *channel,
                size_t offset,
                size_t data_size,
                size_t *before_hdr_pad,
                unsigned int rflags)
{
        size_t orig_offset = offset;
        size_t padding;

        padding = ltt_align(offset, sizeof(struct ltt_event_header));
        offset += padding;
        offset += sizeof(struct ltt_event_header);

        switch (rflags) {
        case LTT_RFLAG_ID_SIZE_TSC:
                offset += sizeof(u16) + sizeof(u16);
                if (data_size >= 0xFFFFU)
                        offset += sizeof(u32);
                offset += ltt_align(offset, sizeof(u64));
                offset += sizeof(u64);
                break;
        case LTT_RFLAG_ID_SIZE:
                offset += sizeof(u16) + sizeof(u16);
                if (data_size >= 0xFFFFU)
                        offset += sizeof(u32);
                break;
        case LTT_RFLAG_ID:
                offset += sizeof(u16);
                break;
        }

        *before_hdr_pad = padding;
        return offset - orig_offset;
}
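
/*
 * Worked example, derived from ltt_get_header_size() above, assuming the
 * reservation starts at an 8-byte-aligned offset (so no padding is needed
 * anywhere) and data_size < 0xFFFF:
 *
 *   rflags == 0 (compact header):    4 bytes (struct ltt_event_header only)
 *   rflags == LTT_RFLAG_ID:          4 + 2 (event id) = 6 bytes
 *   rflags == LTT_RFLAG_ID_SIZE:     4 + 2 + 2 (event size) = 8 bytes
 *   rflags == LTT_RFLAG_ID_SIZE_TSC: 4 + 2 + 2 + 8 (full TSC) = 16 bytes
 *
 * Unaligned starting offsets add before_hdr_pad bytes in front of the header,
 * plus possible padding before the full TSC.
 */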

/*
 * ltt_write_event_header
 *
 * Writes the event header at the given offset (already aligned on 32 bits).
 *
 * @trace : trace to write to.
 * @channel : pointer to the channel structure.
 * @buf : buffer to write to.
 * @buf_offset : buffer offset to write to (aligned on 32 bits).
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @tsc : time stamp counter.
 * @rflags : reservation flags.
 *
 * Returns the offset where the event data must be written.
 */
static inline size_t ltt_write_event_header(struct ltt_trace_struct *trace,
                struct ltt_channel_struct *channel,
                struct rchan_buf *buf, long buf_offset,
                u16 eID, size_t event_size,
                u64 tsc, unsigned int rflags)
{
        struct ltt_event_header header;
        size_t small_size;

        switch (rflags) {
        case LTT_RFLAG_ID_SIZE_TSC:
                header.id_time = 29 << LTT_TSC_BITS;
                break;
        case LTT_RFLAG_ID_SIZE:
                header.id_time = 30 << LTT_TSC_BITS;
                break;
        case LTT_RFLAG_ID:
                header.id_time = 31 << LTT_TSC_BITS;
                break;
        default:
                header.id_time = eID << LTT_TSC_BITS;
                break;
        }
        header.id_time |= (u32)tsc & LTT_TSC_MASK;
        ltt_relay_write(buf, buf_offset, &header, sizeof(header));
        buf_offset += sizeof(header);

        switch (rflags) {
        case LTT_RFLAG_ID_SIZE_TSC:
                small_size = min_t(size_t, event_size, 0xFFFFU);
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)eID }, sizeof(u16));
                buf_offset += sizeof(u16);
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)small_size }, sizeof(u16));
                buf_offset += sizeof(u16);
                if (small_size == 0xFFFFU) {
                        ltt_relay_write(buf, buf_offset,
                                (u32[]){ (u32)event_size }, sizeof(u32));
                        buf_offset += sizeof(u32);
                }
                buf_offset += ltt_align(buf_offset, sizeof(u64));
                ltt_relay_write(buf, buf_offset,
                        (u64[]){ (u64)tsc }, sizeof(u64));
                buf_offset += sizeof(u64);
                break;
        case LTT_RFLAG_ID_SIZE:
                small_size = min_t(size_t, event_size, 0xFFFFU);
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)eID }, sizeof(u16));
                buf_offset += sizeof(u16);
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)small_size }, sizeof(u16));
                buf_offset += sizeof(u16);
                if (small_size == 0xFFFFU) {
                        ltt_relay_write(buf, buf_offset,
                                (u32[]){ (u32)event_size }, sizeof(u32));
                        buf_offset += sizeof(u32);
                }
                break;
        case LTT_RFLAG_ID:
                ltt_relay_write(buf, buf_offset,
                        (u16[]){ (u16)eID }, sizeof(u16));
                buf_offset += sizeof(u16);
                break;
        default:
                break;
        }

        return buf_offset;
}
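
/*
 * Note on the encoding above: event ids 29, 30 and 31 (the LTT_RESERVED_EVENTS
 * top values of the 5-bit id space) are escape codes telling the reader that
 * an extended header follows: the real 16-bit event id, optionally a 16-bit
 * event size (0xFFFF meaning "see the following 32-bit size"), and, for
 * LTT_RFLAG_ID_SIZE_TSC, a full 64-bit TSC aligned on 8 bytes.
 */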

/* Lockless LTTng */

/*
 * ltt_reserve_slot
 *
 * Atomic slot reservation in an LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @channel : the channel to reserve space into.
 * @transport_data : specific transport data.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @buf_offset : pointer to the reserved offset (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @rflags : reservation flags (header-specific flags)
 * @largest_align : largest alignment required by the event payload
 *
 * Return : -ENOSPC if not enough space, else 0.
 */
static inline int ltt_reserve_slot(
                struct ltt_trace_struct *trace,
                struct ltt_channel_struct *channel,
                void **transport_data,
                size_t data_size,
                size_t *slot_size,
                long *buf_offset,
                u64 *tsc,
                unsigned int *rflags,
                int largest_align)
{
        return trace->ops->reserve_slot(trace, channel, transport_data,
                        data_size, slot_size, buf_offset, tsc, rflags,
                        largest_align);
}
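
/*
 * Illustrative probe-side sketch (not part of the API; trace, channel, buf,
 * eID, data, data_size and largest_align are assumed to be provided by the
 * caller/transport):
 *
 *      size_t slot_size;
 *      long buf_offset;
 *      u64 tsc;
 *      unsigned int rflags = 0;
 *      void *transport_data;
 *
 *      if (ltt_reserve_slot(trace, channel, &transport_data, data_size,
 *                           &slot_size, &buf_offset, &tsc, &rflags,
 *                           largest_align))
 *              return;  // -ENOSPC: buffer full, event dropped
 *      buf_offset = ltt_write_event_header(trace, channel, buf, buf_offset,
 *                                          eID, data_size, tsc, rflags);
 *      ltt_relay_write(buf, buf_offset, data, data_size);
 *
 * The reserved slot is then committed through the transport; the generic
 * ltt_commit_slot() wrapper is disabled in this userspace port (see below).
 */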

///*
// * ltt_commit_slot
// *
// * Atomic unordered slot commit. Increments the commit count in the
// * specified sub-buffer, and delivers it if necessary.
// *
// * Parameters:
// *
// * @channel : the channel to reserve space into.
// * @transport_data : specific transport data.
// * @buf_offset : offset of beginning of reserved slot
// * @slot_size : size of the reserved slot.
// */
//static inline void ltt_commit_slot(
//              struct ltt_channel_struct *channel,
//              void **transport_data,
//              long buf_offset,
//              size_t slot_size)
//{
//      struct ltt_trace_struct *trace = channel->trace;
//
//      trace->ops->commit_slot(channel, transport_data, buf_offset, slot_size);
//}

/*
 * Control channels:
 * control/metadata
 * control/interrupts
 * control/...
 *
 * cpu channel:
 * cpu
 */

#define LTT_METADATA_CHANNEL "metadata_state"
#define LTT_UST_CHANNEL "ust"

#define LTT_FLIGHT_PREFIX "flight-"

/* Tracer properties */
//#define LTT_DEFAULT_SUBBUF_SIZE_LOW 134217728
#define LTT_DEFAULT_SUBBUF_SIZE_LOW 65536
//#define LTT_DEFAULT_SUBBUF_SIZE_LOW 4096
#define LTT_DEFAULT_N_SUBBUFS_LOW 2
//#define LTT_DEFAULT_SUBBUF_SIZE_MED 134217728
#define LTT_DEFAULT_SUBBUF_SIZE_MED 262144
//#define LTT_DEFAULT_SUBBUF_SIZE_MED 4096
#define LTT_DEFAULT_N_SUBBUFS_MED 2
//#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 134217728
#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 1048576
//#define LTT_DEFAULT_SUBBUF_SIZE_HIGH 4096
#define LTT_DEFAULT_N_SUBBUFS_HIGH 2
#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 2
#define LTT_TRACER_VERSION_MINOR 3

/*
 * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
 * nearly full buffer. User space won't use this last amount of space when in
 * blocking mode. This space also includes the event header that would be
 * written by this user space event.
 */
#define LTT_RESERVE_CRITICAL 4096

/* Register and unregister function pointers */

enum ltt_module_function {
        LTT_FUNCTION_RUN_FILTER,
        LTT_FUNCTION_FILTER_CONTROL,
        LTT_FUNCTION_STATEDUMP
};

extern void ltt_transport_register(struct ltt_transport *transport);
extern void ltt_transport_unregister(struct ltt_transport *transport);
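
/*
 * Illustrative registration sketch (my_transport, my_reserve_slot and
 * my_create_channel are hypothetical names): a transport fills in an
 * ltt_trace_ops table and registers itself under a name that traces can
 * later be bound to:
 *
 *      static struct ltt_transport my_transport = {
 *              .name = "my-transport",
 *              .ops = {
 *                      .reserve_slot = my_reserve_slot,
 *                      .create_channel = my_create_channel,
 *                      // ... remaining ltt_trace_ops callbacks ...
 *              },
 *      };
 *
 *      ltt_transport_register(&my_transport);
 *
 * ltt_transport_unregister(&my_transport) removes it again.
 */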

/* Exported control functions */

union ltt_control_args {
        struct {
                enum trace_mode mode;
                unsigned int subbuf_size_low;
                unsigned int n_subbufs_low;
                unsigned int subbuf_size_med;
                unsigned int n_subbufs_med;
                unsigned int subbuf_size_high;
                unsigned int n_subbufs_high;
        } new_trace;
};

extern int _ltt_trace_setup(const char *trace_name);
extern int ltt_trace_setup(const char *trace_name);
extern struct ltt_trace_struct *_ltt_trace_find_setup(const char *trace_name);
extern int ltt_trace_set_type(const char *trace_name, const char *trace_type);
extern int ltt_trace_set_channel_subbufsize(const char *trace_name,
                const char *channel_name, unsigned int size);
extern int ltt_trace_set_channel_subbufcount(const char *trace_name,
                const char *channel_name, unsigned int cnt);
extern int ltt_trace_set_channel_enable(const char *trace_name,
                const char *channel_name, unsigned int enable);
extern int ltt_trace_set_channel_overwrite(const char *trace_name,
                const char *channel_name, unsigned int overwrite);
extern int ltt_trace_alloc(const char *trace_name);
extern int ltt_trace_destroy(const char *trace_name);
extern int ltt_trace_start(const char *trace_name);
extern int ltt_trace_stop(const char *trace_name);

enum ltt_filter_control_msg {
        LTT_FILTER_DEFAULT_ACCEPT,
        LTT_FILTER_DEFAULT_REJECT
};

extern int ltt_filter_control(enum ltt_filter_control_msg msg,
                const char *trace_name);

extern struct dentry *get_filter_root(void);

extern void ltt_write_trace_header(struct ltt_trace_struct *trace,
                struct ltt_subbuffer_header *header);
extern void ltt_buffer_destroy(struct ltt_channel_struct *ltt_chan);

extern void ltt_core_register(int (*function)(u8, void *));

extern void ltt_core_unregister(void);

extern void ltt_release_trace(struct kref *kref);
extern void ltt_release_transport(struct kref *kref);

extern int ltt_probe_register(struct ltt_available_probe *pdata);
extern int ltt_probe_unregister(struct ltt_available_probe *pdata);
extern int ltt_marker_connect(const char *channel, const char *mname,
                const char *pname);
extern int ltt_marker_disconnect(const char *channel, const char *mname,
                const char *pname);
extern void ltt_dump_marker_state(struct ltt_trace_struct *trace);

extern void ltt_lock_traces(void);
extern void ltt_unlock_traces(void);

extern struct ltt_trace_struct *_ltt_trace_find(const char *trace_name);

#endif /* _LTT_TRACER_H */