/*
 * Copyright (C) 2005,2006,2008 Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This contains the definitions for the Linux Trace Toolkit tracer.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#ifndef _LTT_TRACER_H
#define _LTT_TRACER_H

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/* Align data on its natural alignment */
#define RING_BUFFER_ALIGN
#endif

#include <linux/ringbuffer/config.h>

#include <stdarg.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/wait.h>
#include <linux/marker.h>
#include <linux/trace-clock.h>
#include <asm/atomic.h>
#include <asm/local.h>

#include "ltt-tracer-core.h"
#include "ltt-channels.h"

/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE		32L

struct ltt_active_marker;

/* Maximum number of callbacks per marker */
#define LTT_NR_CALLBACKS	10

struct ltt_serialize_closure {
	ltt_serialize_cb *callbacks;
	long cb_args[LTT_NR_CALLBACKS];
	unsigned int cb_idx;
};

size_t ltt_serialize_data(struct ltt_chanbuf *buf, size_t buf_offset,
			  struct ltt_serialize_closure *closure,
			  void *serialize_private, unsigned int stack_pos_ctx,
			  int *largest_align, const char *fmt, va_list *args);

struct ltt_available_probe {
	const char *name;		/* probe name */
	const char *format;
	marker_probe_func *probe_func;
	ltt_serialize_cb callbacks[LTT_NR_CALLBACKS];
	struct list_head node;		/* registered probes list */
};
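
/*
 * Example (illustrative sketch): a control module typically declares a probe,
 * registers it with ltt_probe_register(), and attaches it to a (channel,
 * marker) pair with ltt_marker_connect() -- both declared near the end of
 * this header.  The marker name below is hypothetical, and the probe_func
 * assignment assumes marker_probe_func matches ltt_vtrace()'s signature.
 */
#if 0
static struct ltt_available_probe example_probe = {
	.name = "default",
	.format = NULL,
	.probe_func = ltt_vtrace,		/* generic serializer entry point */
	.callbacks[0] = ltt_serialize_data,
};

static int __init example_init(void)
{
	int ret;

	ret = ltt_probe_register(&example_probe);
	if (ret)
		return ret;
	/* "kernel"/"sched_schedule" are example channel/marker names. */
	return ltt_marker_connect("kernel", "sched_schedule", "default");
}
#endif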

enum ltt_channels {
	LTT_CHANNEL_METADATA,
	LTT_CHANNEL_FD_STATE,
	LTT_CHANNEL_GLOBAL_STATE,
	LTT_CHANNEL_IRQ_STATE,
	LTT_CHANNEL_MODULE_STATE,
	LTT_CHANNEL_NETIF_STATE,
	LTT_CHANNEL_SOFTIRQ_STATE,
	LTT_CHANNEL_SWAP_STATE,
	LTT_CHANNEL_SYSCALL_STATE,
	LTT_CHANNEL_TASK_STATE,
	LTT_CHANNEL_VM_STATE,
	LTT_CHANNEL_FS,
	LTT_CHANNEL_INPUT,
	LTT_CHANNEL_IPC,
	LTT_CHANNEL_KERNEL,
	LTT_CHANNEL_MM,
	LTT_CHANNEL_RCU,
	LTT_CHANNEL_DEFAULT,
};

struct ltt_active_marker {
	struct list_head node;		/* active markers list */
	const char *channel;
	const char *name;
	const char *format;
	struct ltt_available_probe *probe;
};

extern void ltt_vtrace(const struct marker *mdata, void *probe_data,
		       void *call_data, const char *fmt, va_list *args);
extern void ltt_trace(const struct marker *mdata, void *probe_data,
		      void *call_data, const char *fmt, ...);

size_t ltt_serialize_printf(struct ltt_chanbuf *buf, unsigned long buf_offset,
			    size_t *msg_size, char *output, size_t outlen,
			    const char *fmt);

/*
 * Unique ID assigned to each registered probe.
 */
enum marker_id {
	MARKER_ID_SET_MARKER_ID = 0,	/* Static IDs available (range 0-7) */
	MARKER_ID_SET_MARKER_FORMAT,
	MARKER_ID_COMPACT,		/* Compact IDs (range: 8-127) */
	MARKER_ID_DYNAMIC,		/* Dynamic IDs (range: 128-65535) */
};

/* static ids 0-1 reserved for internal use. */
#define MARKER_CORE_IDS		2
static __inline__ enum marker_id marker_id_type(uint16_t id)
{
	if (id < MARKER_CORE_IDS)
		return (enum marker_id)id;
	else
		return MARKER_ID_DYNAMIC;
}
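
/*
 * For instance, with MARKER_CORE_IDS == 2, marker_id_type(0) returns
 * MARKER_ID_SET_MARKER_ID, marker_id_type(1) returns
 * MARKER_ID_SET_MARKER_FORMAT, and any other id (e.g. 300) is reported as
 * MARKER_ID_DYNAMIC.
 */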

struct user_dbg_data {
	unsigned long avail_size;
	unsigned long write;
	unsigned long read;
};

struct ltt_trace_ops {
	int (*create_dirs) (struct ltt_trace *new_trace);
	void (*remove_dirs) (struct ltt_trace *new_trace);
	struct channel *(*ltt_channel_create) (const char *name,
					struct ltt_trace *trace,
					void *buf_addr,
					size_t subbuf_size, size_t num_subbuf,
					unsigned int switch_timer_interval,
					unsigned int read_timer_interval);
	void (*ltt_channel_destroy) (struct channel *chan);
};

struct ltt_transport {
	char *name;
	struct module *owner;
	struct list_head node;
	struct ltt_trace_ops ops;
};
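
/*
 * Example (illustrative sketch): a buffer transport fills in struct
 * ltt_trace_ops and registers itself so ltt_trace_set_type() can look it up
 * by name.  The transport name and callback implementations referenced here
 * are hypothetical.
 */
#if 0
static struct ltt_transport example_transport = {
	.name = "relay-example",
	.owner = THIS_MODULE,
	.ops = {
		.create_dirs = example_create_dirs,
		.remove_dirs = example_remove_dirs,
		.ltt_channel_create = example_channel_create,
		.ltt_channel_destroy = example_channel_destroy,
	},
};

/* In module init/exit: */
/* ltt_transport_register(&example_transport); */
/* ltt_transport_unregister(&example_transport); */
#endif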

enum trace_mode { LTT_TRACE_NORMAL, LTT_TRACE_FLIGHT, LTT_TRACE_HYBRID };

#define CHANNEL_FLAG_ENABLE	(1U<<0)
#define CHANNEL_FLAG_OVERWRITE	(1U<<1)

/* Per-trace information - each trace/flight recorder represented by one */
struct ltt_trace {
	/* First 32 bytes cache-hot cacheline */
	struct list_head list;
	struct ltt_chan **channels;
	unsigned int nr_channels;
	int active;
	/* Second 32 bytes cache-hot cacheline */
	struct ltt_trace_ops *ops;
	u32 freq_scale;
	u64 start_freq;
	u64 start_tsc;
	unsigned long long start_monotonic;
	struct timeval start_time;
	struct ltt_channel_setting *settings;
	struct {
		struct dentry *trace_root;
		struct dentry *ascii_root;
	} dentry;
	struct kref kref;	/* Each channel has a kref of the trace struct */
	struct ltt_transport *transport;
	struct kref ltt_transport_kref;
	wait_queue_head_t kref_wq;	/* Place for ltt_trace_destroy to sleep */
	char trace_name[NAME_MAX];
} ____cacheline_aligned;

/*
 * Hardcoded event headers
 *
 * Event header for a trace with active heartbeat: 27-bit timestamps.
 *
 * Headers are 32-bit aligned. In order to ensure such alignment, a dynamic
 * per-trace alignment value must be used.
 *
 * Remember that the C compiler aligns each member on a boundary equal to its
 * own size.
 *
 * As relay subbuffers are aligned on pages, we are sure that they are 4- and
 * 8-byte aligned, so the buffer header and trace header are aligned.
 *
 * Event headers are aligned depending on the trace alignment option.
 *
 * Note: C structure bitfields are not used, because of cross-endianness and
 * portability concerns.
 */

#define LTT_RESERVED_EVENTS	3
#define LTT_EVENT_BITS		5
#define LTT_FREE_EVENTS		((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
#define LTT_TSC_BITS		27
#define LTT_TSC_MASK		((1 << LTT_TSC_BITS) - 1)

struct event_header {
	u32 id_time;		/* 5 bits event id (MSB); 27 bits time (LSB) */
};
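
/*
 * For example, with LTT_EVENT_BITS == 5 and LTT_TSC_BITS == 27, event id 3
 * recorded at a truncated TSC of 0x0123456 is packed as:
 *   id_time = (3 << 27) | (0x0123456 & LTT_TSC_MASK) = 0x18123456
 * The LTT_RESERVED_EVENTS highest ids (29, 30, 31) flag extended headers,
 * see the reservation flags and record_header_size() below.
 */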

/* Reservation flags */
#define LTT_RFLAG_ID			(1 << 0)
#define LTT_RFLAG_ID_SIZE		(1 << 1)
#define LTT_RFLAG_ID_SIZE_TSC		(1 << 2)

#define LTT_MAX_SMALL_SIZE		0xFFFFU

/*
 * We use asm/timex.h : cpu_khz/HZ variable in here : we might have to deal
 * specifically with CPU frequency scaling someday, so using an interpolation
 * between the start and end of buffer values is not flexible enough. Using an
 * immediate frequency value makes it possible to calculate the times directly
 * for the parts of a buffer that precede a frequency change.
 *
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */
struct subbuffer_header {
	uint64_t cycle_count_begin;	/* Cycle count at subbuffer start */
	uint64_t cycle_count_end;	/* Cycle count at subbuffer end */
	uint32_t magic_number;		/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t arch_size;		/* Architecture pointer size */
	uint8_t alignment;		/* LTT data alignment */
	uint64_t start_time_sec;	/* NTP-corrected start time */
	uint64_t start_time_usec;
	uint64_t start_freq;		/*
					 * Frequency at trace start,
					 * used all along the trace.
					 */
	uint32_t freq_scale;		/* Frequency scaling (divisor) */
	uint32_t data_size;		/* Size of data in subbuffer */
	uint32_t sb_size;		/* Subbuffer size (includes padding) */
	uint32_t events_lost;		/*
					 * Events lost in this subbuffer since
					 * the beginning of the trace.
					 * (may overflow)
					 */
	uint32_t subbuf_corrupt;	/*
					 * Corrupted (lost) subbuffers since
					 * the beginning of the trace.
					 * (may overflow)
					 */
	uint8_t header_end[0];		/* End of header */
};

static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @data_size: size of the payload
 * @pre_header_padding: padding to add before the header (output)
 * @rflags: reservation flags
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * Important note:
 * The event header must be 32 bits. The total offset calculated here:
 *
 * Alignment of header struct on 32 bits (min arch size, header size)
 * + sizeof(header struct) (32 bits)
 * + (opt) u16 (ext. event id)
 * + (opt) u16 (event_size)
 *   (if event_size == LTT_MAX_SMALL_SIZE, has ext. event size)
 * + (opt) u32 (ext. event size)
 * + (opt) u64 full TSC (aligned on min(64 bits, arch size))
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t data_size, size_t *pre_header_padding,
				 unsigned int rflags,
				 struct lib_ring_buffer_ctx *ctx)
{
	size_t orig_offset = offset;
	size_t padding;

	BUILD_BUG_ON(sizeof(struct event_header) != sizeof(u32));

	padding = lib_ring_buffer_align(config, offset,
					sizeof(struct event_header));
	offset += padding;
	offset += sizeof(struct event_header);

	if (unlikely(rflags)) {
		switch (rflags) {
		case LTT_RFLAG_ID_SIZE_TSC:
			offset += sizeof(u16) + sizeof(u16);
			if (data_size >= LTT_MAX_SMALL_SIZE)
				offset += sizeof(u32);
			offset += ltt_align(offset, sizeof(u64));
			offset += sizeof(u64);
			break;
		case LTT_RFLAG_ID_SIZE:
			offset += sizeof(u16) + sizeof(u16);
			if (data_size >= LTT_MAX_SMALL_SIZE)
				offset += sizeof(u32);
			break;
		case LTT_RFLAG_ID:
			offset += sizeof(u16);
			break;
		}
	}

	*pre_header_padding = padding;
	return offset - orig_offset;
}
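
/*
 * Worked example: starting from a 32-bit aligned offset, an event with
 * rflags == LTT_RFLAG_ID_SIZE and data_size < LTT_MAX_SMALL_SIZE needs
 * sizeof(struct event_header) + 2 * sizeof(u16) = 4 + 4 = 8 bytes of header,
 * with no pre-header padding.  The common case (rflags == 0) only needs the
 * 4-byte event_header.
 */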

#include <linux/ringbuffer/api.h>

extern
size_t ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer_ctx *ctx,
				   u16 eID, u32 event_size);

/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32 bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @eID: event ID
 * @event_size: size of the event, excluding the event header.
 */
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    u16 eID, u32 event_size)
{
	struct event_header header;

	if (unlikely(ctx->rflags))
		goto slow_path;

	header.id_time = eID << LTT_TSC_BITS;
	header.id_time |= (u32)ctx->tsc & LTT_TSC_MASK;
	lib_ring_buffer_write(config, ctx, &header, sizeof(header));
	return;

slow_path:
	ltt_write_event_header_slow(config, ctx, eID, event_size);
}
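
/*
 * Sketch of the intended fast-path usage (illustrative only).  Only
 * lib_ring_buffer_write() is used elsewhere in this header; the ctx_init/
 * reserve/commit calls below are assumed to be provided by
 * <linux/ringbuffer/api.h> and their exact signatures may differ.
 */
#if 0
static void example_trace_u32(const struct lib_ring_buffer_config *config,
			      struct channel *chan, u16 eID, u32 payload)
{
	struct lib_ring_buffer_ctx ctx;

	/* cpu == -1 assumes a global (non per-cpu) buffer. */
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(payload),
				 sizeof(payload), -1);
	if (lib_ring_buffer_reserve(config, &ctx))
		return;			/* buffer full or tracing stopped */
	ltt_write_event_header(config, &ctx, eID, sizeof(payload));
	lib_ring_buffer_write(config, &ctx, &payload, sizeof(payload));
	lib_ring_buffer_commit(config, &ctx);
}
#endif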

#if 0
/*
 * ltt_read_event_header
 * buf_offset must be aligned on 32 bits
 */
static __inline__
size_t ltt_read_event_header(struct ltt_chanbuf_alloc *bufa, long buf_offset,
			     u64 *tsc, u32 *event_size, u16 *eID,
			     unsigned int *rflags)
{
	struct ltt_event_header header;
	u16 small_size;

	ltt_relay_read(bufa, buf_offset, &header, sizeof(header));
	buf_offset += sizeof(header);

	*event_size = INT_MAX;
	*eID = header.id_time >> LTT_TSC_BITS;
	*tsc = header.id_time & LTT_TSC_MASK;

	switch (*eID) {
	case 29:
		*rflags = LTT_RFLAG_ID_SIZE_TSC;
		ltt_relay_read(bufa, buf_offset, eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ltt_relay_read(bufa, buf_offset, &small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ltt_relay_read(bufa, buf_offset, event_size,
				       sizeof(u32));
			buf_offset += sizeof(u32);
		} else
			*event_size = small_size;
		buf_offset += ltt_align(buf_offset, sizeof(u64));
		ltt_relay_read(bufa, buf_offset, tsc, sizeof(u64));
		buf_offset += sizeof(u64);
		break;
	case 30:
		*rflags = LTT_RFLAG_ID_SIZE;
		ltt_relay_read(bufa, buf_offset, eID, sizeof(u16));
		buf_offset += sizeof(u16);
		ltt_relay_read(bufa, buf_offset, &small_size, sizeof(u16));
		buf_offset += sizeof(u16);
		if (small_size == LTT_MAX_SMALL_SIZE) {
			ltt_relay_read(bufa, buf_offset, event_size,
				       sizeof(u32));
			buf_offset += sizeof(u32);
		} else
			*event_size = small_size;
		break;
	case 31:
		*rflags = LTT_RFLAG_ID;
		ltt_relay_read(bufa, buf_offset, eID, sizeof(u16));
		buf_offset += sizeof(u16);
		break;
	default:
		*rflags = 0;
		break;
	}

	return buf_offset;
}
#endif //0

/*
 * Control channels :
 * control/metadata
 * control/interrupts
 * control/...
 *
 * cpu channel :
 * cpu
 */
#define LTT_RELAY_ROOT			"ltt"

#define LTT_METADATA_CHANNEL		"metadata_state"
#define LTT_FD_STATE_CHANNEL		"fd_state"
#define LTT_GLOBAL_STATE_CHANNEL	"global_state"
#define LTT_IRQ_STATE_CHANNEL		"irq_state"
#define LTT_MODULE_STATE_CHANNEL	"module_state"
#define LTT_NETIF_STATE_CHANNEL		"netif_state"
#define LTT_SOFTIRQ_STATE_CHANNEL	"softirq_state"
#define LTT_SWAP_STATE_CHANNEL		"swap_state"
#define LTT_SYSCALL_STATE_CHANNEL	"syscall_state"
#define LTT_TASK_STATE_CHANNEL		"task_state"
#define LTT_VM_STATE_CHANNEL		"vm_state"
#define LTT_FS_CHANNEL			"fs"
#define LTT_INPUT_CHANNEL		"input"
#define LTT_IPC_CHANNEL			"ipc"
#define LTT_KERNEL_CHANNEL		"kernel"
#define LTT_MM_CHANNEL			"mm"
#define LTT_RCU_CHANNEL			"rcu"

#define LTT_FLIGHT_PREFIX		"flight-"

#define LTT_ASCII			"ascii"

/* Tracer properties */
#define LTT_DEFAULT_SUBBUF_SIZE_LOW	65536
#define LTT_DEFAULT_N_SUBBUFS_LOW	2
#define LTT_DEFAULT_SUBBUF_SIZE_MED	262144
#define LTT_DEFAULT_N_SUBBUFS_MED	2
#define LTT_DEFAULT_SUBBUF_SIZE_HIGH	1048576
#define LTT_DEFAULT_N_SUBBUFS_HIGH	2
#define LTT_TRACER_MAGIC_NUMBER		0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR	2
#define LTT_TRACER_VERSION_MINOR	6
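
/*
 * The magic number doubles as an endianness marker: a trace reader that sees
 * 0x00D6B7ED in the subbuffer header shares the tracer's byte order, whereas
 * 0xEDB7D600 means the trace data must be byte-swapped.
 */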

/**
 * ltt_write_trace_header - Write trace header
 * @priv: Private data (struct ltt_trace)
 * @header: Memory address where the information must be written to
 */
static __inline__
void ltt_write_trace_header(void *priv,
			    struct subbuffer_header *header)
{
	struct ltt_trace *trace = priv;

	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->arch_size = sizeof(void *);
	header->alignment = lib_ring_buffer_get_alignment();
	header->start_time_sec = trace->start_time.tv_sec;
	header->start_time_usec = trace->start_time.tv_usec;
	header->start_freq = trace->start_freq;
	header->freq_scale = trace->freq_scale;
}

/*
 * Size reserved for high priority events (interrupts, NMI, BH) at the end of a
 * nearly full buffer. User space won't use this last amount of space when in
 * blocking mode. This space also includes the event header that would be
 * written by this user space event.
 */
#define LTT_RESERVE_CRITICAL		4096

/* Register and unregister function pointers */

enum ltt_module_function {
	LTT_FUNCTION_RUN_FILTER,
	LTT_FUNCTION_FILTER_CONTROL,
	LTT_FUNCTION_STATEDUMP
};

extern int ltt_module_register(enum ltt_module_function name, void *function,
			       struct module *owner);
extern void ltt_module_unregister(enum ltt_module_function name);

void ltt_transport_register(struct ltt_transport *transport);
void ltt_transport_unregister(struct ltt_transport *transport);

/* Exported control function */

enum ltt_control_msg {
	LTT_CONTROL_START,
	LTT_CONTROL_STOP,
	LTT_CONTROL_CREATE_TRACE,
	LTT_CONTROL_DESTROY_TRACE
};

union ltt_control_args {
	struct {
		enum trace_mode mode;
		unsigned int subbuf_size_low;
		unsigned int n_subbufs_low;
		unsigned int subbuf_size_med;
		unsigned int n_subbufs_med;
		unsigned int subbuf_size_high;
		unsigned int n_subbufs_high;
	} new_trace;
};

int _ltt_trace_setup(const char *trace_name);
int ltt_trace_setup(const char *trace_name);
struct ltt_trace *_ltt_trace_find_setup(const char *trace_name);
int ltt_trace_set_type(const char *trace_name, const char *trace_type);
int ltt_trace_set_channel_subbufsize(const char *trace_name,
				     const char *channel_name,
				     unsigned int size);
int ltt_trace_set_channel_subbufcount(const char *trace_name,
				      const char *channel_name,
				      unsigned int cnt);
int ltt_trace_set_channel_switch_timer(const char *trace_name,
				       const char *channel_name,
				       unsigned long interval);
int ltt_trace_set_channel_overwrite(const char *trace_name,
				    const char *channel_name,
				    unsigned int overwrite);
int ltt_trace_alloc(const char *trace_name);
int ltt_trace_destroy(const char *trace_name);
int ltt_trace_start(const char *trace_name);
int ltt_trace_stop(const char *trace_name);
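
/*
 * Typical control sequence (illustrative sketch): the trace and transport
 * names are examples, and return codes should be checked in real code.
 */
#if 0
static void example_trace_session(void)
{
	ltt_trace_setup("trace1");
	ltt_trace_set_type("trace1", "relay");	/* transport name is an example */
	ltt_trace_set_channel_subbufsize("trace1", LTT_KERNEL_CHANNEL, 262144);
	ltt_trace_set_channel_subbufcount("trace1", LTT_KERNEL_CHANNEL, 2);
	ltt_trace_alloc("trace1");
	ltt_trace_start("trace1");
	/* ... workload runs, events are recorded ... */
	ltt_trace_stop("trace1");
	ltt_trace_destroy("trace1");
}
#endif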

extern int ltt_control(enum ltt_control_msg msg, const char *trace_name,
		       const char *trace_type, union ltt_control_args args);

enum ltt_filter_control_msg {
	LTT_FILTER_DEFAULT_ACCEPT,
	LTT_FILTER_DEFAULT_REJECT
};

extern int ltt_filter_control(enum ltt_filter_control_msg msg,
			      const char *trace_name);

extern struct dentry *get_filter_root(void);

void ltt_core_register(int (*function)(u8, void *));

void ltt_core_unregister(void);

void ltt_release_trace(struct kref *kref);
void ltt_release_transport(struct kref *kref);

extern int ltt_probe_register(struct ltt_available_probe *pdata);
extern int ltt_probe_unregister(struct ltt_available_probe *pdata);
extern int ltt_marker_connect(const char *channel, const char *mname,
			      const char *pname);
extern int ltt_marker_disconnect(const char *channel, const char *mname,
				 const char *pname);
extern void ltt_dump_marker_state(struct ltt_trace *trace);

void ltt_lock_traces(void);
void ltt_unlock_traces(void);

extern int ltt_ascii_create_dir(struct ltt_trace *new_trace);
extern void ltt_ascii_remove_dir(struct ltt_trace *trace);
extern int ltt_ascii_create(struct ltt_chan *chan);
extern void ltt_ascii_remove(struct ltt_chan *chan);

extern
void ltt_statedump_register_kprobes_dump(void (*callback)(void *call_data));
extern
void ltt_statedump_unregister_kprobes_dump(void (*callback)(void *call_data));

extern void ltt_dump_softirq_vec(void *call_data);

#ifdef CONFIG_HAVE_LTT_DUMP_TABLES
extern void ltt_dump_sys_call_table(void *call_data);
extern void ltt_dump_idt_table(void *call_data);
#else
static inline void ltt_dump_sys_call_table(void *call_data)
{
}

static inline void ltt_dump_idt_table(void *call_data)
{
}
#endif

#endif /* _LTT_TRACER_H */