Update ring buffer and pretty print
[lttng-modules.git] / ltt-ring-buffer-client.h
1 /*
2 * ltt-ring-buffer-client.h
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer client template.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include "lib/bitfield.h"
14 #include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
15 #include "wrapper/trace-clock.h"
16 #include "ltt-events.h"
17 #include "ltt-tracer.h"
18 #include "wrapper/ringbuffer/frontend_types.h"
19
20 /*
21 * Keep the natural field alignment for _each field_ within this structure if
22 * you ever add/remove a field from this header. Packed attribute is not used
23 * because gcc generates poor code on at least powerpc and mips. Don't ever
24 * let gcc add padding between the structure elements.
25 */
26
/*
 * CTF (Common Trace Format) packet header, written at the start of each
 * sub-buffer by client_buffer_begin() and completed by client_buffer_end().
 * Natural alignment of every field must be preserved (see note above):
 * the on-disk layout is exactly this in-memory layout.
 */
struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t uuid[16];		/* Trace session UUID */
	uint32_t stream_id;		/* Stream (channel) identifier */

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t content_size;	/* Size of data in subbuffer */
		uint32_t packet_size;	/* Subbuffer size (include padding) */
		uint32_t cpu_id;	/* CPU id associated with stream */
		uint8_t header_end;	/*
					 * End-of-header marker: only its offset
					 * is used (client_packet_header_size()),
					 * it carries no data.
					 */
	} ctx;
};
51
52
/*
 * lib_ring_buffer_clock_read - read the 64-bit trace clock.
 * @chan: channel (unused; present to match the ring buffer callback shape)
 *
 * notrace: must not be instrumented, it is called from tracing paths.
 */
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}
57
58 /*
59 * record_header_size - Calculate the header size and padding necessary.
60 * @config: ring buffer instance configuration
61 * @chan: channel
62 * @offset: offset in the write buffer
63 * @pre_header_padding: padding to add before the header (output)
64 * @ctx: reservation context
65 *
66 * Returns the event header size (including padding).
67 *
68 * The payload must itself determine its own alignment from the biggest type it
69 * contains.
70 */
71 static __inline__
72 unsigned char record_header_size(const struct lib_ring_buffer_config *config,
73 struct channel *chan, size_t offset,
74 size_t *pre_header_padding,
75 struct lib_ring_buffer_ctx *ctx)
76 {
77 struct ltt_channel *ltt_chan = channel_get_private(chan);
78 size_t orig_offset = offset;
79 size_t padding;
80
81 switch (ltt_chan->header_type) {
82 case 1: /* compact */
83 padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
84 offset += padding;
85 if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
86 offset += sizeof(uint32_t); /* id and timestamp */
87 } else {
88 /* Minimum space taken by 5-bit id */
89 offset += sizeof(uint8_t);
90 /* Align extended struct on largest member */
91 offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
92 offset += sizeof(uint32_t); /* id */
93 offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
94 offset += sizeof(uint64_t); /* timestamp */
95 }
96 break;
97 case 2: /* large */
98 padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
99 offset += padding;
100 offset += sizeof(uint16_t);
101 if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
102 offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
103 offset += sizeof(uint32_t); /* timestamp */
104 } else {
105 /* Align extended struct on largest member */
106 offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
107 offset += sizeof(uint32_t); /* id */
108 offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
109 offset += sizeof(uint64_t); /* timestamp */
110
111 }
112 break;
113 default:
114 WARN_ON_ONCE(1);
115 }
116
117 *pre_header_padding = padding;
118 return offset - orig_offset;
119 }
120
121 #include "wrapper/ringbuffer/api.h"
122
123 extern
124 void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
125 struct lib_ring_buffer_ctx *ctx,
126 uint32_t event_id);
127
128 /*
129 * ltt_write_event_header
130 *
131 * Writes the event header to the offset (already aligned on 32-bits).
132 *
133 * @config: ring buffer instance configuration
134 * @ctx: reservation context
135 * @event_id: event ID
136 */
137 static __inline__
138 void ltt_write_event_header(const struct lib_ring_buffer_config *config,
139 struct lib_ring_buffer_ctx *ctx,
140 uint32_t event_id)
141 {
142 struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
143
144 if (unlikely(ctx->rflags))
145 goto slow_path;
146
147 switch (ltt_chan->header_type) {
148 case 1: /* compact */
149 {
150 uint32_t id_time = 0;
151
152 bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
153 bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
154 lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
155 break;
156 }
157 case 2: /* large */
158 {
159 uint32_t timestamp = (uint32_t) ctx->tsc;
160
161 lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
162 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
163 lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
164 break;
165 }
166 default:
167 WARN_ON_ONCE(1);
168 }
169 return;
170
171 slow_path:
172 ltt_write_event_header_slow(config, ctx, event_id);
173 }
174
175 void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
176 struct lib_ring_buffer_ctx *ctx,
177 uint32_t event_id)
178 {
179 struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
180
181 switch (ltt_chan->header_type) {
182 case 1: /* compact */
183 if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
184 uint32_t id_time = 0;
185
186 bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
187 bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
188 lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
189 } else {
190 uint8_t id = 0;
191 uint32_t event_id = (uint32_t) event_id;
192 uint64_t timestamp = ctx->tsc;
193
194 bt_bitfield_write(&id, uint8_t, 0, 5, 31);
195 lib_ring_buffer_write(config, ctx, &id, sizeof(id));
196 /* Align extended struct on largest member */
197 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
198 lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
199 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
200 lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
201 }
202 break;
203 case 2: /* large */
204 {
205 if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
206 uint32_t timestamp = (uint32_t) ctx->tsc;
207
208 lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
209 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
210 lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
211 } else {
212 uint16_t id = 65535;
213 uint64_t timestamp = ctx->tsc;
214
215 lib_ring_buffer_write(config, ctx, &id, sizeof(id));
216 /* Align extended struct on largest member */
217 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
218 lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
219 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
220 lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
221 }
222 break;
223 }
224 default:
225 WARN_ON_ONCE(1);
226 }
227 }
228
229 static const struct lib_ring_buffer_config client_config;
230
/* Ring buffer callback: read the trace clock (delegates to the inline above). */
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}
235
/* Ring buffer callback: compute per-record header size (delegates to inline). */
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset,
				  pre_header_padding, ctx);
}
245
246 /**
247 * client_packet_header_size - called on buffer-switch to a new sub-buffer
248 *
249 * Return header size without padding after the structure. Don't use packed
250 * structure because gcc generates inefficient code on some architectures
251 * (powerpc, mips..)
252 */
/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static size_t client_packet_header_size(void)
{
	/* header_end is a zero-data marker: its offset is the header size. */
	return offsetof(struct packet_header, ctx.header_end);
}
257
/*
 * client_buffer_begin - initialize the CTF packet header of a sub-buffer.
 * @buf: ring buffer
 * @tsc: timestamp at sub-buffer start
 * @subbuf_idx: index of the sub-buffer being started
 *
 * Fields that are only known at buffer-switch end (timestamp_end,
 * content_size, packet_size, events_discarded) are filled with placeholder
 * values here and finalized by client_buffer_end().
 */
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_session *session = ltt_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->stream_id = ltt_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.events_discarded = 0;
	header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
	header->ctx.packet_size = 0xFFFFFFFF;
	header->ctx.cpu_id = buf->backend.cpu;
}
279
280 /*
281 * offset is assumed to never be 0 here : never deliver a completely empty
282 * subbuffer. data_size is between 1 and subbuf_size.
283 */
284 static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
285 unsigned int subbuf_idx, unsigned long data_size)
286 {
287 struct channel *chan = buf->backend.chan;
288 struct packet_header *header =
289 (struct packet_header *)
290 lib_ring_buffer_offset_address(&buf->backend,
291 subbuf_idx * chan->backend.subbuf_size);
292 unsigned long records_lost = 0;
293
294 header->ctx.timestamp_end = tsc;
295 header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
296 header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
297 records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
298 records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
299 records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
300 header->ctx.events_discarded = records_lost;
301 }
302
/* Ring buffer callback: per-buffer creation hook. Nothing to do; succeed. */
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name)
{
	return 0;
}
308
/* Ring buffer callback: per-buffer teardown hook. Nothing to release. */
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}
312
/*
 * Ring buffer configuration for this client: per-CPU page-backed buffers,
 * splice output, 32-bit compact timestamps (tsc_bits), timer-driven reader
 * wakeup. Forward-declared above so callbacks can reference it.
 */
static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = 32,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_SPLICE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
332
/*
 * _channel_create - transport op: create a channel bound to client_config.
 * Parameters are forwarded verbatim to the lib ring buffer channel_create().
 */
static
struct channel *_channel_create(const char *name,
				struct ltt_channel *ltt_chan, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval)
{
	return channel_create(&client_config, name, ltt_chan, buf_addr,
			      subbuf_size, num_subbuf, switch_timer_interval,
			      read_timer_interval);
}
344
/* Transport op: destroy a channel created by _channel_create(). */
static
void ltt_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}
350
/*
 * ltt_buffer_read_open - transport op: open the first readable per-CPU buffer.
 *
 * Iterates the channel's CPUs and returns the first buffer whose read side
 * can be acquired, or NULL if none is available. The caller must release it
 * with ltt_buffer_read_close().
 */
static
struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
{
	struct lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu);
		if (!lib_ring_buffer_open_read(buf))
			return buf;
	}
	return NULL;
}
364
/* Transport op: release a buffer acquired by ltt_buffer_read_open(). */
static
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
371
372 static
373 int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
374 uint32_t event_id)
375 {
376 struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
377 int ret, cpu;
378
379 cpu = lib_ring_buffer_get_cpu(&client_config);
380 if (cpu < 0)
381 return -EPERM;
382 ctx->cpu = cpu;
383
384 switch (ltt_chan->header_type) {
385 case 1: /* compact */
386 if (event_id > 30)
387 ctx->rflags |= LTT_RFLAG_EXTENDED;
388 break;
389 case 2: /* large */
390 if (event_id > 65534)
391 ctx->rflags |= LTT_RFLAG_EXTENDED;
392 break;
393 default:
394 WARN_ON_ONCE(1);
395 }
396
397 ret = lib_ring_buffer_reserve(&client_config, ctx);
398 if (ret)
399 goto put;
400 ltt_write_event_header(&client_config, ctx, event_id);
401 return 0;
402 put:
403 lib_ring_buffer_put_cpu(&client_config);
404 return ret;
405 }
406
/*
 * Transport op: commit a reserved event and unpin the CPU taken by
 * ltt_event_reserve().
 */
static
void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}
413
/* Transport op: copy len bytes of event payload into the reserved slot. */
static
void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}
420
/* Transport op: expose the channel's reader wait queue for poll/read. */
static
wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
{
	return &chan->chan->read_wait;
}
426
/*
 * Transport descriptor registered with LTTng core at module init; binds the
 * ops above under the "relay-<mode>" name.
 */
static struct ltt_transport ltt_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = ltt_channel_destroy,
		.buffer_read_open = ltt_buffer_read_open,
		.buffer_read_close = ltt_buffer_read_close,
		.event_reserve = ltt_event_reserve,
		.event_commit = ltt_event_commit,
		.event_write = ltt_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_reader_wait_queue = ltt_get_reader_wait_queue,
	},
};
442
/* Module init: sync vmalloc mappings, then register this transport. */
static int __init ltt_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync all also takes care of the lib ring buffer
	 * vmalloc'd module pages when it is built as a module into LTTng.
	 */
	wrapper_vmalloc_sync_all();
	printk(KERN_INFO "LTT : ltt ring buffer client init\n");
	ltt_transport_register(&ltt_relay_transport);
	return 0;
}
454
455 module_init(ltt_ring_buffer_client_init);
456
/* Module exit: unregister the transport registered at init. */
static void __exit ltt_ring_buffer_client_exit(void)
{
	printk(KERN_INFO "LTT : ltt ring buffer client exit\n");
	ltt_transport_unregister(&ltt_relay_transport);
}
462
463 module_exit(ltt_ring_buffer_client_exit);
464
465 MODULE_LICENSE("GPL and additional rights");
466 MODULE_AUTHOR("Mathieu Desnoyers");
467 MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
468 " client");
This page took 0.044408 seconds and 4 git commands to generate.