Properly fix the timekeeping overflow detection
[lttng-modules.git] / lttng-ring-buffer-client.h
1 /*
2 * lttng-ring-buffer-client.h
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer client template.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include "lib/bitfield.h"
14 #include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
15 #include "wrapper/trace-clock.h"
16 #include "lttng-events.h"
17 #include "lttng-tracer.h"
18 #include "wrapper/ringbuffer/frontend_types.h"
19
20 #define LTTNG_COMPACT_EVENT_BITS 5
21 #define LTTNG_COMPACT_TSC_BITS 27
22
23 /*
24 * Keep the natural field alignment for _each field_ within this structure if
25 * you ever add/remove a field from this header. Packed attribute is not used
26 * because gcc generates poor code on at least powerpc and mips. Don't ever
27 * let gcc add padding between the structure elements.
28 *
29 * The guarantee we have with timestamps is that all the events in a
30 * packet are included (inclusive) within the begin/end timestamps of
31 * the packet. Another guarantee we have is that the "timestamp begin",
32 * as well as the event timestamps, are monotonically increasing (never
33 * decrease) when moving forward in a stream (physically). But this
34 * guarantee does not apply to "timestamp end", because it is sampled at
35 * commit time, which is not ordered with respect to space reservation.
36 */
37
struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number (CTF).
					 * Contains endianness information:
					 * readers detect byte order by
					 * checking how this value reads back.
					 */
	uint8_t uuid[16];		/* Unique ID of the trace session. */
	uint32_t stream_id;		/* Stream this packet belongs to. */

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t content_size;	/* Size of data in subbuffer */
		uint32_t packet_size;	/* Subbuffer size (include padding) */
		uint32_t cpu_id;	/* CPU id associated with stream */
		uint8_t header_end;	/* End of header marker (size 0 on the wire) */
	} ctx;
};
62
63
64 static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
65 {
66 return trace_clock_read64();
67 }
68
69 static inline
70 size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
71 {
72 int i;
73 size_t orig_offset = offset;
74
75 if (likely(!ctx))
76 return 0;
77 for (i = 0; i < ctx->nr_fields; i++)
78 offset += ctx->fields[i].get_size(offset);
79 return offset - orig_offset;
80 }
81
82 static inline
83 void ctx_record(struct lib_ring_buffer_ctx *bufctx,
84 struct lttng_channel *chan,
85 struct lttng_ctx *ctx)
86 {
87 int i;
88
89 if (likely(!ctx))
90 return;
91 for (i = 0; i < ctx->nr_fields; i++)
92 ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
93 }
94
/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 *
 * The extended header layout is used when either the reserved id range
 * is exhausted (LTTNG_RFLAG_EXTENDED) or the ring buffer requests a
 * full timestamp (RING_BUFFER_RFLAG_FULL_TSC), e.g. when the compact
 * 27-bit TSC would overflow.
 */
static __inline__
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx)
{
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_event *event = ctx->priv;
	size_t orig_offset = offset;
	size_t padding;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			/* 5-bit id + 27-bit TSC packed into one word. */
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);	/* 16-bit id always present */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	/* Event-level then channel-level context fields follow the header. */
	offset += ctx_get_size(offset, event->ctx);
	offset += ctx_get_size(offset, lttng_chan->ctx);

	*pre_header_padding = padding;
	return offset - orig_offset;
}
160
161 #include "wrapper/ringbuffer/api.h"
162
163 static
164 void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
165 struct lib_ring_buffer_ctx *ctx,
166 uint32_t event_id);
167
/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 *
 * Fast path: only handles the no-rflags case (compact id/timestamp or
 * large 16-bit id + 32-bit timestamp); any rflags bit diverts to the
 * slow path, which emits the extended header.
 */
static __inline__
void lttng_write_event_header(const struct lib_ring_buffer_config *config,
			      struct lib_ring_buffer_ctx *ctx,
			      uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_event *event = ctx->priv;

	if (unlikely(ctx->rflags))
		goto slow_path;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		/* Pack 5-bit id and 27-bit TSC into a single 32-bit word. */
		bt_bitfield_write(&id_time, uint32_t,
				  0,
				  LTTNG_COMPACT_EVENT_BITS,
				  event_id);
		bt_bitfield_write(&id_time, uint32_t,
				  LTTNG_COMPACT_EVENT_BITS,
				  LTTNG_COMPACT_TSC_BITS,
				  ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;	/* truncated TSC */
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	/* Channel-level then event-level context fields follow the header. */
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}
227
/*
 * Slow path of lttng_write_event_header: emits the extended event header
 * (escape id + 32-bit id + full 64-bit timestamp) whenever
 * RING_BUFFER_RFLAG_FULL_TSC or LTTNG_RFLAG_EXTENDED is set; otherwise
 * falls back to the same compact/large layout as the fast path.
 */
static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer_ctx *ctx,
				   uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_event *event = ctx->priv;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					  0,
					  LTTNG_COMPACT_EVENT_BITS,
					  event_id);
			bt_bitfield_write(&id_time, uint32_t,
					  LTTNG_COMPACT_EVENT_BITS,
					  LTTNG_COMPACT_TSC_BITS, ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			/*
			 * id 31 (the 5-bit maximum) is the escape value
			 * announcing the extended header; regular ids
			 * stop at 30 (see lttng_event_reserve).
			 */
			bt_bitfield_write(&id, uint8_t,
					  0,
					  LTTNG_COMPACT_EVENT_BITS,
					  31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			/*
			 * id 65535 is the 16-bit escape value announcing
			 * the extended header; regular ids stop at 65534
			 * (see lttng_event_reserve).
			 */
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	/* Channel-level then event-level context fields follow the header. */
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
294
295 static const struct lib_ring_buffer_config client_config;
296
297 static u64 client_ring_buffer_clock_read(struct channel *chan)
298 {
299 return lib_ring_buffer_clock_read(chan);
300 }
301
302 static
303 size_t client_record_header_size(const struct lib_ring_buffer_config *config,
304 struct channel *chan, size_t offset,
305 size_t *pre_header_padding,
306 struct lib_ring_buffer_ctx *ctx)
307 {
308 return record_header_size(config, chan, offset,
309 pre_header_padding, ctx);
310 }
311
312 /**
313 * client_packet_header_size - called on buffer-switch to a new sub-buffer
314 *
315 * Return header size without padding after the structure. Don't use packed
316 * structure because gcc generates inefficient code on some architectures
317 * (powerpc, mips..)
318 */
319 static size_t client_packet_header_size(void)
320 {
321 return offsetof(struct packet_header, ctx.header_end);
322 }
323
324 static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
325 unsigned int subbuf_idx)
326 {
327 struct channel *chan = buf->backend.chan;
328 struct packet_header *header =
329 (struct packet_header *)
330 lib_ring_buffer_offset_address(&buf->backend,
331 subbuf_idx * chan->backend.subbuf_size);
332 struct lttng_channel *lttng_chan = channel_get_private(chan);
333 struct lttng_session *session = lttng_chan->session;
334
335 header->magic = CTF_MAGIC_NUMBER;
336 memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
337 header->stream_id = lttng_chan->id;
338 header->ctx.timestamp_begin = tsc;
339 header->ctx.timestamp_end = 0;
340 header->ctx.events_discarded = 0;
341 header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
342 header->ctx.packet_size = 0xFFFFFFFF;
343 header->ctx.cpu_id = buf->backend.cpu;
344 }
345
346 /*
347 * offset is assumed to never be 0 here : never deliver a completely empty
348 * subbuffer. data_size is between 1 and subbuf_size.
349 */
350 static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
351 unsigned int subbuf_idx, unsigned long data_size)
352 {
353 struct channel *chan = buf->backend.chan;
354 struct packet_header *header =
355 (struct packet_header *)
356 lib_ring_buffer_offset_address(&buf->backend,
357 subbuf_idx * chan->backend.subbuf_size);
358 unsigned long records_lost = 0;
359
360 header->ctx.timestamp_end = tsc;
361 header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
362 header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
363 records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
364 records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
365 records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
366 header->ctx.events_discarded = records_lost;
367 }
368
/* Per-buffer creation hook: nothing to set up for this client. */
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name)
{
	return 0;
}
374
/* Per-buffer teardown hook: nothing to release for this client. */
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}
378
/*
 * Ring buffer configuration for this client: per-CPU buffers, page
 * backend, oops-consistent output, timer-driven reader wakeup. The
 * compact header carries LTTNG_COMPACT_TSC_BITS of timestamp; overflows
 * of that range make the ring buffer request a full TSC (extended
 * header).
 */
static const struct lib_ring_buffer_config client_config = {
	/* Client callbacks wired into the ring buffer library. */
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_OUTPUT_TEMPLATE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
398
399 static
400 struct channel *_channel_create(const char *name,
401 struct lttng_channel *lttng_chan, void *buf_addr,
402 size_t subbuf_size, size_t num_subbuf,
403 unsigned int switch_timer_interval,
404 unsigned int read_timer_interval)
405 {
406 return channel_create(&client_config, name, lttng_chan, buf_addr,
407 subbuf_size, num_subbuf, switch_timer_interval,
408 read_timer_interval);
409 }
410
/* Tear down a channel previously created by _channel_create(). */
static
void lttng_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}
416
417 static
418 struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
419 {
420 struct lib_ring_buffer *buf;
421 int cpu;
422
423 for_each_channel_cpu(cpu, chan) {
424 buf = channel_get_ring_buffer(&client_config, chan, cpu);
425 if (!lib_ring_buffer_open_read(buf))
426 return buf;
427 }
428 return NULL;
429 }
430
431 static
432 int lttng_buffer_has_read_closed_stream(struct channel *chan)
433 {
434 struct lib_ring_buffer *buf;
435 int cpu;
436
437 for_each_channel_cpu(cpu, chan) {
438 buf = channel_get_ring_buffer(&client_config, chan, cpu);
439 if (!atomic_long_read(&buf->active_readers))
440 return 1;
441 }
442 return 0;
443 }
444
/* Release a read reference taken by lttng_buffer_read_open(). */
static
void lttng_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
450
451 static
452 int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
453 uint32_t event_id)
454 {
455 struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
456 int ret, cpu;
457
458 cpu = lib_ring_buffer_get_cpu(&client_config);
459 if (cpu < 0)
460 return -EPERM;
461 ctx->cpu = cpu;
462
463 switch (lttng_chan->header_type) {
464 case 1: /* compact */
465 if (event_id > 30)
466 ctx->rflags |= LTTNG_RFLAG_EXTENDED;
467 break;
468 case 2: /* large */
469 if (event_id > 65534)
470 ctx->rflags |= LTTNG_RFLAG_EXTENDED;
471 break;
472 default:
473 WARN_ON_ONCE(1);
474 }
475
476 ret = lib_ring_buffer_reserve(&client_config, ctx);
477 if (ret)
478 goto put;
479 lttng_write_event_header(&client_config, ctx, event_id);
480 return 0;
481 put:
482 lib_ring_buffer_put_cpu(&client_config);
483 return ret;
484 }
485
486 static
487 void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
488 {
489 lib_ring_buffer_commit(&client_config, ctx);
490 lib_ring_buffer_put_cpu(&client_config);
491 }
492
493 static
494 void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
495 size_t len)
496 {
497 lib_ring_buffer_write(&client_config, ctx, src, len);
498 }
499
500 static
501 void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
502 const void __user *src, size_t len)
503 {
504 lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
505 }
506
507 static
508 void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
509 int c, size_t len)
510 {
511 lib_ring_buffer_memset(&client_config, ctx, c, len);
512 }
513
514 static
515 wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
516 {
517 struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
518 chan, cpu);
519 return &buf->write_wait;
520 }
521
522 static
523 wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
524 {
525 return &chan->hp_wait;
526 }
527
/* Non-zero when the channel has been finalized (no more writes). */
static
int lttng_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}
533
/* Non-zero when the channel is disabled (events are dropped). */
static
int lttng_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}
539
/*
 * Transport operations registered with the LTTng core under the name
 * "relay-<mode>"; binds this client's channel/event entry points.
 */
static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = lttng_channel_destroy,
		.buffer_read_open = lttng_buffer_read_open,
		.buffer_has_read_closed_stream =
			lttng_buffer_has_read_closed_stream,
		.buffer_read_close = lttng_buffer_read_close,
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.event_write_from_user = lttng_event_write_from_user,
		.event_memset = lttng_event_memset,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
		.get_hp_wait_queue = lttng_get_hp_wait_queue,
		.is_finalized = lttng_is_finalized,
		.is_disabled = lttng_is_disabled,
	},
};
562
563 static int __init lttng_ring_buffer_client_init(void)
564 {
565 /*
566 * This vmalloc sync all also takes care of the lib ring buffer
567 * vmalloc'd module pages when it is built as a module into LTTng.
568 */
569 wrapper_vmalloc_sync_all();
570 lttng_transport_register(&lttng_relay_transport);
571 return 0;
572 }
573
574 module_init(lttng_ring_buffer_client_init);
575
576 static void __exit lttng_ring_buffer_client_exit(void)
577 {
578 lttng_transport_unregister(&lttng_relay_transport);
579 }
580
581 module_exit(lttng_ring_buffer_client_exit);
582
583 MODULE_LICENSE("GPL and additional rights");
584 MODULE_AUTHOR("Mathieu Desnoyers");
585 MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
586 " client");
This page took 0.041928 seconds and 5 git commands to generate.