Revert "Rename uuid.h wrapper to lttng-ust-uuid.h"
[lttng-ust.git] / liblttng-ust / ltt-ring-buffer-client.h
/*
 * ltt-ring-buffer-client.h
 *
 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng lib ring buffer client template.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <stdint.h>
#include <lttng/ust-events.h>
#include "lttng/bitfield.h"
#include "clock.h"
#include "uuid.h"
#include "ltt-tracer.h"
#include "../libringbuffer/frontend_types.h"

#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TSC_BITS		27

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t uuid[LTTNG_UST_UUID_LEN];
	uint32_t stream_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t content_size;		/* Size of data in subbuffer */
		uint32_t packet_size;		/* Subbuffer size (includes padding) */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};

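/*
 * Layout sketch of struct packet_header, assuming LTTNG_UST_UUID_LEN == 16
 * and the natural-alignment rules described above (illustrative only; the
 * compiler-computed offsets are authoritative):
 *
 *	magic			offset  0, size  4
 *	uuid			offset  4, size 16
 *	stream_id		offset 20, size  4
 *	ctx.timestamp_begin	offset 24, size  8
 *	ctx.timestamp_end	offset 32, size  8
 *	ctx.events_discarded	offset 40, size  4
 *	ctx.content_size	offset 44, size  4
 *	ctx.packet_size		offset 48, size  4
 *	ctx.cpu_id		offset 52, size  4
 *	ctx.header_end		offset 56 (marker, never written)
 *
 * This is why client_packet_header_size() below can return
 * offsetof(struct packet_header, ctx.header_end) directly.
 */
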
static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

static inline
size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
{
	int i;
	size_t orig_offset = offset;

	if (caa_likely(!ctx))
		return 0;
	for (i = 0; i < ctx->nr_fields; i++)
		offset += ctx->fields[i].get_size(offset);
	return offset - orig_offset;
}

static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
		struct ltt_channel *chan,
		struct lttng_ctx *ctx)
{
	int i;

	if (caa_likely(!ctx))
		return;
	for (i = 0; i < ctx->nr_fields; i++)
		ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}

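/*
 * Minimal sketch of the get_size()/record() callback pair a context field
 * provides, to make the loops above concrete. The field payload here is
 * hypothetical; real implementations live in the lttng-context-*.c files.
 * Note how get_size() mirrors record(): same alignment, same payload size.
 */
#if 0
static
size_t example_ctx_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(int32_t));
	size += sizeof(int32_t);
	return size;
}

static
void example_ctx_record(struct lttng_ctx_field *field,
			struct lttng_ust_lib_ring_buffer_ctx *ctx,
			struct ltt_channel *chan)
{
	int32_t value = 42;	/* hypothetical payload */

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}
#endif
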
/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
unsigned char record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_event *event = ctx->priv;
	size_t orig_offset = offset;
	size_t padding;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	offset += ctx_get_size(offset, event->ctx);
	offset += ctx_get_size(offset, ltt_chan->ctx);

	*pre_header_padding = padding;
	return offset - orig_offset;
}

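/*
 * Worked example for the extended compact case above, starting from an
 * offset that is already 8-byte aligned (arithmetic only, for reference):
 *
 *	+1 byte   escape id: (LTTNG_COMPACT_EVENT_BITS + 7) / 8	->  1
 *	align 8							->  8
 *	+4 bytes  32-bit extended id				-> 12
 *	align 8							-> 16
 *	+8 bytes  64-bit timestamp				-> 24
 *
 * i.e. 24 bytes, versus a single 4-byte uint32_t on the compact fast path.
 */
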
#include "../libringbuffer/api.h"

static
void ltt_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 uint32_t event_id);

/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already 32-bit aligned).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void ltt_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	if (caa_unlikely(ctx->rflags))
		goto slow_path;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t,
				  0,
				  LTTNG_COMPACT_EVENT_BITS,
				  event_id);
		bt_bitfield_write(&id_time, uint32_t,
				  LTTNG_COMPACT_EVENT_BITS,
				  LTTNG_COMPACT_TSC_BITS,
				  ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	ltt_write_event_header_slow(config, ctx, event_id);
}

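/*
 * Packing sketch for the compact fast path above: the event id occupies
 * bit range [0, 5) and the low LTTNG_COMPACT_TSC_BITS bits of the
 * timestamp occupy [5, 32) of the same word; 5 + 27 == 32, so the pair
 * fills the uint32_t exactly. Assuming bt_bitfield_write() packs from the
 * least significant bit (this is byte-order dependent), event_id == 3
 * with low timestamp bits T yields:
 *
 *	id_time == (T << LTTNG_COMPACT_EVENT_BITS) | 3
 */
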
static
void ltt_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					  0,
					  LTTNG_COMPACT_EVENT_BITS,
					  event_id);
			bt_bitfield_write(&id_time, uint32_t,
					  LTTNG_COMPACT_EVENT_BITS,
					  LTTNG_COMPACT_TSC_BITS,
					  ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			bt_bitfield_write(&id, uint8_t,
					  0,
					  LTTNG_COMPACT_EVENT_BITS,
					  31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}

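/*
 * Escape-id convention used above: the compact header encodes ids 0..30
 * inline and reserves 31 (the largest 5-bit value) to flag an extended
 * header carrying the full 32-bit event id; the large header encodes ids
 * 0..65534 inline and reserves 65535 the same way. ltt_event_reserve()
 * below sets LTT_RFLAG_EXTENDED from the matching thresholds
 * (event_id > 30, event_id > 65534).
 */
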
static const struct lttng_ust_lib_ring_buffer_config client_config;

static uint64_t client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset,
				  pre_header_padding, ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips...)
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
				unsigned int subbuf_idx,
				struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	struct ltt_channel *ltt_chan = channel_get_private(chan);

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, ltt_chan->uuid, sizeof(ltt_chan->uuid));
	header->stream_id = ltt_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.events_discarded = 0;
	header->ctx.content_size = 0xFFFFFFFF;	/* for debugging */
	header->ctx.packet_size = 0xFFFFFFFF;
	header->ctx.cpu_id = buf->backend.cpu;
}

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			      unsigned int subbuf_idx, unsigned long data_size,
			      struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size = data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */
	/*
	 * We do not care about the records lost count, because the metadata
	 * channel waits and retries.
	 */
	(void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}

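/*
 * Size bookkeeping example for client_buffer_end(), assuming 4 KiB pages
 * (PAGE_ALIGN rounds data_size up to a page boundary): with
 * data_size == 4000 bytes,
 *
 *	content_size = 4000 * CHAR_BIT = 32000 bits
 *	packet_size  = 4096 * CHAR_BIT = 32768 bits
 *
 * Both CTF packet fields are expressed in bits, hence the CHAR_BIT factors.
 */
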
static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
				int cpu, const char *name, struct lttng_ust_shm_handle *handle)
{
	return 0;
}

static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf, void *priv, int cpu, struct lttng_ust_shm_handle *handle)
{
}

static const struct lttng_ust_lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_GLOBAL,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_MMAP,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_NO_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
	.client_type = LTTNG_CLIENT_TYPE,
};

const struct lttng_ust_lib_ring_buffer_client_cb *LTTNG_CLIENT_CALLBACKS = &client_config.cb;

static
struct ltt_channel *_channel_create(const char *name,
				void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval,
				int **shm_fd, int **wait_fd,
				uint64_t **memory_map_size,
				struct ltt_channel *chan_priv_init)
{
	void *priv;
	struct ltt_channel *ltt_chan = NULL;
	struct lttng_ust_shm_handle *handle;

	handle = channel_create(&client_config, name,
			&priv, __alignof__(*ltt_chan), sizeof(*ltt_chan),
			chan_priv_init,
			buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval,
			shm_fd, wait_fd, memory_map_size);
	if (!handle)
		return NULL;
	ltt_chan = priv;
	ltt_chan->handle = handle;
	ltt_chan->chan = shmp(ltt_chan->handle, ltt_chan->handle->chan);
	return ltt_chan;
}

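/*
 * Hypothetical usage sketch for _channel_create(); the parameter values
 * (four 4 KiB subbuffers, no switch/read timers) are illustrative only:
 */
#if 0
struct ltt_channel *chan;
int *shm_fd, *wait_fd;
uint64_t *memory_map_size;

chan = _channel_create("channel0", NULL, 4096, 4, 0, 0,
		&shm_fd, &wait_fd, &memory_map_size, chan_priv_init);
if (!chan)
	abort();	/* shared memory allocation failed */
/* ... trace ... */
ltt_channel_destroy(chan);
#endif
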
static
void ltt_channel_destroy(struct ltt_channel *ltt_chan)
{
	channel_destroy(ltt_chan->chan, ltt_chan->handle, 0);
}

static
struct lttng_ust_lib_ring_buffer *ltt_buffer_read_open(struct channel *chan,
					struct lttng_ust_shm_handle *handle,
					int **shm_fd, int **wait_fd,
					uint64_t **memory_map_size)
{
	struct lttng_ust_lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan,
				cpu, handle, shm_fd, wait_fd,
				memory_map_size);
		if (!lib_ring_buffer_open_read(buf, handle, 0))
			return buf;
	}
	return NULL;
}

static
void ltt_buffer_read_close(struct lttng_ust_lib_ring_buffer *buf,
			   struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_release_read(buf, handle, 0);
}

static
int ltt_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
		      uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	ctx->cpu = cpu;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (ret)
		goto put;
	ltt_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}

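/*
 * Sketch of the reserve/write/commit cycle a probe drives through these
 * callbacks. The lib_ring_buffer_ctx_init() arguments shown here are an
 * assumption (payload size/alignment are illustrative); real probes fill
 * the context from the tracepoint payload:
 */
#if 0
struct lttng_ust_lib_ring_buffer_ctx ctx;
uint64_t payload = 42;	/* hypothetical event payload */
int ret;

lib_ring_buffer_ctx_init(&ctx, ltt_chan->chan, event,
			 sizeof(payload), lttng_alignof(payload),
			 -1, ltt_chan->handle);
ret = ltt_event_reserve(&ctx, event->id);
if (!ret) {
	ltt_event_write(&ctx, &payload, sizeof(payload));
	ltt_event_commit(&ctx);	/* also releases the CPU taken in reserve */
}
#endif
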
static
void ltt_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}

static
void ltt_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

#if 0
static
wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
{
	return &chan->read_wait;
}

static
wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
#endif //0

static
int ltt_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

static
int ltt_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}

static
int ltt_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		int *shm_fd, *wait_fd;
		uint64_t *memory_map_size;

		buf = channel_get_ring_buffer(&client_config, chan,
				cpu, handle, &shm_fd, &wait_fd,
				&memory_map_size);
		lib_ring_buffer_switch(&client_config, buf,
				       SWITCH_ACTIVE, handle);
	}
	return 0;
}

static struct ltt_transport ltt_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = ltt_channel_destroy,
		.buffer_read_open = ltt_buffer_read_open,
		.buffer_read_close = ltt_buffer_read_close,
		.event_reserve = ltt_event_reserve,
		.event_commit = ltt_event_commit,
		.event_write = ltt_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		//.get_reader_wait_queue = ltt_get_reader_wait_queue,
		//.get_hp_wait_queue = ltt_get_hp_wait_queue,
		.is_finalized = ltt_is_finalized,
		.is_disabled = ltt_is_disabled,
		.flush_buffer = ltt_flush_buffer,
	},
};

void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
	DBG("LTT : ltt ring buffer client init\n");
	ltt_transport_register(&ltt_relay_transport);
}

void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
	DBG("LTT : ltt ring buffer client exit\n");
	ltt_transport_unregister(&ltt_relay_transport);
}
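
/*
 * This header is a template: each client .c file defines the template
 * macros, then includes it. A plausible instantiation (macro names follow
 * the ones used above; the exact values are an assumption, not taken from
 * this file):
 */
#if 0
/* ltt-ring-buffer-client-discard.c */
#define RING_BUFFER_MODE_TEMPLATE		RING_BUFFER_DISCARD
#define RING_BUFFER_MODE_TEMPLATE_STRING	"discard"
#define RING_BUFFER_MODE_TEMPLATE_INIT		ltt_ring_buffer_client_discard_init
#define RING_BUFFER_MODE_TEMPLATE_EXIT		ltt_ring_buffer_client_discard_exit
#define LTTNG_CLIENT_TYPE			LTTNG_CLIENT_DISCARD
#define LTTNG_CLIENT_CALLBACKS			lttng_client_callbacks_discard
#include "ltt-ring-buffer-client.h"
#endif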