Revert "Create libuuid/libc uuid wrapper"
[lttng-ust.git] / liblttng-ust / ltt-ring-buffer-client.h
1 /*
2 * ltt-ring-buffer-client.h
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer client template.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11 #include <stdint.h>
12 #include <lttng/ust-events.h>
13 #include "lttng/bitfield.h"
14 #include "clock.h"
15 #include "ltt-tracer.h"
16 #include "../libringbuffer/frontend_types.h"
17
/* Compact header: bits used for the event id (ids 0..30; 31 = extended escape). */
#define LTTNG_COMPACT_EVENT_BITS	5
/* Compact header: low-order timestamp bits stored alongside the event id. */
#define LTTNG_COMPACT_TSC_BITS		27
20
/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */

/*
 * Layout of the packet header written at the start of each sub-buffer
 * (see client_buffer_begin/client_buffer_end below), followed by the
 * stream packet context.
 */
struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t uuid[16];		/* Trace UUID, copied from the channel. */
	uint32_t stream_id;		/* Channel id (see client_buffer_begin). */

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t content_size;	/* Size of data in subbuffer */
		uint32_t packet_size;	/* Subbuffer size (include padding) */
		uint32_t cpu_id;	/* CPU id associated with stream */
		uint8_t header_end;	/* End of header */
	} ctx;
};
52
53
/*
 * Read the trace clock used for event and packet timestamps.
 * The channel argument is unused here; trace_clock_read64() is global.
 */
static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}
58
59 static inline
60 size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
61 {
62 int i;
63 size_t orig_offset = offset;
64
65 if (caa_likely(!ctx))
66 return 0;
67 for (i = 0; i < ctx->nr_fields; i++)
68 offset += ctx->fields[i].get_size(offset);
69 return offset - orig_offset;
70 }
71
72 static inline
73 void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
74 struct ltt_channel *chan,
75 struct lttng_ctx *ctx)
76 {
77 int i;
78
79 if (caa_likely(!ctx))
80 return;
81 for (i = 0; i < ctx->nr_fields; i++)
82 ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
83 }
84
85 /*
86 * record_header_size - Calculate the header size and padding necessary.
87 * @config: ring buffer instance configuration
88 * @chan: channel
89 * @offset: offset in the write buffer
90 * @pre_header_padding: padding to add before the header (output)
91 * @ctx: reservation context
92 *
93 * Returns the event header size (including padding).
94 *
95 * The payload must itself determine its own alignment from the biggest type it
96 * contains.
97 */
98 static __inline__
99 unsigned char record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
100 struct channel *chan, size_t offset,
101 size_t *pre_header_padding,
102 struct lttng_ust_lib_ring_buffer_ctx *ctx)
103 {
104 struct ltt_channel *ltt_chan = channel_get_private(chan);
105 struct ltt_event *event = ctx->priv;
106 size_t orig_offset = offset;
107 size_t padding;
108
109 switch (ltt_chan->header_type) {
110 case 1: /* compact */
111 padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
112 offset += padding;
113 if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
114 offset += sizeof(uint32_t); /* id and timestamp */
115 } else {
116 /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
117 offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
118 /* Align extended struct on largest member */
119 offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
120 offset += sizeof(uint32_t); /* id */
121 offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
122 offset += sizeof(uint64_t); /* timestamp */
123 }
124 break;
125 case 2: /* large */
126 padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
127 offset += padding;
128 offset += sizeof(uint16_t);
129 if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
130 offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
131 offset += sizeof(uint32_t); /* timestamp */
132 } else {
133 /* Align extended struct on largest member */
134 offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
135 offset += sizeof(uint32_t); /* id */
136 offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
137 offset += sizeof(uint64_t); /* timestamp */
138 }
139 break;
140 default:
141 padding = 0;
142 WARN_ON_ONCE(1);
143 }
144 offset += ctx_get_size(offset, event->ctx);
145 offset += ctx_get_size(offset, ltt_chan->ctx);
146
147 *pre_header_padding = padding;
148 return offset - orig_offset;
149 }
150
151 #include "../libringbuffer/api.h"
152
/* Forward declaration: out-of-line slow path for extended/full-TSC headers. */
static
void ltt_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 uint32_t event_id);
157
/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 * Fast path: only valid when no rflags are set (no full-TSC record, no
 * extended id); otherwise defers to ltt_write_event_header_slow().
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void ltt_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	/* Any rflag (full TSC or extended id) requires the slow path. */
	if (caa_unlikely(ctx->rflags))
		goto slow_path;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		/*
		 * Pack the event id (low 5 bits) and the truncated
		 * 27-bit timestamp into a single 32-bit word.
		 */
		bt_bitfield_write(&id_time, uint32_t,
				0,
				LTTNG_COMPACT_EVENT_BITS,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
				LTTNG_COMPACT_EVENT_BITS,
				LTTNG_COMPACT_TSC_BITS,
				ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		/* 16-bit id followed by a 32-bit truncated timestamp. */
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	/* Channel-level then event-level context fields follow the header. */
	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	ltt_write_event_header_slow(config, ctx, event_id);
}
217
/*
 * ltt_write_event_header_slow - slow path of ltt_write_event_header().
 *
 * Handles records that need a full timestamp and/or an extended event id:
 * the compact header reserves id 31 and the large header reserves id 65535
 * as escape values announcing that the real 32-bit id and 64-bit timestamp
 * follow in an aligned extended structure.
 */
static
void ltt_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			/* Same packing as the fast path. */
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					event_id);
			bt_bitfield_write(&id_time, uint32_t,
					LTTNG_COMPACT_EVENT_BITS,
					LTTNG_COMPACT_TSC_BITS,
					ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			/* Escape id 31: extended header follows. */
			bt_bitfield_write(&id, uint8_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			/* Same layout as the fast path. */
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			/* Escape id 65535: extended header follows. */
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	/* Channel-level then event-level context fields follow the header. */
	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
285
286 static const struct lttng_ust_lib_ring_buffer_config client_config;
287
/* Clock-read callback for the ring buffer config: delegates to the helper. */
static uint64_t client_ring_buffer_clock_read(struct channel *chan)
{
	uint64_t now = lib_ring_buffer_clock_read(chan);

	return now;
}
292
/* Record-header-size callback: thin wrapper around record_header_size(). */
static
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	size_t header_len;

	header_len = record_header_size(config, chan, offset,
			pre_header_padding, ctx);
	return header_len;
}
302
303 /**
304 * client_packet_header_size - called on buffer-switch to a new sub-buffer
305 *
306 * Return header size without padding after the structure. Don't use packed
307 * structure because gcc generates inefficient code on some architectures
308 * (powerpc, mips..)
309 */
310 static size_t client_packet_header_size(void)
311 {
312 return offsetof(struct packet_header, ctx.header_end);
313 }
314
/*
 * client_buffer_begin - initialize the packet header of a freshly switched-to
 * sub-buffer: magic, trace UUID, stream id, begin timestamp. content_size and
 * packet_size are set to 0xFFFFFFFF so an unclosed packet is recognizable
 * (filled in for real by client_buffer_end).
 */
static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
				unsigned int subbuf_idx,
				struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	/* Header lives at the very start of the sub-buffer, in shared memory. */
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	struct ltt_channel *ltt_chan = channel_get_private(chan);

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, ltt_chan->uuid, sizeof(ltt_chan->uuid));
	header->stream_id = ltt_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.events_discarded = 0;
	header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
	header->ctx.packet_size = 0xFFFFFFFF;
	header->ctx.cpu_id = buf->backend.cpu;
}
337
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 *
 * Finalizes the packet header: end timestamp, content/packet sizes (CTF
 * expresses both in bits), and the discarded-events count.
 */
static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			      unsigned int subbuf_idx, unsigned long data_size,
			      struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
	header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
	/*
	 * We do not care about the records lost count, because the metadata
	 * channel waits and retry.
	 * NOTE(review): the "full" count is read but deliberately discarded;
	 * only wrap and big-event losses are reported as events_discarded.
	 */
	(void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
366
/* Per-buffer creation hook: nothing to set up for this client; always 0. */
static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
				int cpu, const char *name, struct lttng_ust_shm_handle *handle)
{
	return 0;
}
372
/* Per-buffer finalize hook: nothing to tear down for this client. */
static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf, void *priv, int cpu, struct lttng_ust_shm_handle *handle)
{
}
376
/*
 * Ring buffer configuration for this client: wires the callbacks above and
 * selects per-CPU, mmap-backed buffers with writer-side wakeup.
 */
static const struct lttng_ust_lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,	/* truncated TSC width in compact headers */
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_GLOBAL,
	.mode = RING_BUFFER_MODE_TEMPLATE,	/* supplied by the including file */
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_MMAP,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_NO_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
	.client_type = LTTNG_CLIENT_TYPE,	/* supplied by the including file */
};
397
398 const struct lttng_ust_lib_ring_buffer_client_cb *LTTNG_CLIENT_CALLBACKS = &client_config.cb;
399
400 static
401 struct ltt_channel *_channel_create(const char *name,
402 void *buf_addr,
403 size_t subbuf_size, size_t num_subbuf,
404 unsigned int switch_timer_interval,
405 unsigned int read_timer_interval,
406 int **shm_fd, int **wait_fd,
407 uint64_t **memory_map_size,
408 struct ltt_channel *chan_priv_init)
409 {
410 void *priv;
411 struct ltt_channel *ltt_chan = NULL;
412 struct lttng_ust_shm_handle *handle;
413
414 handle = channel_create(&client_config, name,
415 &priv, __alignof__(*ltt_chan), sizeof(*ltt_chan),
416 chan_priv_init,
417 buf_addr, subbuf_size, num_subbuf,
418 switch_timer_interval, read_timer_interval,
419 shm_fd, wait_fd, memory_map_size);
420 if (!handle)
421 return NULL;
422 ltt_chan = priv;
423 ltt_chan->handle = handle;
424 ltt_chan->chan = shmp(ltt_chan->handle, ltt_chan->handle->chan);
425 return ltt_chan;
426 }
427
/*
 * Tear down the channel and its shm handle. The trailing 0 argument is a
 * flag to channel_destroy() — presumably the "shadow/consumer" flag; confirm
 * against the libringbuffer frontend.
 */
static
void ltt_channel_destroy(struct ltt_channel *ltt_chan)
{
	channel_destroy(ltt_chan->chan, ltt_chan->handle, 0);
}
433
434 static
435 struct lttng_ust_lib_ring_buffer *ltt_buffer_read_open(struct channel *chan,
436 struct lttng_ust_shm_handle *handle,
437 int **shm_fd, int **wait_fd,
438 uint64_t **memory_map_size)
439 {
440 struct lttng_ust_lib_ring_buffer *buf;
441 int cpu;
442
443 for_each_channel_cpu(cpu, chan) {
444 buf = channel_get_ring_buffer(&client_config, chan,
445 cpu, handle, shm_fd, wait_fd,
446 memory_map_size);
447 if (!lib_ring_buffer_open_read(buf, handle, 0))
448 return buf;
449 }
450 return NULL;
451 }
452
/*
 * Release the reader reference taken by ltt_buffer_read_open(). The trailing
 * 0 mirrors the flag passed to lib_ring_buffer_open_read() — presumably the
 * shadow flag; confirm against the libringbuffer frontend.
 */
static
void ltt_buffer_read_close(struct lttng_ust_lib_ring_buffer *buf,
			   struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_release_read(buf, handle, 0);
}
459
/*
 * ltt_event_reserve - reserve space for one event and write its header.
 *
 * Pins the current CPU (released by ltt_event_commit, or here on failure),
 * flags events whose id does not fit the channel's compact header, then
 * reserves buffer space and writes the header.
 *
 * Returns 0 on success; -EPERM when no CPU could be pinned; otherwise the
 * error from lib_ring_buffer_reserve().
 */
static
int ltt_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
		      uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	ctx->cpu = cpu;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		/*
		 * 5-bit id field holds ids 0..30; 31 is the escape value
		 * used by the slow path, so larger ids need the extended
		 * header.
		 */
		if (event_id > 30)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		/* 16-bit id field; 65535 is the extended-header escape. */
		if (event_id > 65534)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (ret)
		goto put;
	ltt_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}
494
/*
 * ltt_event_commit - commit the slot reserved by ltt_event_reserve() and
 * release the CPU reference that reserve acquired on success.
 */
static
void ltt_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}
501
/* Copy @len bytes of event payload from @src into the reserved slot. */
static
void ltt_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}
508
/*
 * NOTE(review): dead code kept for reference — wait_queue_head_t is a
 * kernel-side type, so these accessors are compiled out in the UST build.
 * The matching transport ops below are also commented out.
 */
#if 0
static
wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
{
	return &chan->read_wait;
}

static
wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
#endif //0
522
/* Transport callback: has the channel been finalized? */
static
int ltt_is_finalized(struct channel *chan)
{
	int finalized = lib_ring_buffer_channel_is_finalized(chan);

	return finalized;
}
528
/* Transport callback: is the channel currently disabled? */
static
int ltt_is_disabled(struct channel *chan)
{
	int disabled = lib_ring_buffer_channel_is_disabled(chan);

	return disabled;
}
534
535 static
536 int ltt_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
537 {
538 struct lttng_ust_lib_ring_buffer *buf;
539 int cpu;
540
541 for_each_channel_cpu(cpu, chan) {
542 int *shm_fd, *wait_fd;
543 uint64_t *memory_map_size;
544
545 buf = channel_get_ring_buffer(&client_config, chan,
546 cpu, handle, &shm_fd, &wait_fd,
547 &memory_map_size);
548 lib_ring_buffer_switch(&client_config, buf,
549 SWITCH_ACTIVE, handle);
550 }
551 return 0;
552 }
553
/*
 * Transport operations table registered with the tracer. The name encodes
 * the buffer mode (RING_BUFFER_MODE_TEMPLATE_STRING, supplied by the
 * including file) and the mmap output type.
 */
static struct ltt_transport ltt_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = ltt_channel_destroy,
		.buffer_read_open = ltt_buffer_read_open,
		.buffer_read_close = ltt_buffer_read_close,
		.event_reserve = ltt_event_reserve,
		.event_commit = ltt_event_commit,
		.event_write = ltt_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		//.get_reader_wait_queue = ltt_get_reader_wait_queue,
		//.get_hp_wait_queue = ltt_get_hp_wait_queue,
		.is_finalized = ltt_is_finalized,
		.is_disabled = ltt_is_disabled,
		.flush_buffer = ltt_flush_buffer,
	},
};
572
/*
 * Template entry point (name supplied by the including file): registers
 * this client's transport with the tracer.
 */
void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
	DBG("LTT : ltt ring buffer client init\n");
	ltt_transport_register(&ltt_relay_transport);
}
578
/*
 * Template exit point (name supplied by the including file): unregisters
 * the transport registered by RING_BUFFER_MODE_TEMPLATE_INIT().
 */
void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
	DBG("LTT : ltt ring buffer client exit\n");
	ltt_transport_unregister(&ltt_relay_transport);
}
This page took 0.052351 seconds and 5 git commands to generate.