/*
 * ltt-ring-buffer-client.h
 *
 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng lib ring buffer client template.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <stdint.h>
#include <ust/lttng-events.h>
#include "ust/bitfield.h"
#include "ust/clock.h"
#include "ltt-tracer.h"
#include "../libringbuffer/frontend_types.h"

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add or remove a field from this header. The packed attribute is not
 * used because gcc generates poor code on at least powerpc and mips. Don't
 * ever let gcc add padding between the structure elements.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t uuid[16];
	uint32_t stream_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace
						 * (may overflow).
						 */
		uint32_t content_size;		/* Size of data in subbuffer */
		uint32_t packet_size;		/* Subbuffer size (includes padding) */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};

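/*
 * Layout sketch (informative only): assuming the natural alignment requested
 * above and the usual 4/8/16-byte field sizes, the fields land at byte
 * offsets 0 (magic), 4 (uuid), 20 (stream_id), 24 (ctx.timestamp_begin),
 * 32 (ctx.timestamp_end), 40 (ctx.events_discarded), 44 (ctx.content_size),
 * 48 (ctx.packet_size), 52 (ctx.cpu_id) and 56 (ctx.header_end), so
 * offsetof(struct packet_header, ctx.header_end) - used below as the
 * packet header size - would evaluate to 56 bytes.
 */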
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

static inline
size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
{
	int i;
	size_t orig_offset = offset;

	if (likely(!ctx))
		return 0;
	for (i = 0; i < ctx->nr_fields; i++)
		offset += ctx->fields[i].get_size(offset);
	return offset - orig_offset;
}

static inline
void ctx_record(struct lib_ring_buffer_ctx *bufctx,
		struct ltt_channel *chan,
		struct lttng_ctx *ctx)
{
	int i;

	if (likely(!ctx))
		return;
	for (i = 0; i < ctx->nr_fields; i++)
		ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx)
{
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_event *event = ctx->priv;
	size_t orig_offset = offset;
	size_t padding;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by 5-bit id */
			offset += sizeof(uint8_t);
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	offset += ctx_get_size(offset, event->ctx);
	offset += ctx_get_size(offset, ltt_chan->ctx);

	*pre_header_padding = padding;
	return offset - orig_offset;
}

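/*
 * Worked example (informative only): starting from an 8-byte aligned offset,
 * the compact header without extended flags costs 4 bytes (the merged
 * id/timestamp word), while the compact extended form costs
 * 1 + 7 (padding) + 4 + 4 (padding) + 8 = 24 bytes, before the event and
 * channel context field sizes are added by ctx_get_size().
 */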
#include "../libringbuffer/api.h"

static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint32_t event_id);

/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	if (unlikely(ctx->rflags))
		goto slow_path;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
		bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);

	return;

slow_path:
	ltt_write_event_header_slow(config, ctx, event_id);
}
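/*
 * Fast-path encoding summary (informative only): the compact header packs
 * the event id into bits 0-4 and the low 27 bits of ctx->tsc into bits 5-31
 * of a single uint32_t, while the large header writes a 16-bit id followed
 * by a 32-bit truncated timestamp aligned on 32 bits.
 */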

static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
			bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			bt_bitfield_write(&id, uint8_t, 0, 5, 31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);
}
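/*
 * Slow-path note (informative only): the extended forms first write an
 * escape id (31 for the 5-bit compact id, 65535 for the 16-bit large id),
 * then the full 32-bit event_id and a full 64-bit timestamp, each aligned
 * on 64 bits, mirroring the sizes accounted for in record_header_size().
 */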

static const struct lib_ring_buffer_config client_config;

static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset,
				  pre_header_padding, ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return the header size without padding after the structure. Don't use a
 * packed structure because gcc generates inefficient code on some
 * architectures (powerpc, mips, ...).
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx,
				struct shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_session *session = ltt_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid, sizeof(session->uuid));
	header->stream_id = ltt_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.events_discarded = 0;
	header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
	header->ctx.packet_size = 0xFFFFFFFF;
	header->ctx.cpu_id = buf->backend.cpu;
}

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx, unsigned long data_size,
			      struct shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size = data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */
	/*
	 * We do not care about the records lost count, because the metadata
	 * channel waits and retries.
	 */
	(void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
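/*
 * Example (informative only): with data_size = 100 bytes and an assumed
 * 4 KiB page size, content_size becomes 800 bits while packet_size is
 * rounded up to PAGE_ALIGN(100) * CHAR_BIT = 32768 bits; both packet sizes
 * are expressed in bits, as expected by the CTF packet context above.
 */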

static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name, struct shm_handle *handle)
{
	return 0;
}

static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu, struct shm_handle *handle)
{
}

static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = 32,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_GLOBAL,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_MMAP,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_NO_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
};
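/*
 * Note (informative only): this configuration selects per-CPU buffer
 * allocation, global synchronization, page-backed buffers delivered over
 * mmap, and writer-driven reader wakeups. The 32 tsc_bits value matches the
 * RING_BUFFER_RFLAG_FULL_TSC handling in the event header code above, which
 * falls back to a full 64-bit timestamp when needed.
 */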

static
struct ltt_channel *_channel_create(const char *name,
				struct ltt_channel *ltt_chan, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval,
				int *shm_fd, int *wait_fd,
				uint64_t *memory_map_size)
{
	ltt_chan->handle = channel_create(&client_config, name, ltt_chan, buf_addr,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval, shm_fd, wait_fd,
			memory_map_size);
	if (!ltt_chan->handle)
		return NULL;
	ltt_chan->chan = shmp(ltt_chan->handle, ltt_chan->handle->chan);
	return ltt_chan;
}

static
void ltt_channel_destroy(struct ltt_channel *ltt_chan)
{
	channel_destroy(ltt_chan->chan, ltt_chan->handle, 0);
}

static
struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan,
					     struct shm_handle *handle,
					     int *shm_fd, int *wait_fd,
					     uint64_t *memory_map_size)
{
	struct lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan,
				cpu, handle, shm_fd, wait_fd,
				memory_map_size);
		if (!lib_ring_buffer_open_read(buf, handle, 0))
			return buf;
	}
	return NULL;
}

static
void ltt_buffer_read_close(struct lib_ring_buffer *buf,
			   struct shm_handle *handle)
{
	lib_ring_buffer_release_read(buf, handle, 0);
}

static
int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
		      uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	ctx->cpu = cpu;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (ret)
		goto put;
	ltt_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}
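/*
 * Note (informative only): event ids above 30 (compact) or 65534 (large)
 * force the extended header because the all-ones id values (31 and 65535)
 * are written by ltt_write_event_header_slow() as escape markers for the
 * extended record format.
 */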

static
void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}

static
void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

#if 0
static
wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
{
	return &chan->read_wait;
}

static
wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
#endif //0

static
int ltt_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

static
int ltt_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}

static struct ltt_transport ltt_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = ltt_channel_destroy,
		.buffer_read_open = ltt_buffer_read_open,
		.buffer_read_close = ltt_buffer_read_close,
		.event_reserve = ltt_event_reserve,
		.event_commit = ltt_event_commit,
		.event_write = ltt_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		//.get_reader_wait_queue = ltt_get_reader_wait_queue,
		//.get_hp_wait_queue = ltt_get_hp_wait_queue,
		.is_finalized = ltt_is_finalized,
		.is_disabled = ltt_is_disabled,
	},
};

void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
	DBG("LTT : ltt ring buffer client init\n");
	ltt_transport_register(&ltt_relay_transport);
}

void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
	DBG("LTT : ltt ring buffer client exit\n");
	ltt_transport_unregister(&ltt_relay_transport);
}