Cygwin: Pass file paths instead of file descriptors over UNIX sockets
[lttng-ust.git] / liblttng-ust / ltt-ring-buffer-client.h
/*
 * ltt-ring-buffer-client.h
 *
 * LTTng lib ring buffer client template.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <lttng/ust-events.h>
#include "lttng/bitfield.h"
#include "clock.h"
#include "lttng-ust-uuid.h"
#include "ltt-tracer.h"
#include "../libringbuffer/frontend_types.h"

#define LTTNG_COMPACT_EVENT_BITS        5
#define LTTNG_COMPACT_TSC_BITS          27

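/*
 * Layout note (derived from the constants above and the compact header code
 * below): the "compact" event header packs the event id and a truncated
 * timestamp into a single 32-bit word, 5 bits of id (values 0..30, with 31
 * reserved as the escape value selecting the extended header) followed by
 * the low 27 bits of the TSC.
 */
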
/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. The packed attribute is not
 * used because gcc generates poor code for it on at least powerpc and mips.
 * Don't ever let gcc add padding between the structure elements.
 */

struct packet_header {
        /* Trace packet header */
        uint32_t magic;                 /*
                                         * Trace magic number.
                                         * Contains endianness information.
                                         */
        uint8_t uuid[LTTNG_UST_UUID_LEN];
        uint32_t stream_id;

        struct {
                /* Stream packet context */
                uint64_t timestamp_begin;       /* Cycle count at subbuffer start */
                uint64_t timestamp_end;         /* Cycle count at subbuffer end */
                uint32_t events_discarded;      /*
                                                 * Events lost in this subbuffer since
                                                 * the beginning of the trace.
                                                 * (may overflow)
                                                 */
                uint32_t content_size;          /* Size of data in subbuffer */
                uint32_t packet_size;           /* Subbuffer size (includes padding) */
                uint32_t cpu_id;                /* CPU id associated with stream */
                uint8_t header_end;             /* End of header */
        } ctx;
};

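/*
 * This structure is laid down at the start of each sub-buffer by
 * client_buffer_begin() below and completed by client_buffer_end(): it is
 * the CTF (Common Trace Format) packet header plus the stream packet
 * context, which is why the magic value written into it is CTF_MAGIC_NUMBER.
 */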

static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
{
        return trace_clock_read64();
}

static inline
size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
{
        int i;
        size_t orig_offset = offset;

        if (caa_likely(!ctx))
                return 0;
        for (i = 0; i < ctx->nr_fields; i++)
                offset += ctx->fields[i].get_size(offset);
        return offset - orig_offset;
}

static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
                struct ltt_channel *chan,
                struct lttng_ctx *ctx)
{
        int i;

        if (caa_likely(!ctx))
                return;
        for (i = 0; i < ctx->nr_fields; i++)
                ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}

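/*
 * Note: ctx_get_size() and ctx_record() walk the optional context fields
 * (struct lttng_ctx) attached to an event or to a channel. Each field
 * reports how much room it needs at a given offset through get_size() and
 * serializes itself through record(), so the header-size computation below
 * and the header writers stay agnostic of which contexts are enabled.
 */
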
/*
 * record_header_size - Calculate the record header size and the padding
 *                      required before it.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type
 * it contains.
 */
static __inline__
size_t record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
                          struct channel *chan, size_t offset,
                          size_t *pre_header_padding,
                          struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
        struct ltt_channel *ltt_chan = channel_get_private(chan);
        struct ltt_event *event = ctx->priv;
        size_t orig_offset = offset;
        size_t padding;

        switch (ltt_chan->header_type) {
        case 1: /* compact */
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                offset += padding;
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
                        offset += sizeof(uint32_t);     /* id and timestamp */
                } else {
                        /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
                        offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
                        /* Align extended struct on largest member */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint32_t);     /* id */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint64_t);     /* timestamp */
                }
                break;
        case 2: /* large */
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
                offset += padding;
                offset += sizeof(uint16_t);
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                        offset += sizeof(uint32_t);     /* timestamp */
                } else {
                        /* Align extended struct on largest member */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint32_t);     /* id */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint64_t);     /* timestamp */
                }
                break;
        default:
                padding = 0;
                WARN_ON_ONCE(1);
        }
        offset += ctx_get_size(offset, event->ctx);
        offset += ctx_get_size(offset, ltt_chan->ctx);

        *pre_header_padding = padding;
        return offset - orig_offset;
}

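/*
 * Worked example (derived from the compact branch above): with no context
 * fields and neither RING_BUFFER_RFLAG_FULL_TSC nor LTT_RFLAG_EXTENDED set,
 * a record starting at a 32-bit aligned offset needs 0 bytes of padding and
 * a 4-byte header (the packed id+timestamp word). Starting at offset 6, the
 * same record needs 2 bytes of padding, for a total of 6 bytes.
 */
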
#include "../libringbuffer/api.h"

static
void ltt_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
                                 struct lttng_ust_lib_ring_buffer_ctx *ctx,
                                 uint32_t event_id);

/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void ltt_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
                            struct lttng_ust_lib_ring_buffer_ctx *ctx,
                            uint32_t event_id)
{
        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
        struct ltt_event *event = ctx->priv;

        if (caa_unlikely(ctx->rflags))
                goto slow_path;

        switch (ltt_chan->header_type) {
        case 1: /* compact */
        {
                uint32_t id_time = 0;

                bt_bitfield_write(&id_time, uint32_t,
                                  0,
                                  LTTNG_COMPACT_EVENT_BITS,
                                  event_id);
                bt_bitfield_write(&id_time, uint32_t,
                                  LTTNG_COMPACT_EVENT_BITS,
                                  LTTNG_COMPACT_TSC_BITS,
                                  ctx->tsc);
                lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                break;
        }
        case 2: /* large */
        {
                uint32_t timestamp = (uint32_t) ctx->tsc;
                uint16_t id = event_id;

                lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
                lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                break;
        }
        default:
                WARN_ON_ONCE(1);
        }

        ctx_record(ctx, ltt_chan, ltt_chan->ctx);
        ctx_record(ctx, ltt_chan, event->ctx);
        lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

        return;

slow_path:
        ltt_write_event_header_slow(config, ctx, event_id);
}

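/*
 * Fast path vs. slow path: ltt_write_event_header() handles the common case
 * where ctx->rflags is 0, i.e. the event id fits in the compact/large id
 * field and no full timestamp was requested. Any set flag (LTT_RFLAG_EXTENDED
 * set by ltt_event_reserve() for large ids, or RING_BUFFER_RFLAG_FULL_TSC,
 * which the ring-buffer layer is expected to set when the truncated
 * timestamp would be ambiguous) falls back to the extended header written by
 * ltt_write_event_header_slow().
 */
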
static
void ltt_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
                                 struct lttng_ust_lib_ring_buffer_ctx *ctx,
                                 uint32_t event_id)
{
        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
        struct ltt_event *event = ctx->priv;

        switch (ltt_chan->header_type) {
        case 1: /* compact */
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
                        uint32_t id_time = 0;

                        bt_bitfield_write(&id_time, uint32_t,
                                          0,
                                          LTTNG_COMPACT_EVENT_BITS,
                                          event_id);
                        bt_bitfield_write(&id_time, uint32_t,
                                          LTTNG_COMPACT_EVENT_BITS,
                                          LTTNG_COMPACT_TSC_BITS,
                                          ctx->tsc);
                        lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                } else {
                        uint8_t id = 0;
                        uint64_t timestamp = ctx->tsc;

                        bt_bitfield_write(&id, uint8_t,
                                          0,
                                          LTTNG_COMPACT_EVENT_BITS,
                                          31);
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                }
                break;
        case 2: /* large */
        {
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
                        uint32_t timestamp = (uint32_t) ctx->tsc;
                        uint16_t id = event_id;

                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                } else {
                        uint16_t id = 65535;
                        uint64_t timestamp = ctx->tsc;

                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                }
                break;
        }
        default:
                WARN_ON_ONCE(1);
        }
        ctx_record(ctx, ltt_chan, ltt_chan->ctx);
        ctx_record(ctx, ltt_chan, event->ctx);
        lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}

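/*
 * The values 31 (compact) and 65535 (large) written above are the reserved
 * escape ids that tell a trace reader "this is an extended header; the real
 * 32-bit event id and 64-bit timestamp follow". They match the
 * "event_id > 30" and "event_id > 65534" checks in ltt_event_reserve()
 * below, which force LTT_RFLAG_EXTENDED for ids that cannot be encoded
 * directly.
 */
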
static const struct lttng_ust_lib_ring_buffer_config client_config;

static uint64_t client_ring_buffer_clock_read(struct channel *chan)
{
        return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
                                 struct channel *chan, size_t offset,
                                 size_t *pre_header_padding,
                                 struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
        return record_header_size(config, chan, offset,
                                  pre_header_padding, ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return the header size without padding after the structure. Don't use a
 * packed structure, because gcc generates inefficient code on some
 * architectures (powerpc, mips, ...).
 */
static size_t client_packet_header_size(void)
{
        return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
                                unsigned int subbuf_idx,
                                struct lttng_ust_shm_handle *handle)
{
        struct channel *chan = shmp(handle, buf->backend.chan);
        struct packet_header *header =
                (struct packet_header *)
                        lib_ring_buffer_offset_address(&buf->backend,
                                subbuf_idx * chan->backend.subbuf_size,
                                handle);
        struct ltt_channel *ltt_chan = channel_get_private(chan);

        header->magic = CTF_MAGIC_NUMBER;
        memcpy(header->uuid, ltt_chan->uuid, sizeof(ltt_chan->uuid));
        header->stream_id = ltt_chan->id;
        header->ctx.timestamp_begin = tsc;
        header->ctx.timestamp_end = 0;
        header->ctx.events_discarded = 0;
        header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
        header->ctx.packet_size = 0xFFFFFFFF;
        header->ctx.cpu_id = buf->backend.cpu;
}

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
                              unsigned int subbuf_idx, unsigned long data_size,
                              struct lttng_ust_shm_handle *handle)
{
        struct channel *chan = shmp(handle, buf->backend.chan);
        struct packet_header *header =
                (struct packet_header *)
                        lib_ring_buffer_offset_address(&buf->backend,
                                subbuf_idx * chan->backend.subbuf_size,
                                handle);
        unsigned long records_lost = 0;

        header->ctx.timestamp_end = tsc;
        header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
        header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */

        records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
        records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
        records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
        header->ctx.events_discarded = records_lost;
}
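
/*
 * Note that content_size and packet_size are recorded in bits (hence the
 * CHAR_BIT multiplications): content_size counts the data actually written
 * to the sub-buffer, while packet_size is that size rounded up to a page
 * boundary. The difference between the two is padding a trace reader has to
 * skip at the end of the packet.
 */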

static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
                                int cpu, const char *name,
                                struct lttng_ust_shm_handle *handle)
{
        return 0;
}

static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf,
                                   void *priv, int cpu,
                                   struct lttng_ust_shm_handle *handle)
{
}

static const struct lttng_ust_lib_ring_buffer_config client_config = {
        .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
        .cb.record_header_size = client_record_header_size,
        .cb.subbuffer_header_size = client_packet_header_size,
        .cb.buffer_begin = client_buffer_begin,
        .cb.buffer_end = client_buffer_end,
        .cb.buffer_create = client_buffer_create,
        .cb.buffer_finalize = client_buffer_finalize,

        .tsc_bits = LTTNG_COMPACT_TSC_BITS,
        .alloc = RING_BUFFER_ALLOC_PER_CPU,
        .sync = RING_BUFFER_SYNC_GLOBAL,
        .mode = RING_BUFFER_MODE_TEMPLATE,
        .backend = RING_BUFFER_PAGE,
        .output = RING_BUFFER_MMAP,
        .oops = RING_BUFFER_OOPS_CONSISTENCY,
        .ipi = RING_BUFFER_NO_IPI_BARRIER,
        .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
        .client_type = LTTNG_CLIENT_TYPE,
};

const struct lttng_ust_lib_ring_buffer_client_cb *LTTNG_CLIENT_CALLBACKS = &client_config.cb;

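/*
 * RING_BUFFER_MODE_TEMPLATE, RING_BUFFER_MODE_TEMPLATE_STRING,
 * RING_BUFFER_MODE_TEMPLATE_INIT/_EXIT, LTTNG_CLIENT_TYPE and
 * LTTNG_CLIENT_CALLBACKS are deliberately left undefined here: this header
 * is a template, and each ring buffer client .c file (e.g. the discard and
 * overwrite clients) defines them before including it, producing one
 * transport per buffering mode.
 */
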
static
struct ltt_channel *_channel_create(const char *name,
                                void *buf_addr,
                                size_t subbuf_size, size_t num_subbuf,
                                unsigned int switch_timer_interval,
                                unsigned int read_timer_interval,
                                int **shm_fd, char **shm_path,
                                int **wait_fd, char **wait_pipe_path,
                                uint64_t **memory_map_size,
                                struct ltt_channel *chan_priv_init)
{
        void *priv;
        struct ltt_channel *ltt_chan = NULL;
        struct lttng_ust_shm_handle *handle;

        handle = channel_create(&client_config, name,
                        &priv, __alignof__(*ltt_chan), sizeof(*ltt_chan),
                        chan_priv_init,
                        buf_addr, subbuf_size, num_subbuf,
                        switch_timer_interval, read_timer_interval,
                        shm_fd, shm_path, wait_fd, wait_pipe_path,
                        memory_map_size);
        if (!handle)
                return NULL;
        ltt_chan = priv;
        ltt_chan->handle = handle;
        ltt_chan->chan = shmp(ltt_chan->handle, ltt_chan->handle->chan);
        return ltt_chan;
}
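
/*
 * Each buffer is exported to the consumer both as a file descriptor and as a
 * filesystem path (shm_fd/shm_path for the shared-memory area,
 * wait_fd/wait_pipe_path for the wakeup pipe). The path variants exist for
 * platforms such as Cygwin where file descriptors cannot be passed over
 * UNIX sockets, so the consumer opens the path instead of receiving a
 * duplicated fd.
 */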

static
void ltt_channel_destroy(struct ltt_channel *ltt_chan)
{
        channel_destroy(ltt_chan->chan, ltt_chan->handle, 0);
}

static
struct lttng_ust_lib_ring_buffer *ltt_buffer_read_open(struct channel *chan,
                                        struct lttng_ust_shm_handle *handle,
                                        int **shm_fd, char **shm_path,
                                        int **wait_fd, char **wait_pipe_path,
                                        uint64_t **memory_map_size)
{
        struct lttng_ust_lib_ring_buffer *buf;
        int cpu;

        for_each_channel_cpu(cpu, chan) {
                buf = channel_get_ring_buffer(&client_config, chan,
                                cpu, handle, shm_fd, shm_path,
                                wait_fd, wait_pipe_path,
                                memory_map_size);
                if (!lib_ring_buffer_open_read(buf, handle, 0))
                        return buf;
        }
        return NULL;
}

static
void ltt_buffer_read_close(struct lttng_ust_lib_ring_buffer *buf,
                           struct lttng_ust_shm_handle *handle)
{
        lib_ring_buffer_release_read(buf, handle, 0);
}

static
int ltt_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                      uint32_t event_id)
{
        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
        int ret, cpu;

        cpu = lib_ring_buffer_get_cpu(&client_config);
        if (cpu < 0)
                return -EPERM;
        ctx->cpu = cpu;

        switch (ltt_chan->header_type) {
        case 1: /* compact */
                if (event_id > 30)
                        ctx->rflags |= LTT_RFLAG_EXTENDED;
                break;
        case 2: /* large */
                if (event_id > 65534)
                        ctx->rflags |= LTT_RFLAG_EXTENDED;
                break;
        default:
                WARN_ON_ONCE(1);
        }

        ret = lib_ring_buffer_reserve(&client_config, ctx);
        if (ret)
                goto put;
        ltt_write_event_header(&client_config, ctx, event_id);
        return 0;
put:
        lib_ring_buffer_put_cpu(&client_config);
        return ret;
}

static
void ltt_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
        lib_ring_buffer_commit(&client_config, ctx);
        lib_ring_buffer_put_cpu(&client_config);
}

static
void ltt_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
                     size_t len)
{
        lib_ring_buffer_write(&client_config, ctx, src, len);
}

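/*
 * Typical write sequence, as seen from a tracepoint probe (rough sketch, not
 * a literal call site): the probe sets up a struct
 * lttng_ust_lib_ring_buffer_ctx describing the payload size and alignment,
 * calls event_reserve() to pick a CPU, reserve space and emit the event
 * header, then makes one or more event_write() calls to serialize the
 * payload fields, and finally calls event_commit() to publish the record and
 * release the CPU. If the reservation fails (e.g. buffer full in discard
 * mode), the probe simply skips the write and commit steps.
 */
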
#if 0
static
wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
{
        return &chan->read_wait;
}

static
wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
{
        return &chan->hp_wait;
}
#endif //0

static
int ltt_is_finalized(struct channel *chan)
{
        return lib_ring_buffer_channel_is_finalized(chan);
}

static
int ltt_is_disabled(struct channel *chan)
{
        return lib_ring_buffer_channel_is_disabled(chan);
}

static
int ltt_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer *buf;
        int cpu;

        for_each_channel_cpu(cpu, chan) {
                int *shm_fd, *wait_fd;
                char *shm_path, *wait_pipe_path;
                uint64_t *memory_map_size;

                buf = channel_get_ring_buffer(&client_config, chan,
                                cpu, handle, &shm_fd, &shm_path,
                                &wait_fd, &wait_pipe_path,
                                &memory_map_size);
                lib_ring_buffer_switch(&client_config, buf,
                                SWITCH_ACTIVE, handle);
        }
        return 0;
}
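
/*
 * ltt_flush_buffer() performs a SWITCH_ACTIVE sub-buffer switch on every
 * per-CPU buffer of the channel, so that data sitting in a partially filled
 * sub-buffer becomes readable by the consumer instead of waiting for the
 * sub-buffer to fill up or for the switch timer to fire.
 */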

static struct ltt_transport ltt_relay_transport = {
        .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
        .ops = {
                .channel_create = _channel_create,
                .channel_destroy = ltt_channel_destroy,
                .buffer_read_open = ltt_buffer_read_open,
                .buffer_read_close = ltt_buffer_read_close,
                .event_reserve = ltt_event_reserve,
                .event_commit = ltt_event_commit,
                .event_write = ltt_event_write,
                .packet_avail_size = NULL,      /* Would be racy anyway */
                //.get_reader_wait_queue = ltt_get_reader_wait_queue,
                //.get_hp_wait_queue = ltt_get_hp_wait_queue,
                .is_finalized = ltt_is_finalized,
                .is_disabled = ltt_is_disabled,
                .flush_buffer = ltt_flush_buffer,
        },
};

void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
        DBG("LTT : ltt ring buffer client init\n");
        ltt_transport_register(&ltt_relay_transport);
}

void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
        DBG("LTT : ltt ring buffer client exit\n");
        ltt_transport_unregister(&ltt_relay_transport);
}