/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-ring-buffer-client.h
 *
 * LTTng lib ring buffer client template.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <lttng/bitfield.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/trace-clock.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <lttng/tracer.h>
#include <ringbuffer/frontend_types.h>

#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TSC_BITS		27
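
/*
 * The compact event header packs the event ID (LTTNG_COMPACT_EVENT_BITS)
 * and the low-order timestamp bits (LTTNG_COMPACT_TSC_BITS) into a single
 * 32-bit word. Event IDs above 30 (31 is reserved as the escape value), or
 * records flagged for a full 64-bit timestamp, fall back to the extended
 * header written by the slow path below.
 */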

static struct lttng_transport lttng_relay_transport;

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 *
 * The guarantee we have with timestamps is that all the events in a
 * packet are included (inclusive) within the begin/end timestamps of
 * the packet. Another guarantee we have is that the "timestamp begin",
 * as well as the event timestamps, are monotonically increasing (never
 * decrease) when moving forward in a stream (physically). But this
 * guarantee does not apply to "timestamp end", because it is sampled at
 * commit time, which is not ordered with respect to space reservation.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t uuid[16];
	uint32_t stream_id;
	uint64_t stream_instance_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint64_t content_size;		/* Size of data in subbuffer, in bits */
		uint64_t packet_size;		/* Subbuffer size (includes padding), in bits */
		uint64_t packet_seq_num;	/* Packet sequence number */
		unsigned long events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};

struct lttng_client_ctx {
	size_t packet_context_len;
	size_t event_context_len;
};

static inline notrace u64 lib_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
{
	return trace_clock_read64();
}

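/*
 * The three helpers below handle the optional per-channel context fields
 * (lttng_chan->priv->ctx): ctx_get_struct_size() sums the size of each
 * field, ctx_get_aligned_size() accounts for the alignment padding needed
 * at the current offset, and ctx_record() serializes the fields into the
 * ring buffer right after the event header.
 */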
static inline
size_t ctx_get_aligned_size(size_t offset, struct lttng_kernel_ctx *ctx,
		size_t ctx_len)
{
	size_t orig_offset = offset;

	if (likely(!ctx))
		return 0;
	offset += lib_ring_buffer_align(offset, ctx->largest_align);
	offset += ctx_len;
	return offset - orig_offset;
}

static inline
void ctx_get_struct_size(struct lttng_kernel_ctx *ctx, size_t *ctx_len,
		struct lttng_kernel_channel_buffer *lttng_chan,
		struct lttng_kernel_ring_buffer_ctx *bufctx)
{
	int i;
	size_t offset = 0;

	if (likely(!ctx)) {
		*ctx_len = 0;
		return;
	}
	for (i = 0; i < ctx->nr_fields; i++) {
		offset += ctx->fields[i].get_size(ctx->fields[i].priv,
				bufctx->probe_ctx, offset);
	}
	*ctx_len = offset;
}

static inline
void ctx_record(struct lttng_kernel_ring_buffer_ctx *bufctx,
		struct lttng_kernel_channel_buffer *lttng_chan,
		struct lttng_kernel_ctx *ctx)
{
	int i;

	if (likely(!ctx))
		return;
	lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
	for (i = 0; i < ctx->nr_fields; i++)
		ctx->fields[i].record(ctx->fields[i].priv, bufctx->probe_ctx,
				bufctx, lttng_chan);
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
size_t record_header_size(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
		size_t *pre_header_padding,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		struct lttng_client_ctx *client_ctx)
{
	struct lttng_kernel_channel_buffer *lttng_chan = channel_get_private(chan);
	size_t orig_offset = offset;
	size_t padding;

	switch (lttng_chan->priv->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	offset += ctx_get_aligned_size(offset, lttng_chan->priv->ctx,
				       client_ctx->packet_context_len);
	*pre_header_padding = padding;
	return offset - orig_offset;
}

#include <ringbuffer/api.h>

static
void lttng_write_event_header_slow(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		uint32_t event_id);

/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void lttng_write_event_header(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_kernel_channel_buffer *lttng_chan = channel_get_private(ctx->priv.chan);

	if (unlikely(ctx->priv.rflags))
		goto slow_path;

	switch (lttng_chan->priv->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t,
				0,
				LTTNG_COMPACT_EVENT_BITS,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
				LTTNG_COMPACT_EVENT_BITS,
				LTTNG_COMPACT_TSC_BITS,
				ctx->priv.tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->priv.tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	ctx_record(ctx, lttng_chan, lttng_chan->priv->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}

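/*
 * Slow path: taken whenever a reservation flag is set, i.e. when the ring
 * buffer requests a full timestamp (RING_BUFFER_RFLAG_FULL_TSC) or when the
 * event ID does not fit in the compact/large header (LTTNG_RFLAG_EXTENDED).
 * It writes the reserved escape ID followed by the extended header
 * (32-bit event ID and 64-bit timestamp).
 */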
static
void lttng_write_event_header_slow(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_kernel_channel_buffer *lttng_chan = channel_get_private(ctx->priv.chan);

	switch (lttng_chan->priv->header_type) {
	case 1:	/* compact */
		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					event_id);
			bt_bitfield_write(&id_time, uint32_t,
					LTTNG_COMPACT_EVENT_BITS,
					LTTNG_COMPACT_TSC_BITS, ctx->priv.tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->priv.tsc;

			bt_bitfield_write(&id, uint8_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->priv.tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;
			uint64_t timestamp = ctx->priv.tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	ctx_record(ctx, lttng_chan, lttng_chan->priv->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}

static const struct lttng_kernel_ring_buffer_config client_config;

static u64 client_ring_buffer_clock_read(struct lttng_kernel_ring_buffer_channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer_channel *chan, size_t offset,
		size_t *pre_header_padding,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		void *client_ctx)
{
	return record_header_size(config, chan, offset,
			pre_header_padding, ctx, client_ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips...).
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lttng_kernel_ring_buffer *buf, u64 tsc,
		unsigned int subbuf_idx)
{
	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct lttng_kernel_channel_buffer *lttng_chan = channel_get_private(chan);
	struct lttng_kernel_session *session = lttng_chan->parent.session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->priv->uuid.b, sizeof(session->priv->uuid));
	header->stream_id = lttng_chan->priv->id;
	header->stream_instance_id = buf->backend.cpu;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.content_size = ~0ULL; /* for debugging */
	header->ctx.packet_size = ~0ULL;
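	/*
	 * Derive a monotonically increasing packet sequence number for this
	 * sub-buffer from its wrap count: each full pass over the
	 * num_subbuf sub-buffers advances the sequence by num_subbuf.
	 */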
	header->ctx.packet_seq_num = chan->backend.num_subbuf * \
				  buf->backend.buf_cnt[subbuf_idx].seq_cnt + \
				  subbuf_idx;
	header->ctx.events_discarded = 0;
	header->ctx.cpu_id = buf->backend.cpu;
}

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lttng_kernel_ring_buffer *buf, u64 tsc,
		unsigned int subbuf_idx, unsigned long data_size)
{
	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size =
		(uint64_t) data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size =
		(uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */
	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}

static int client_buffer_create(struct lttng_kernel_ring_buffer *buf, void *priv,
		int cpu, const char *name)
{
	return 0;
}

static void client_buffer_finalize(struct lttng_kernel_ring_buffer *buf, void *priv, int cpu)
{
}

static struct packet_header *client_packet_header(
		const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf)
{
	return lib_ring_buffer_read_offset_address(&buf->backend, 0);
}

static int client_timestamp_begin(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf,
		uint64_t *timestamp_begin)
{
	struct packet_header *header = client_packet_header(config, buf);
	*timestamp_begin = header->ctx.timestamp_begin;

	return 0;
}

static int client_timestamp_end(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf,
		uint64_t *timestamp_end)
{
	struct packet_header *header = client_packet_header(config, buf);
	*timestamp_end = header->ctx.timestamp_end;

	return 0;
}

static int client_events_discarded(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf,
		uint64_t *events_discarded)
{
	struct packet_header *header = client_packet_header(config, buf);
	*events_discarded = header->ctx.events_discarded;

	return 0;
}

static int client_content_size(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf,
		uint64_t *content_size)
{
	struct packet_header *header = client_packet_header(config, buf);
	*content_size = header->ctx.content_size;

	return 0;
}

static int client_packet_size(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf,
		uint64_t *packet_size)
{
	struct packet_header *header = client_packet_header(config, buf);
	*packet_size = header->ctx.packet_size;

	return 0;
}

static int client_stream_id(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf,
		uint64_t *stream_id)
{
	struct lttng_kernel_ring_buffer_channel *chan = buf->backend.chan;
	struct lttng_kernel_channel_buffer *lttng_chan = channel_get_private(chan);

	*stream_id = lttng_chan->priv->id;
	return 0;
}

static int client_current_timestamp(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *bufb,
		uint64_t *ts)
{
	*ts = config->cb.ring_buffer_clock_read(bufb->backend.chan);

	return 0;
}

static int client_sequence_number(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf,
		uint64_t *seq)
{
	struct packet_header *header = client_packet_header(config, buf);

	*seq = header->ctx.packet_seq_num;

	return 0;
}

static
int client_instance_id(const struct lttng_kernel_ring_buffer_config *config,
		struct lttng_kernel_ring_buffer *buf,
		uint64_t *id)
{
	*id = buf->backend.cpu;

	return 0;
}

static const struct lttng_kernel_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_OUTPUT_TEMPLATE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};

static
void release_priv_ops(void *priv_ops)
{
	module_put(THIS_MODULE);
}

static
void lttng_channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
{
	channel_destroy(chan);
}

static
struct lttng_kernel_ring_buffer_channel *_channel_create(const char *name,
		void *priv, void *buf_addr,
		size_t subbuf_size, size_t num_subbuf,
		unsigned int switch_timer_interval,
		unsigned int read_timer_interval)
{
	struct lttng_kernel_channel_buffer *lttng_chan = priv;
	struct lttng_kernel_ring_buffer_channel *chan;

	chan = channel_create(&client_config, name, lttng_chan, buf_addr,
			      subbuf_size, num_subbuf, switch_timer_interval,
			      read_timer_interval);
	if (chan) {
		/*
		 * Ensure this module is not unloaded before we finish
		 * using lttng_relay_transport.ops.
		 */
		if (!try_module_get(THIS_MODULE)) {
			printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
			goto error;
		}
		chan->backend.priv_ops = &lttng_relay_transport.ops;
		chan->backend.release_priv_ops = release_priv_ops;
	}
	return chan;

error:
	lttng_channel_destroy(chan);
	return NULL;
}

static
struct lttng_kernel_ring_buffer *lttng_buffer_read_open(struct lttng_kernel_ring_buffer_channel *chan)
{
	struct lttng_kernel_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu);
		if (!lib_ring_buffer_open_read(buf))
			return buf;
	}
	return NULL;
}

static
int lttng_buffer_has_read_closed_stream(struct lttng_kernel_ring_buffer_channel *chan)
{
	struct lttng_kernel_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu);
		if (!atomic_long_read(&buf->active_readers))
			return 1;
	}
	return 0;
}

static
void lttng_buffer_read_close(struct lttng_kernel_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}

static
int lttng_event_reserve(struct lttng_kernel_ring_buffer_ctx *ctx)
{
	struct lttng_kernel_event_recorder *event_recorder = ctx->client_priv;
	struct lttng_kernel_channel_buffer *lttng_chan = event_recorder->chan;
	struct lttng_client_ctx client_ctx;
	int ret, cpu;
	uint32_t event_id;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (unlikely(cpu < 0))
		return -EPERM;
	event_id = event_recorder->priv->id;
	memset(&ctx->priv, 0, sizeof(ctx->priv));
	ctx->priv.chan = lttng_chan->priv->rb_chan;
	ctx->priv.reserve_cpu = cpu;

	/* Compute internal size of context structures. */
	ctx_get_struct_size(lttng_chan->priv->ctx, &client_ctx.packet_context_len, lttng_chan, ctx);

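	/*
	 * The highest ID encodable in each header type (31 for compact,
	 * 65535 for large) is reserved as the escape value announcing the
	 * extended header, so events with larger IDs are flagged here and
	 * routed to the slow-path header writer.
	 */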
	switch (lttng_chan->priv->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
	if (unlikely(ret))
		goto put;
	lib_ring_buffer_backend_get_pages(&client_config, ctx,
			&ctx->priv.backend_pages);
	lttng_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}

static
void lttng_event_commit(struct lttng_kernel_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}

static
void lttng_event_write(struct lttng_kernel_ring_buffer_ctx *ctx, const void *src,
		size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

static
void lttng_event_write_from_user(struct lttng_kernel_ring_buffer_ctx *ctx,
		const void __user *src, size_t len)
{
	lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
}

static
void lttng_event_memset(struct lttng_kernel_ring_buffer_ctx *ctx,
		int c, size_t len)
{
	lib_ring_buffer_memset(&client_config, ctx, c, len);
}

static
void lttng_event_strcpy(struct lttng_kernel_ring_buffer_ctx *ctx, const char *src,
		size_t len)
{
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}

static
void lttng_event_strcpy_from_user(struct lttng_kernel_ring_buffer_ctx *ctx,
		const char __user *src, size_t len)
{
	lib_ring_buffer_strcpy_from_user_inatomic(&client_config, ctx, src,
			len, '#');
}

static
void lttng_channel_buffer_lost_event_too_big(struct lttng_kernel_channel_buffer *lttng_chan)
{
	lib_ring_buffer_lost_event_too_big(lttng_chan->priv->rb_chan);
}

static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct lttng_kernel_ring_buffer_channel *chan, int cpu)
{
	struct lttng_kernel_ring_buffer *buf = channel_get_ring_buffer(&client_config,
			chan, cpu);
	return &buf->write_wait;
}

static
wait_queue_head_t *lttng_get_hp_wait_queue(struct lttng_kernel_ring_buffer_channel *chan)
{
	return &chan->hp_wait;
}

static
int lttng_is_finalized(struct lttng_kernel_ring_buffer_channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

static
int lttng_is_disabled(struct lttng_kernel_ring_buffer_channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}

static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.priv = __LTTNG_COMPOUND_LITERAL(struct lttng_kernel_channel_buffer_ops_private, {
			.pub = &lttng_relay_transport.ops,
			.channel_create = _channel_create,
			.channel_destroy = lttng_channel_destroy,
			.buffer_read_open = lttng_buffer_read_open,
			.buffer_has_read_closed_stream =
				lttng_buffer_has_read_closed_stream,
			.buffer_read_close = lttng_buffer_read_close,
			.packet_avail_size = NULL,	/* Would be racy anyway */
			.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
			.get_hp_wait_queue = lttng_get_hp_wait_queue,
			.is_finalized = lttng_is_finalized,
			.is_disabled = lttng_is_disabled,
			.timestamp_begin = client_timestamp_begin,
			.timestamp_end = client_timestamp_end,
			.events_discarded = client_events_discarded,
			.content_size = client_content_size,
			.packet_size = client_packet_size,
			.stream_id = client_stream_id,
			.current_timestamp = client_current_timestamp,
			.sequence_number = client_sequence_number,
			.instance_id = client_instance_id,
		}),
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.event_write_from_user = lttng_event_write_from_user,
		.event_memset = lttng_event_memset,
		.event_strcpy = lttng_event_strcpy,
		.event_strcpy_from_user = lttng_event_strcpy_from_user,
		.lost_event_too_big = lttng_channel_buffer_lost_event_too_big,
	},
};

static int __init lttng_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync all also takes care of the lib ring buffer
	 * vmalloc'd module pages when it is built as a module into LTTng.
	 */
	wrapper_vmalloc_sync_mappings();
	lttng_transport_register(&lttng_relay_transport);
	return 0;
}

module_init(lttng_ring_buffer_client_init);

static void __exit lttng_ring_buffer_client_exit(void)
{
	lttng_transport_unregister(&lttng_relay_transport);
}

module_exit(lttng_ring_buffer_client_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
		   " client");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);