/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-ring-buffer-client.h
 *
 * LTTng lib ring buffer client template.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <lttng/bitfield.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/trace-clock.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <lttng/tracer.h>
#include <ringbuffer/frontend_types.h>

#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TSC_BITS		27
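
/*
 * Note: the compact event header packs a LTTNG_COMPACT_EVENT_BITS-wide
 * event id and the LTTNG_COMPACT_TSC_BITS low-order timestamp bits into
 * a single 32-bit word (5 + 27 = 32). Event id 31 (all ones in 5 bits)
 * is reserved as an escape value selecting the extended header layout
 * (see lttng_write_event_header_slow() below).
 */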

static struct lttng_transport lttng_relay_transport;

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 *
 * The guarantee we have with timestamps is that all the events in a
 * packet are included (inclusive) within the begin/end timestamps of
 * the packet. Another guarantee we have is that the "timestamp begin",
 * as well as the event timestamps, are monotonically increasing (never
 * decreasing) when moving forward in a stream (physically). But this
 * guarantee does not apply to "timestamp end", because it is sampled at
 * commit time, which is not ordered with respect to space reservation.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t uuid[16];
	uint32_t stream_id;
	uint64_t stream_instance_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint64_t content_size;		/* Size of data in subbuffer */
		uint64_t packet_size;		/* Subbuffer size (includes padding) */
		uint64_t packet_seq_num;	/* Packet sequence number */
		unsigned long events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};
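
/*
 * Illustration (consumer-side sketch, not part of this file):
 * content_size and packet_size are expressed in bits (see
 * client_buffer_end() below), so a hypothetical reader holding a
 * struct packet_header *header would recover byte counts as:
 *
 *	size_t content_bytes = header->ctx.content_size / CHAR_BIT;
 *	size_t padding_bytes = (header->ctx.packet_size
 *				- header->ctx.content_size) / CHAR_BIT;
 */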

struct lttng_client_ctx {
	size_t packet_context_len;
	size_t event_context_len;
};

static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

static inline
size_t ctx_get_aligned_size(size_t offset, struct lttng_kernel_ctx *ctx,
		size_t ctx_len)
{
	size_t orig_offset = offset;

	if (likely(!ctx))
		return 0;
	offset += lib_ring_buffer_align(offset, ctx->largest_align);
	offset += ctx_len;
	return offset - orig_offset;
}

static inline
void ctx_get_struct_size(struct lttng_kernel_ctx *ctx, size_t *ctx_len,
		struct lttng_channel *chan, struct lib_ring_buffer_ctx *bufctx)
{
	int i;
	size_t offset = 0;

	if (likely(!ctx)) {
		*ctx_len = 0;
		return;
	}
	for (i = 0; i < ctx->nr_fields; i++) {
		if (ctx->fields[i].get_size)
			offset += ctx->fields[i].get_size(offset);
		if (ctx->fields[i].get_size_arg)
			offset += ctx->fields[i].get_size_arg(offset,
					&ctx->fields[i], bufctx, chan);
	}
	*ctx_len = offset;
}

static inline
void ctx_record(struct lib_ring_buffer_ctx *bufctx,
		struct lttng_channel *chan,
		struct lttng_kernel_ctx *ctx)
{
	int i;

	if (likely(!ctx))
		return;
	lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
	for (i = 0; i < ctx->nr_fields; i++)
		ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
size_t record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx,
				 struct lttng_client_ctx *client_ctx)
{
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	size_t orig_offset = offset;
	size_t padding;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
				       client_ctx->packet_context_len);
	*pre_header_padding = padding;
	return offset - orig_offset;
}
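
/*
 * Worked example (illustrative): with the compact header type, no
 * context fields and a 4-byte aligned starting offset, the fast path
 * above reserves sizeof(uint32_t) == 4 bytes with no pre-header
 * padding. The extended path reserves 1 byte for the escape id
 * ((5 + 8 - 1) / 8 == 1), then aligns on 8 bytes before the 4-byte
 * full id, and aligns on 8 bytes again before the 8-byte timestamp.
 */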

#include <ringbuffer/api.h>

static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer_ctx *ctx,
				   uint32_t event_id);

/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32 bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void lttng_write_event_header(const struct lib_ring_buffer_config *config,
			      struct lib_ring_buffer_ctx *ctx,
			      uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->priv.chan);

	if (unlikely(ctx->priv.rflags))
		goto slow_path;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t,
				0,
				LTTNG_COMPACT_EVENT_BITS,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
				LTTNG_COMPACT_EVENT_BITS,
				LTTNG_COMPACT_TSC_BITS,
				ctx->priv.tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->priv.tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}
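
/*
 * Layout of the compact fast-path word written above (illustrative,
 * assuming a little-endian target for the bitfield fill order):
 *
 *	bits  0 ..  4	event id (LTTNG_COMPACT_EVENT_BITS)
 *	bits  5 .. 31	low-order timestamp bits (LTTNG_COMPACT_TSC_BITS)
 *
 * e.g. event_id 3 with timestamp low bits 0x100 would yield
 * id_time == (0x100 << LTTNG_COMPACT_EVENT_BITS) | 3 == 0x2003.
 */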

static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer_ctx *ctx,
				   uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->priv.chan);

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					event_id);
			bt_bitfield_write(&id_time, uint32_t,
					LTTNG_COMPACT_EVENT_BITS,
					LTTNG_COMPACT_TSC_BITS, ctx->priv.tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->priv.tsc;

			bt_bitfield_write(&id, uint8_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					31);	/* 31 == escape: extended header follows */
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->priv.tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;	/* escape: extended header follows */
			uint64_t timestamp = ctx->priv.tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}

static const struct lib_ring_buffer_config client_config;

static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx,
				 void *client_ctx)
{
	return record_header_size(config, chan, offset,
				  pre_header_padding, ctx, client_ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Returns the header size without the padding that follows the structure.
 * A packed structure is not used because gcc generates inefficient code on
 * some architectures (powerpc, mips...).
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}
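
/*
 * Note: offsetof(struct packet_header, ctx.header_end) counts every
 * field up to, but not including, the header_end marker, so any
 * trailing padding the compiler may add after the structure is
 * excluded from the on-wire header size.
 */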

static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_session *session = lttng_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->stream_id = lttng_chan->id;
	header->stream_instance_id = buf->backend.cpu;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.content_size = ~0ULL;	/* for debugging */
	header->ctx.packet_size = ~0ULL;
	header->ctx.packet_seq_num = chan->backend.num_subbuf *
			buf->backend.buf_cnt[subbuf_idx].seq_cnt +
			subbuf_idx;
	header->ctx.events_discarded = 0;
	header->ctx.cpu_id = buf->backend.cpu;
}
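
/*
 * Example (illustrative): packet_seq_num numbers packets globally per
 * buffer. Assuming seq_cnt counts completed passes over the subbuffer
 * from zero, with num_subbuf == 4 the third use of subbuffer 1
 * (seq_cnt == 2) gets packet sequence number 4 * 2 + 1 == 9.
 */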

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx, unsigned long data_size)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size =
		(uint64_t) data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size =
		(uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */
	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}

static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name)
{
	return 0;
}

static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}

static struct packet_header *client_packet_header(
		const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf)
{
	return lib_ring_buffer_read_offset_address(&buf->backend, 0);
}

static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *timestamp_begin)
{
	struct packet_header *header = client_packet_header(config, buf);
	*timestamp_begin = header->ctx.timestamp_begin;

	return 0;
}

static int client_timestamp_end(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *timestamp_end)
{
	struct packet_header *header = client_packet_header(config, buf);
	*timestamp_end = header->ctx.timestamp_end;

	return 0;
}

static int client_events_discarded(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *events_discarded)
{
	struct packet_header *header = client_packet_header(config, buf);
	*events_discarded = header->ctx.events_discarded;

	return 0;
}

static int client_content_size(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *content_size)
{
	struct packet_header *header = client_packet_header(config, buf);
	*content_size = header->ctx.content_size;

	return 0;
}

static int client_packet_size(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *packet_size)
{
	struct packet_header *header = client_packet_header(config, buf);
	*packet_size = header->ctx.packet_size;

	return 0;
}

static int client_stream_id(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *stream_id)
{
	struct channel *chan = buf->backend.chan;
	struct lttng_channel *lttng_chan = channel_get_private(chan);

	*stream_id = lttng_chan->id;
	return 0;
}

static int client_current_timestamp(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *bufb,
		uint64_t *ts)
{
	*ts = config->cb.ring_buffer_clock_read(bufb->backend.chan);

	return 0;
}

static int client_sequence_number(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *seq)
{
	struct packet_header *header = client_packet_header(config, buf);

	*seq = header->ctx.packet_seq_num;

	return 0;
}

static
int client_instance_id(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *id)
{
	*id = buf->backend.cpu;

	return 0;
}

static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_OUTPUT_TEMPLATE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
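
/*
 * Note on tsc_bits (my reading of the lib ring buffer frontend, not
 * spelled out in this file): it tells the ring buffer how many
 * timestamp bits the compact header can hold. When the clock delta no
 * longer fits in those 27 bits, the frontend is expected to set
 * RING_BUFFER_RFLAG_FULL_TSC (tested in record_header_size() above),
 * routing header writing through the extended slow path so the full
 * 64-bit timestamp is recorded.
 */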

static
void release_priv_ops(void *priv_ops)
{
	module_put(THIS_MODULE);
}

static
void lttng_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}

static
struct channel *_channel_create(const char *name,
				void *priv, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval)
{
	struct lttng_channel *lttng_chan = priv;
	struct channel *chan;

	chan = channel_create(&client_config, name, lttng_chan, buf_addr,
			      subbuf_size, num_subbuf, switch_timer_interval,
			      read_timer_interval);
	if (chan) {
		/*
		 * Ensure this module is not unloaded before we finish
		 * using lttng_relay_transport.ops.
		 */
		if (!try_module_get(THIS_MODULE)) {
			printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
			goto error;
		}
		chan->backend.priv_ops = &lttng_relay_transport.ops;
		chan->backend.release_priv_ops = release_priv_ops;
	}
	return chan;

error:
	lttng_channel_destroy(chan);
	return NULL;
}

static
struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
{
	struct lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu);
		if (!lib_ring_buffer_open_read(buf))
			return buf;
	}
	return NULL;
}

static
int lttng_buffer_has_read_closed_stream(struct channel *chan)
{
	struct lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu);
		if (!atomic_long_read(&buf->active_readers))
			return 1;
	}
	return 0;
}

static
void lttng_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}

static
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx)
{
	struct lttng_kernel_event_recorder *event_recorder = ctx->client_priv;
	struct lttng_channel *lttng_chan = event_recorder->chan;
	struct lttng_client_ctx client_ctx;
	int ret, cpu;
	uint32_t event_id;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (unlikely(cpu < 0))
		return -EPERM;
	event_id = event_recorder->priv->id;
	memset(&ctx->priv, 0, sizeof(ctx->priv));
	ctx->priv.chan = lttng_chan->chan;
	ctx->priv.reserve_cpu = cpu;

	/* Compute internal size of context structures. */
	ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len, lttng_chan, ctx);

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
	if (unlikely(ret))
		goto put;
	lib_ring_buffer_backend_get_pages(&client_config, ctx,
			&ctx->priv.backend_pages);
	lttng_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}
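
/*
 * Rationale for the thresholds above (illustrative): a 5-bit compact id
 * can encode ids 0..30 directly, with 31 reserved as the escape value;
 * likewise the large header encodes ids 0..65534 in its uint16_t field
 * and reserves 65535 as the escape. Any larger event id must set
 * LTTNG_RFLAG_EXTENDED so the extended header carries the full 32-bit id.
 */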

static
void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}

static
void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		       size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

static
void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
				 const void __user *src, size_t len)
{
	lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
}

static
void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
			int c, size_t len)
{
	lib_ring_buffer_memset(&client_config, ctx, c, len);
}

static
void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
			size_t len)
{
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}

static
void lttng_event_strcpy_from_user(struct lib_ring_buffer_ctx *ctx,
				  const char __user *src, size_t len)
{
	lib_ring_buffer_strcpy_from_user_inatomic(&client_config, ctx, src,
						  len, '#');
}

static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
	struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
					chan, cpu);
	return &buf->write_wait;
}

static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}

static
int lttng_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

static
int lttng_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}

static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = lttng_channel_destroy,
		.buffer_read_open = lttng_buffer_read_open,
		.buffer_has_read_closed_stream =
			lttng_buffer_has_read_closed_stream,
		.buffer_read_close = lttng_buffer_read_close,
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.event_write_from_user = lttng_event_write_from_user,
		.event_memset = lttng_event_memset,
		.event_strcpy = lttng_event_strcpy,
		.event_strcpy_from_user = lttng_event_strcpy_from_user,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
		.get_hp_wait_queue = lttng_get_hp_wait_queue,
		.is_finalized = lttng_is_finalized,
		.is_disabled = lttng_is_disabled,
		.timestamp_begin = client_timestamp_begin,
		.timestamp_end = client_timestamp_end,
		.events_discarded = client_events_discarded,
		.content_size = client_content_size,
		.packet_size = client_packet_size,
		.stream_id = client_stream_id,
		.current_timestamp = client_current_timestamp,
		.sequence_number = client_sequence_number,
		.instance_id = client_instance_id,
	},
};

static int __init lttng_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync-all also takes care of the lib ring buffer's
	 * vmalloc'd module pages when the lib ring buffer is built as a
	 * module into LTTng.
	 */
	wrapper_vmalloc_sync_mappings();
	lttng_transport_register(&lttng_relay_transport);
	return 0;
}

module_init(lttng_ring_buffer_client_init);

static void __exit lttng_ring_buffer_client_exit(void)
{
	lttng_transport_unregister(&lttng_relay_transport);
}

module_exit(lttng_ring_buffer_client_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
		   " client");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);