Fix: update writeback instrumentation for kernel 4.14
[lttng-modules.git] / lttng-ring-buffer-client.h
/*
 * lttng-ring-buffer-client.h
 *
 * LTTng lib ring buffer client template.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <lib/bitfield.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/trace-clock.h>
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>

#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TSC_BITS		27

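/*
 * Illustrative sketch (not part of the original file): how a 5-bit event id
 * and a 27-bit truncated timestamp share one 32-bit word in the compact
 * header. The helper uses plain shifts and masks and assumes the id sits in
 * the low-order bits, which is what the bt_bitfield_write() calls below are
 * used for on a little-endian target; the real macro also handles byte order
 * and arbitrary bit offsets.
 */
#if 0	/* example only, never compiled */
static inline uint32_t compact_header_pack(uint32_t event_id, uint64_t tsc)
{
	uint32_t word;

	/* Bits 0..4: event id (0..30; 31 escapes to the extended header). */
	word = event_id & ((1U << LTTNG_COMPACT_EVENT_BITS) - 1);
	/* Bits 5..31: low-order 27 bits of the timestamp. */
	word |= ((uint32_t) tsc & ((1U << LTTNG_COMPACT_TSC_BITS) - 1))
			<< LTTNG_COMPACT_EVENT_BITS;
	return word;
}

static inline void compact_header_unpack(uint32_t word, uint32_t *event_id,
		uint32_t *tsc_low)
{
	*event_id = word & ((1U << LTTNG_COMPACT_EVENT_BITS) - 1);
	*tsc_low = word >> LTTNG_COMPACT_EVENT_BITS;
}
#endif
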
static struct lttng_transport lttng_relay_transport;

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 *
 * The guarantee we have with timestamps is that all the events in a
 * packet are included (inclusive) within the begin/end timestamps of
 * the packet. Another guarantee we have is that the "timestamp begin",
 * as well as the event timestamps, are monotonically increasing (never
 * decrease) when moving forward in a stream (physically). But this
 * guarantee does not apply to "timestamp end", because it is sampled at
 * commit time, which is not ordered with respect to space reservation.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t uuid[16];
	uint32_t stream_id;
	uint64_t stream_instance_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint64_t content_size;		/* Size of data in subbuffer */
		uint64_t packet_size;		/* Subbuffer size (includes padding) */
		uint64_t packet_seq_num;	/* Packet sequence number */
		unsigned long events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};
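
/*
 * Illustrative sketch (not in the original file): because the structure
 * relies on natural alignment rather than a packed attribute, the layout can
 * be checked at compile time. The assertions below hold for the leading
 * fields on common ABIs; offsets past events_discarded depend on
 * sizeof(unsigned long), so they are deliberately left out.
 */
#if 0	/* example only, never compiled */
#include <stddef.h>

_Static_assert(offsetof(struct packet_header, magic) == 0,
		"magic starts the packet header");
_Static_assert(offsetof(struct packet_header, uuid) == 4,
		"uuid follows magic with no padding");
_Static_assert(offsetof(struct packet_header, stream_id) == 20,
		"stream_id follows uuid with no padding");
_Static_assert(offsetof(struct packet_header, ctx.timestamp_begin) % 8 == 0,
		"64-bit packet context fields stay naturally aligned");
#endif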

struct lttng_client_ctx {
	size_t packet_context_len;
	size_t event_context_len;
};

static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

static inline
size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
		size_t ctx_len)
{
	size_t orig_offset = offset;

	if (likely(!ctx))
		return 0;
	offset += lib_ring_buffer_align(offset, ctx->largest_align);
	offset += ctx_len;
	return offset - orig_offset;
}
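
/*
 * Illustrative sketch (not in the original file): the helper above assumes
 * lib_ring_buffer_align() returns the number of padding bytes needed to
 * bring "offset" up to the next multiple of a power-of-two alignment, which
 * reduces to the usual mask trick:
 */
#if 0	/* example only, never compiled */
static inline size_t example_align_padding(size_t offset, size_t align)
{
	/* align must be a power of two, e.g. ctx->largest_align. */
	return (align - offset) & (align - 1);
}
/* e.g. offset = 13, align = 8 -> 3 bytes of padding, payload starts at 16. */
#endif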

static inline
void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len)
{
	int i;
	size_t offset = 0;

	if (likely(!ctx)) {
		*ctx_len = 0;
		return;
	}
	for (i = 0; i < ctx->nr_fields; i++)
		offset += ctx->fields[i].get_size(offset);
	*ctx_len = offset;
}

static inline
void ctx_record(struct lib_ring_buffer_ctx *bufctx,
		struct lttng_channel *chan,
		struct lttng_ctx *ctx)
{
	int i;

	if (likely(!ctx))
		return;
	lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
	for (i = 0; i < ctx->nr_fields; i++)
		ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}
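
/*
 * Illustrative sketch (not in the original file): how a context field's
 * get_size()/record() callbacks are expected to cooperate with the helpers
 * above. The cpu_id_get_size()/cpu_id_record() names are made up for the
 * example; the real callbacks live in the lttng-context-*.c files.
 */
#if 0	/* example only, never compiled */
static size_t cpu_id_get_size(size_t offset)
{
	size_t size = 0;

	/* Same contract as ctx_get_struct_size(): alignment padding from
	 * "offset", plus the payload size. */
	size += lib_ring_buffer_align(offset, lttng_alignof(int));
	size += sizeof(int);
	return size;
}

static void cpu_id_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	int cpu = smp_processor_id();

	/* Same contract as ctx_record(): align, then write the payload. */
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu));
	chan->ops->event_write(ctx, &cpu, sizeof(cpu));
}
#endif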

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
size_t record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx,
				 struct lttng_client_ctx *client_ctx)
{
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;
	size_t orig_offset = offset;
	size_t padding;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
				       client_ctx->packet_context_len);
	offset += ctx_get_aligned_size(offset, event->ctx,
				       client_ctx->event_context_len);

	*pre_header_padding = padding;
	return offset - orig_offset;
}
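
/*
 * Worked example (not in the original file), tracing record_header_size()
 * for an event reserved at a 0 offset with no context fields:
 *
 *   compact, fast path: align(4) = 0, + 4 bytes (packed id/timestamp)  =  4
 *   compact, extended:  + 1 byte (5-bit id rounded up), align(8) -> 8,
 *                       + 4 bytes (full id) = 12, align(8) -> 16,
 *                       + 8 bytes (full timestamp)                     = 24
 *   large, fast path:   align(2) = 0, + 2 bytes (id), align(4) -> 4,
 *                       + 4 bytes (32-bit timestamp)                   =  8
 *   large, extended:    + 2 bytes (id), align(8) -> 8,
 *                       + 4 bytes (full id) = 12, align(8) -> 16,
 *                       + 8 bytes (full timestamp)                     = 24
 */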

#include <wrapper/ringbuffer/api.h>

static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint32_t event_id);

/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void lttng_write_event_header(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;

	if (unlikely(ctx->rflags))
		goto slow_path;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t,
				0,
				LTTNG_COMPACT_EVENT_BITS,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
				LTTNG_COMPACT_EVENT_BITS,
				LTTNG_COMPACT_TSC_BITS,
				ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}

static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					event_id);
			bt_bitfield_write(&id_time, uint32_t,
					LTTNG_COMPACT_EVENT_BITS,
					LTTNG_COMPACT_TSC_BITS, ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			bt_bitfield_write(&id, uint8_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
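
/*
 * Illustrative sketch (not in the original file): how a trace reader is
 * expected to undo the escape encoding written by the slow path above. The
 * read_*()/align_to_bytes() primitives are hypothetical; a real CTF reader
 * such as babeltrace derives this logic from the trace metadata instead.
 */
#if 0	/* example only, never compiled */
	/* Compact header decode. */
	id = read_bits(stream, LTTNG_COMPACT_EVENT_BITS);
	if (id != 31) {
		tsc_low = read_bits(stream, LTTNG_COMPACT_TSC_BITS);
	} else {			/* 31: extended header follows */
		align_to_bytes(stream, 8);
		id = read_u32(stream);
		align_to_bytes(stream, 8);
		tsc = read_u64(stream);
	}

	/* Large header decode. */
	id = read_u16(stream);
	if (id != 65535) {
		align_to_bytes(stream, 4);
		tsc_low = read_u32(stream);
	} else {			/* 65535: extended header follows */
		align_to_bytes(stream, 8);
		id = read_u32(stream);
		align_to_bytes(stream, 8);
		tsc = read_u64(stream);
	}
#endif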

static const struct lib_ring_buffer_config client_config;

static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx,
				 void *client_ctx)
{
	return record_header_size(config, chan, offset,
				  pre_header_padding, ctx, client_ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_session *session = lttng_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->stream_id = lttng_chan->id;
	header->stream_instance_id = buf->backend.cpu;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.content_size = ~0ULL; /* for debugging */
	header->ctx.packet_size = ~0ULL;
	header->ctx.packet_seq_num = chan->backend.num_subbuf *
				     buf->backend.buf_cnt[subbuf_idx].seq_cnt +
				     subbuf_idx;
	header->ctx.events_discarded = 0;
	header->ctx.cpu_id = buf->backend.cpu;
}
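
/*
 * Illustrative note (not in the original file): packet_seq_num combines how
 * many times this sub-buffer slot has been reused (seq_cnt) with its index,
 * so it keeps increasing across buffer wrap-around. For example, with
 * num_subbuf = 4, sub-buffer 2 on its third use (seq_cnt = 2) gets sequence
 * number 4 * 2 + 2 = 10.
 */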

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx, unsigned long data_size)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size =
		(uint64_t) data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size =
		(uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */
	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
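
/*
 * Worked example (not in the original file): with 4 KiB pages, a sub-buffer
 * holding 3000 bytes of payload is reported as
 * content_size = 3000 * 8 = 24000 bits and
 * packet_size = PAGE_ALIGN(3000) * 8 = 4096 * 8 = 32768 bits;
 * the difference is trailing padding a trace reader must skip.
 */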

static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name)
{
	return 0;
}

static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}

static struct packet_header *client_packet_header(
		const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf)
{
	return lib_ring_buffer_read_offset_address(&buf->backend, 0);
}

static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *timestamp_begin)
{
	struct packet_header *header = client_packet_header(config, buf);
	*timestamp_begin = header->ctx.timestamp_begin;

	return 0;
}

static int client_timestamp_end(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *timestamp_end)
{
	struct packet_header *header = client_packet_header(config, buf);
	*timestamp_end = header->ctx.timestamp_end;

	return 0;
}

static int client_events_discarded(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *events_discarded)
{
	struct packet_header *header = client_packet_header(config, buf);
	*events_discarded = header->ctx.events_discarded;

	return 0;
}

static int client_content_size(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *content_size)
{
	struct packet_header *header = client_packet_header(config, buf);
	*content_size = header->ctx.content_size;

	return 0;
}

static int client_packet_size(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *packet_size)
{
	struct packet_header *header = client_packet_header(config, buf);
	*packet_size = header->ctx.packet_size;

	return 0;
}

static int client_stream_id(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *stream_id)
{
	struct packet_header *header = client_packet_header(config, buf);
	*stream_id = header->stream_id;

	return 0;
}

static int client_current_timestamp(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *bufb,
		uint64_t *ts)
{
	*ts = config->cb.ring_buffer_clock_read(bufb->backend.chan);

	return 0;
}

static int client_sequence_number(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *seq)
{
	struct packet_header *header = client_packet_header(config, buf);

	*seq = header->ctx.packet_seq_num;

	return 0;
}

static
int client_instance_id(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf,
		uint64_t *id)
{
	struct packet_header *header = client_packet_header(config, buf);
	*id = header->stream_instance_id;

	return 0;
}

static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_OUTPUT_TEMPLATE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
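
/*
 * Illustrative sketch (not in the original file): .tsc_bits tells the ring
 * buffer library how many low-order timestamp bits fit in the compact event
 * header. The overflow check is expected to reduce to comparing the
 * high-order bits of consecutive timestamps, roughly as below; when they
 * differ, the reservation is flagged (RING_BUFFER_RFLAG_FULL_TSC) so that
 * the slow path emits a full 64-bit timestamp.
 */
#if 0	/* example only, never compiled */
static inline int example_tsc_overflows(u64 last_tsc, u64 tsc,
		unsigned int tsc_bits)
{
	return (last_tsc >> tsc_bits) != (tsc >> tsc_bits);
}
#endif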

static
void release_priv_ops(void *priv_ops)
{
	module_put(THIS_MODULE);
}

static
void lttng_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}

static
struct channel *_channel_create(const char *name,
				struct lttng_channel *lttng_chan, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval)
{
	struct channel *chan;

	chan = channel_create(&client_config, name, lttng_chan, buf_addr,
			      subbuf_size, num_subbuf, switch_timer_interval,
			      read_timer_interval);
	if (chan) {
		/*
		 * Ensure this module is not unloaded before we finish
		 * using lttng_relay_transport.ops.
		 */
		if (!try_module_get(THIS_MODULE)) {
			printk(KERN_WARNING "LTT : Can't lock transport module.\n");
			goto error;
		}
		chan->backend.priv_ops = &lttng_relay_transport.ops;
		chan->backend.release_priv_ops = release_priv_ops;
	}
	return chan;

error:
	lttng_channel_destroy(chan);
	return NULL;
}
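
/*
 * Illustrative note (not in the original file): the try_module_get() call
 * above pins this module while chan->backend.priv_ops points into module
 * memory; the matching module_put() is expected to run when the ring buffer
 * backend invokes release_priv_ops() during channel teardown, giving the
 * usual pairing:
 *
 *	if (!try_module_get(THIS_MODULE))
 *		fail;
 *	...	(channel alive, transport ops may be called)
 *	module_put(THIS_MODULE);	(via release_priv_ops)
 */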

static
struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
{
	struct lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu);
		if (!lib_ring_buffer_open_read(buf))
			return buf;
	}
	return NULL;
}

static
int lttng_buffer_has_read_closed_stream(struct channel *chan)
{
	struct lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu);
		if (!atomic_long_read(&buf->active_readers))
			return 1;
	}
	return 0;
}

static
void lttng_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}

static
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
		      uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;
	struct lttng_client_ctx client_ctx;
	int ret, cpu;

	/* Compute internal size of context structures. */
	ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len);
	ctx_get_struct_size(event->ctx, &client_ctx.event_context_len);

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (unlikely(cpu < 0))
		return -EPERM;
	ctx->cpu = cpu;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
	if (unlikely(ret))
		goto put;
	lib_ring_buffer_backend_get_pages(&client_config, ctx,
			&ctx->backend_pages);
	lttng_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}

static
void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}
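
/*
 * Illustrative sketch (not in the original file): the calling sequence a
 * tracepoint probe is expected to follow around the reserve/commit pair
 * above, going through the transport ops. The "my_payload" variable and its
 * size/alignment are hypothetical.
 */
#if 0	/* example only, never compiled */
	struct lib_ring_buffer_ctx ctx;
	int ret;

	lib_ring_buffer_ctx_init(&ctx, chan->chan, &lttng_probe_ctx,
			sizeof(my_payload), lttng_alignof(my_payload), -1);
	ret = chan->ops->event_reserve(&ctx, event_id);
	if (ret < 0)
		return;				/* buffer full or blocked */
	chan->ops->event_write(&ctx, &my_payload, sizeof(my_payload));
	chan->ops->event_commit(&ctx);
#endif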

static
void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

static
void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
			       const void __user *src, size_t len)
{
	lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
}

static
void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
		int c, size_t len)
{
	lib_ring_buffer_memset(&client_config, ctx, c, len);
}

static
void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
		size_t len)
{
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}

static
void lttng_event_strcpy_from_user(struct lib_ring_buffer_ctx *ctx,
		const char __user *src, size_t len)
{
	lib_ring_buffer_strcpy_from_user_inatomic(&client_config, ctx, src,
			len, '#');
}

static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
	struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
					chan, cpu);
	return &buf->write_wait;
}

static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}

static
int lttng_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

static
int lttng_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}

static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = lttng_channel_destroy,
		.buffer_read_open = lttng_buffer_read_open,
		.buffer_has_read_closed_stream =
			lttng_buffer_has_read_closed_stream,
		.buffer_read_close = lttng_buffer_read_close,
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.event_write_from_user = lttng_event_write_from_user,
		.event_memset = lttng_event_memset,
		.event_strcpy = lttng_event_strcpy,
		.event_strcpy_from_user = lttng_event_strcpy_from_user,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
		.get_hp_wait_queue = lttng_get_hp_wait_queue,
		.is_finalized = lttng_is_finalized,
		.is_disabled = lttng_is_disabled,
		.timestamp_begin = client_timestamp_begin,
		.timestamp_end = client_timestamp_end,
		.events_discarded = client_events_discarded,
		.content_size = client_content_size,
		.packet_size = client_packet_size,
		.stream_id = client_stream_id,
		.current_timestamp = client_current_timestamp,
		.sequence_number = client_sequence_number,
		.instance_id = client_instance_id,
	},
};

static int __init lttng_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync all also takes care of the lib ring buffer
	 * vmalloc'd module pages when it is built as a module into LTTng.
	 */
	wrapper_vmalloc_sync_all();
	lttng_transport_register(&lttng_relay_transport);
	return 0;
}

module_init(lttng_ring_buffer_client_init);

static void __exit lttng_ring_buffer_client_exit(void)
{
	lttng_transport_unregister(&lttng_relay_transport);
}

module_exit(lttng_ring_buffer_client_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
		" client");