Cleanup: modinfo keys
[lttng-modules.git] / lttng-ring-buffer-client.h
1 /*
2 * lttng-ring-buffer-client.h
3 *
4 * LTTng lib ring buffer client template.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <lib/bitfield.h>
26 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
27 #include <wrapper/trace-clock.h>
28 #include <lttng-events.h>
29 #include <lttng-tracer.h>
30 #include <wrapper/ringbuffer/frontend_types.h>
31
32 #define LTTNG_COMPACT_EVENT_BITS 5
33 #define LTTNG_COMPACT_TSC_BITS 27
34
35 static struct lttng_transport lttng_relay_transport;
36
37 /*
38 * Keep the natural field alignment for _each field_ within this structure if
39 * you ever add/remove a field from this header. Packed attribute is not used
40 * because gcc generates poor code on at least powerpc and mips. Don't ever
41 * let gcc add padding between the structure elements.
42 *
43 * The guarantee we have with timestamps is that all the events in a
44 * packet are included (inclusive) within the begin/end timestamps of
45 * the packet. Another guarantee we have is that the "timestamp begin",
46 * as well as the event timestamps, are monotonically increasing (never
47 * decrease) when moving forward in a stream (physically). But this
48 * guarantee does not apply to "timestamp end", because it is sampled at
49 * commit time, which is not ordered with respect to space reservation.
50 */
51
/*
 * CTF trace packet header, written at the start of every sub-buffer by
 * client_buffer_begin()/client_buffer_end(). Field layout is part of the
 * trace format: keep natural alignment, never pack (see comment above).
 */
struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t uuid[16];		/* Trace session UUID (see client_buffer_begin) */
	uint32_t stream_id;		/* Channel id (lttng_chan->id) */
	uint64_t stream_instance_id;	/* Per-CPU buffer: CPU number */

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint64_t content_size;		/* Size of data in subbuffer */
		uint64_t packet_size;		/* Subbuffer size (include padding) */
		uint64_t packet_seq_num;	/* Packet sequence number */
		unsigned long events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};
78
/*
 * Serialized sizes (bytes) of the channel-level and event-level context
 * structures. Precomputed once in lttng_event_reserve() via
 * ctx_get_struct_size() so record_header_size() does not walk the
 * context fields a second time.
 */
struct lttng_client_ctx {
	size_t packet_context_len;	/* length of lttng_chan->ctx payload */
	size_t event_context_len;	/* length of event->ctx payload */
};
83
84 static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
85 {
86 return trace_clock_read64();
87 }
88
89 static inline
90 size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
91 size_t ctx_len)
92 {
93 size_t orig_offset = offset;
94
95 if (likely(!ctx))
96 return 0;
97 offset += lib_ring_buffer_align(offset, ctx->largest_align);
98 offset += ctx_len;
99 return offset - orig_offset;
100 }
101
102 static inline
103 void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len,
104 struct lttng_channel *chan, struct lib_ring_buffer_ctx *bufctx)
105 {
106 int i;
107 size_t offset = 0;
108
109 if (likely(!ctx)) {
110 *ctx_len = 0;
111 return;
112 }
113 for (i = 0; i < ctx->nr_fields; i++) {
114 if (ctx->fields[i].get_size)
115 offset += ctx->fields[i].get_size(offset);
116 if (ctx->fields[i].get_size_arg)
117 offset += ctx->fields[i].get_size_arg(offset,
118 &ctx->fields[i], bufctx, chan);
119 }
120 *ctx_len = offset;
121 }
122
123 static inline
124 void ctx_record(struct lib_ring_buffer_ctx *bufctx,
125 struct lttng_channel *chan,
126 struct lttng_ctx *ctx)
127 {
128 int i;
129
130 if (likely(!ctx))
131 return;
132 lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
133 for (i = 0; i < ctx->nr_fields; i++)
134 ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
135 }
136
/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 * @client_ctx: precomputed channel/event context lengths
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 *
 * NOTE: the offset arithmetic below must mirror, step for step, what
 * lttng_write_event_header()/lttng_write_event_header_slow() emit.
 */
static __inline__
size_t record_header_size(const struct lib_ring_buffer_config *config,
		struct channel *chan, size_t offset,
		size_t *pre_header_padding,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_client_ctx *client_ctx)
{
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;
	size_t orig_offset = offset;
	size_t padding;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			/* Fast path: id and timestamp packed in one 32-bit word. */
			offset += sizeof(uint32_t); /* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t); /* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t); /* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);	/* 16-bit event id */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t); /* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t); /* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t); /* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	/* Channel-level, then event-level context, in write order. */
	offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
			client_ctx->packet_context_len);
	offset += ctx_get_aligned_size(offset, event->ctx,
			client_ctx->event_context_len);

	*pre_header_padding = padding;
	return offset - orig_offset;
}
206
207 #include <wrapper/ringbuffer/api.h>
208
209 static
210 void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
211 struct lib_ring_buffer_ctx *ctx,
212 uint32_t event_id);
213
/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 * Fast path only: any reservation flag (full TSC needed, extended id)
 * diverts to lttng_write_event_header_slow().
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void lttng_write_event_header(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;

	if (unlikely(ctx->rflags))
		goto slow_path;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		/* Pack 5-bit id and 27-bit truncated TSC into one word. */
		bt_bitfield_write(&id_time, uint32_t,
				0,
				LTTNG_COMPACT_EVENT_BITS,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
				LTTNG_COMPACT_EVENT_BITS,
				LTTNG_COMPACT_TSC_BITS,
				ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	/* Contexts follow the header: channel-level first, then event-level. */
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}
274
/*
 * Slow-path event header writer: handles the extended header layouts
 * used when a full 64-bit timestamp and/or a wide event id is required
 * (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED). Must emit
 * exactly the layout sized by record_header_size().
 */
static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					event_id);
			bt_bitfield_write(&id_time, uint32_t,
					LTTNG_COMPACT_EVENT_BITS,
					LTTNG_COMPACT_TSC_BITS, ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			/* 31 is the 5-bit escape value meaning "extended header". */
			bt_bitfield_write(&id, uint8_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			/* 65535 is the 16-bit escape value meaning "extended header". */
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	/* Contexts follow the header: channel-level first, then event-level. */
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
342
343 static const struct lib_ring_buffer_config client_config;
344
/* Ring buffer callback: sample the trace clock for @chan. */
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}
349
350 static
351 size_t client_record_header_size(const struct lib_ring_buffer_config *config,
352 struct channel *chan, size_t offset,
353 size_t *pre_header_padding,
354 struct lib_ring_buffer_ctx *ctx,
355 void *client_ctx)
356 {
357 return record_header_size(config, chan, offset,
358 pre_header_padding, ctx, client_ctx);
359 }
360
/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 *
 * header_end is a zero-width-in-spirit marker field: offsetof() to it
 * yields the useful header length, excluding trailing struct padding.
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}
372
/*
 * Ring buffer callback invoked when switching to a new sub-buffer:
 * initialize the CTF packet header at the sub-buffer's start address.
 * content_size/packet_size are set to ~0 here and filled in for real by
 * client_buffer_end(); timestamp_end likewise.
 */
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
		unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_session *session = lttng_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->stream_id = lttng_chan->id;
	header->stream_instance_id = buf->backend.cpu;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.content_size = ~0ULL; /* for debugging */
	header->ctx.packet_size = ~0ULL;
	/*
	 * Global packet ordinal: how many times this sub-buffer slot has
	 * been reused, scaled by the slot count, plus the slot index.
	 */
	header->ctx.packet_seq_num = chan->backend.num_subbuf * \
			buf->backend.buf_cnt[subbuf_idx].seq_cnt + \
			subbuf_idx;
	header->ctx.events_discarded = 0;
	header->ctx.cpu_id = buf->backend.cpu;
}
398
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 *
 * Finalizes the packet header started by client_buffer_begin():
 * end timestamp, sizes (expressed in bits, per CTF), and the total
 * number of records lost for any reason (buffer full, wrap, too big).
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
		unsigned int subbuf_idx, unsigned long data_size)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size =
		(uint64_t) data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size =
		(uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */
	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
423
/* Per-buffer creation hook: this client keeps no private buffer state. */
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
		int cpu, const char *name)
{
	return 0;
}
429
/* Per-buffer teardown hook: nothing to release (see client_buffer_create). */
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}
433
/*
 * Return the packet header of the sub-buffer currently held for
 * reading (read-side offset 0).
 */
static struct packet_header *client_packet_header(
		const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf)
{
	return lib_ring_buffer_read_offset_address(&buf->backend, 0);
}
440
441 static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
442 struct lib_ring_buffer *buf,
443 uint64_t *timestamp_begin)
444 {
445 struct packet_header *header = client_packet_header(config, buf);
446 *timestamp_begin = header->ctx.timestamp_begin;
447
448 return 0;
449 }
450
451 static int client_timestamp_end(const struct lib_ring_buffer_config *config,
452 struct lib_ring_buffer *buf,
453 uint64_t *timestamp_end)
454 {
455 struct packet_header *header = client_packet_header(config, buf);
456 *timestamp_end = header->ctx.timestamp_end;
457
458 return 0;
459 }
460
461 static int client_events_discarded(const struct lib_ring_buffer_config *config,
462 struct lib_ring_buffer *buf,
463 uint64_t *events_discarded)
464 {
465 struct packet_header *header = client_packet_header(config, buf);
466 *events_discarded = header->ctx.events_discarded;
467
468 return 0;
469 }
470
471 static int client_content_size(const struct lib_ring_buffer_config *config,
472 struct lib_ring_buffer *buf,
473 uint64_t *content_size)
474 {
475 struct packet_header *header = client_packet_header(config, buf);
476 *content_size = header->ctx.content_size;
477
478 return 0;
479 }
480
481 static int client_packet_size(const struct lib_ring_buffer_config *config,
482 struct lib_ring_buffer *buf,
483 uint64_t *packet_size)
484 {
485 struct packet_header *header = client_packet_header(config, buf);
486 *packet_size = header->ctx.packet_size;
487
488 return 0;
489 }
490
491 static int client_stream_id(const struct lib_ring_buffer_config *config,
492 struct lib_ring_buffer *buf,
493 uint64_t *stream_id)
494 {
495 struct packet_header *header = client_packet_header(config, buf);
496 *stream_id = header->stream_id;
497
498 return 0;
499 }
500
501 static int client_current_timestamp(const struct lib_ring_buffer_config *config,
502 struct lib_ring_buffer *bufb,
503 uint64_t *ts)
504 {
505 *ts = config->cb.ring_buffer_clock_read(bufb->backend.chan);
506
507 return 0;
508 }
509
510 static int client_sequence_number(const struct lib_ring_buffer_config *config,
511 struct lib_ring_buffer *buf,
512 uint64_t *seq)
513 {
514 struct packet_header *header = client_packet_header(config, buf);
515
516 *seq = header->ctx.packet_seq_num;
517
518 return 0;
519 }
520
521 static
522 int client_instance_id(const struct lib_ring_buffer_config *config,
523 struct lib_ring_buffer *buf,
524 uint64_t *id)
525 {
526 struct packet_header *header = client_packet_header(config, buf);
527 *id = header->stream_instance_id;
528
529 return 0;
530 }
531
/*
 * Ring buffer configuration for this client: per-CPU buffers, compact
 * 27-bit TSC in event headers, callbacks defined above. MODE/OUTPUT
 * templates are supplied by the including .c file.
 */
static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,	/* bits kept in compact headers */
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_OUTPUT_TEMPLATE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
551
/*
 * Drop the module reference taken in _channel_create() once the channel
 * backend is done with lttng_relay_transport.ops.
 */
static
void release_priv_ops(void *priv_ops)
{
	module_put(THIS_MODULE);
}
557
/* Transport op: tear down a channel created by _channel_create(). */
static
void lttng_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}
563
/*
 * Transport op: create a channel with this client's configuration and
 * pin this module for as long as the channel backend may call through
 * lttng_relay_transport.ops. Returns the channel, or NULL on failure
 * (channel_create failure or module reference unavailable).
 */
static
struct channel *_channel_create(const char *name,
		struct lttng_channel *lttng_chan, void *buf_addr,
		size_t subbuf_size, size_t num_subbuf,
		unsigned int switch_timer_interval,
		unsigned int read_timer_interval)
{
	struct channel *chan;

	chan = channel_create(&client_config, name, lttng_chan, buf_addr,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (chan) {
		/*
		 * Ensure this module is not unloaded before we finish
		 * using lttng_relay_transport.ops.
		 */
		if (!try_module_get(THIS_MODULE)) {
			printk(KERN_WARNING "LTT : Can't lock transport module.\n");
			goto error;
		}
		chan->backend.priv_ops = &lttng_relay_transport.ops;
		/* Reference is dropped by release_priv_ops() at destroy time. */
		chan->backend.release_priv_ops = release_priv_ops;
	}
	return chan;

error:
	lttng_channel_destroy(chan);
	return NULL;
}
594
595 static
596 struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
597 {
598 struct lib_ring_buffer *buf;
599 int cpu;
600
601 for_each_channel_cpu(cpu, chan) {
602 buf = channel_get_ring_buffer(&client_config, chan, cpu);
603 if (!lib_ring_buffer_open_read(buf))
604 return buf;
605 }
606 return NULL;
607 }
608
609 static
610 int lttng_buffer_has_read_closed_stream(struct channel *chan)
611 {
612 struct lib_ring_buffer *buf;
613 int cpu;
614
615 for_each_channel_cpu(cpu, chan) {
616 buf = channel_get_ring_buffer(&client_config, chan, cpu);
617 if (!atomic_long_read(&buf->active_readers))
618 return 1;
619 }
620 return 0;
621 }
622
/* Transport op: release the read-side reference taken by read_open. */
static
void lttng_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
628
/*
 * Transport op: reserve buffer space for one event and write its header.
 *
 * Returns 0 on success (the CPU reference taken here is released by the
 * matching lttng_event_commit()), -EPERM when no CPU reference can be
 * taken, or the lib_ring_buffer_reserve() error (CPU reference dropped
 * before returning).
 */
static
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;
	struct lttng_client_ctx client_ctx;
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (unlikely(cpu < 0))
		return -EPERM;
	ctx->cpu = cpu;

	/* Compute internal size of context structures. */
	ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len, lttng_chan, ctx);
	ctx_get_struct_size(event->ctx, &client_ctx.event_context_len, lttng_chan, ctx);

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		/*
		 * 5-bit compact id: 0..30 fit inline, 31 is the escape
		 * value written by the extended slow path.
		 */
		if (event_id > 30)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		/* 16-bit id: 65535 is the extended-header escape value. */
		if (event_id > 65534)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
	if (unlikely(ret))
		goto put;
	lib_ring_buffer_backend_get_pages(&client_config, ctx,
			&ctx->backend_pages);
	lttng_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}
672
/*
 * Transport op: commit the reservation made by lttng_event_reserve()
 * and release the CPU reference taken there.
 */
static
void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}
679
/* Transport op: copy @len bytes of kernel memory into the reservation. */
static
void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}
686
/* Transport op: copy @len bytes from userspace into the reservation. */
static
void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
		const void __user *src, size_t len)
{
	lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
}
693
/* Transport op: fill @len bytes of the reservation with byte @c. */
static
void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
		int c, size_t len)
{
	lib_ring_buffer_memset(&client_config, ctx, c, len);
}
700
/*
 * Transport op: copy a kernel string into the reservation, padding with
 * '#' up to @len (pad character consumed by trace readers).
 */
static
void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
		size_t len)
{
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}
707
/*
 * Transport op: copy a userspace string into the reservation, padding
 * with '#' up to @len, without faulting (inatomic).
 */
static
void lttng_event_strcpy_from_user(struct lib_ring_buffer_ctx *ctx,
		const char __user *src, size_t len)
{
	lib_ring_buffer_strcpy_from_user_inatomic(&client_config, ctx, src,
			len, '#');
}
715
716 static
717 wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
718 {
719 struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
720 chan, cpu);
721 return &buf->write_wait;
722 }
723
/* Transport op: channel-wide hotplug wait queue. */
static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
729
/* Transport op: non-zero when the channel has been finalized. */
static
int lttng_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}
735
/* Transport op: non-zero when the channel is disabled. */
static
int lttng_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}
741
/*
 * Transport descriptor registered with the LTTng core at module init.
 * Bundles the channel/buffer/event operations defined above; the name
 * is parameterized by the buffer mode template (discard/overwrite).
 */
static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = lttng_channel_destroy,
		.buffer_read_open = lttng_buffer_read_open,
		.buffer_has_read_closed_stream =
			lttng_buffer_has_read_closed_stream,
		.buffer_read_close = lttng_buffer_read_close,
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.event_write_from_user = lttng_event_write_from_user,
		.event_memset = lttng_event_memset,
		.event_strcpy = lttng_event_strcpy,
		.event_strcpy_from_user = lttng_event_strcpy_from_user,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
		.get_hp_wait_queue = lttng_get_hp_wait_queue,
		.is_finalized = lttng_is_finalized,
		.is_disabled = lttng_is_disabled,
		.timestamp_begin = client_timestamp_begin,
		.timestamp_end = client_timestamp_end,
		.events_discarded = client_events_discarded,
		.content_size = client_content_size,
		.packet_size = client_packet_size,
		.stream_id = client_stream_id,
		.current_timestamp = client_current_timestamp,
		.sequence_number = client_sequence_number,
		.instance_id = client_instance_id,
	},
};
775
/* Module entry point: sync vmalloc mappings, then register the transport. */
static int __init lttng_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync all also takes care of the lib ring buffer
	 * vmalloc'd module pages when it is built as a module into LTTng.
	 */
	wrapper_vmalloc_sync_all();
	lttng_transport_register(&lttng_relay_transport);
	return 0;
}

module_init(lttng_ring_buffer_client_init);
788
/* Module exit point: unregister the transport registered at init. */
static void __exit lttng_ring_buffer_client_exit(void)
{
	lttng_transport_unregister(&lttng_relay_transport);
}

module_exit(lttng_ring_buffer_client_exit);
795
/* Module metadata; version assembled from the LTTng modules version macros. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
		" client");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);
This page took 0.045296 seconds and 5 git commands to generate.