/*
 * ltt-ring-buffer-client.h
 *
 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng lib ring buffer client template.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/module.h>
#include <linux/types.h>
#include "lib/bitfield.h"
#include "wrapper/vmalloc.h"	/* for wrapper_vmalloc_sync_all() */
#include "wrapper/trace-clock.h"
#include "ltt-events.h"
#include "ltt-tracer.h"
#include "wrapper/ringbuffer/frontend_types.h"

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t uuid[16];
	uint32_t stream_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t content_size;		/* Size of data in subbuffer */
		uint32_t packet_size;		/* Subbuffer size (includes padding) */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};
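
/*
 * Editorial sketch, not part of the original client: with natural alignment,
 * magic (4) + uuid (16) + stream_id (4) put the packet context at offset 24,
 * and header_end lands at offset 56, which is the value
 * client_packet_header_size() below returns.  Assuming BUILD_BUG_ON() is
 * reachable through the includes above, a helper like this could assert the
 * layout at build time:
 */
static inline void packet_header_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct packet_header, ctx) != 24);
	BUILD_BUG_ON(offsetof(struct packet_header, ctx.header_end) != 56);
}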


static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @data_size: size of the payload
 * @pre_header_padding: padding to add before the header (output)
 * @rflags: reservation flags
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t data_size, size_t *pre_header_padding,
				 unsigned int rflags,
				 struct lib_ring_buffer_ctx *ctx)
{
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	size_t orig_offset = offset;
	size_t padding;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
		offset += padding;
		if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by 5-bit id */
			offset += sizeof(uint8_t);
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;	/* keep *pre_header_padding defined on this path */
		WARN_ON(1);
	}

	*pre_header_padding = padding;
	return offset - orig_offset;
}
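
/*
 * Worked example (editorial sketch): starting from an 8-byte aligned offset,
 * the compact header without RING_BUFFER_RFLAG_FULL_TSC is a single uint32_t,
 * so record_header_size() returns 4 with no pre-header padding.  With the
 * flag set it becomes 1 (8-bit id) + 7 (align) + 4 (32-bit id) + 4 (align)
 * + 8 (64-bit timestamp) = 24 bytes.  The large header is 2 + 2 + 4 = 8 bytes
 * in the common case and likewise 24 bytes in the extended case.
 */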

#include "wrapper/ringbuffer/api.h"

extern
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint16_t event_id);

/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    uint16_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);

	if (unlikely(ctx->rflags))
		goto slow_path;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
		bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;

		lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
		lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON(1);
	}
	return;

slow_path:
	ltt_write_event_header_slow(config, ctx, event_id);
}

/*
 * TODO: For now, we only support 65536 event ids per channel.
 */
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint16_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
			bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint32_t event_id_ext = (uint32_t) event_id;
			uint64_t timestamp = ctx->tsc;

			/* 31 in the 5-bit id field signals an extended header */
			bt_bitfield_write(&id, uint8_t, 0, 5, 31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id_ext, sizeof(event_id_ext));
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
			uint32_t timestamp = (uint32_t) ctx->tsc;

			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;	/* escape value: extended header follows */
			uint32_t event_id_ext = (uint32_t) event_id;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id_ext, sizeof(event_id_ext));
			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON(1);
	}
}
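
/*
 * Editorial summary of the event header layouts written above (sizes assume
 * natural alignment from an aligned start):
 *
 *   compact:          [ 5-bit id | 27-bit timestamp ]                     4 bytes
 *   compact extended: [ 8-bit id = 31 ][ pad ][ 32-bit id ][ pad ][ 64-bit timestamp ]
 *   large:            [ 16-bit id ][ pad ][ 32-bit timestamp ]            8 bytes
 *   large extended:   [ 16-bit id = 65535 ][ pad ][ 32-bit id ][ pad ][ 64-bit timestamp ]
 */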

static const struct lib_ring_buffer_config client_config;

static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t data_size,
				 size_t *pre_header_padding,
				 unsigned int rflags,
				 struct lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset, data_size,
				  pre_header_padding, rflags, ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_session *session = ltt_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->stream_id = ltt_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.events_discarded = 0;
	header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
	header->ctx.packet_size = 0xFFFFFFFF;
	header->ctx.cpu_id = buf->backend.cpu;
}

/*
 * offset is assumed to never be 0 here: we never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx, unsigned long data_size)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size = data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */
	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
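
/*
 * Worked example (editorial, assumes 4 KiB pages): a sub-buffer carrying
 * data_size = 100 bytes ends up with content_size = 100 * 8 = 800 bits and
 * packet_size = PAGE_ALIGN(100) * CHAR_BIT = 4096 * 8 = 32768 bits; a CTF
 * reader uses the difference between the two to skip the padding at the end
 * of the packet.
 */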

static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name)
{
	return 0;
}

static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}

static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = 32,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_SPLICE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};

static
struct channel *_channel_create(const char *name,
				struct ltt_channel *ltt_chan, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval)
{
	return channel_create(&client_config, name, ltt_chan, buf_addr,
			      subbuf_size, num_subbuf, switch_timer_interval,
			      read_timer_interval);
}

static
void ltt_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}

static
struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
{
	struct lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu);
		if (!lib_ring_buffer_open_read(buf))
			return buf;
	}
	return NULL;
}

static
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}

static
int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
		      uint16_t event_id)
{
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	ctx->cpu = cpu;

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (ret)
		goto put;
	ltt_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}

static
void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}

static
void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}
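
/*
 * Usage sketch (editorial, not part of the original client): a probe built on
 * this transport typically initializes a reservation context, reserves space,
 * writes its payload, then commits.  The payload struct below is hypothetical,
 * and lib_ring_buffer_ctx_init() is assumed to take
 * (ctx, chan, priv, data_size, largest_align, cpu):
 *
 *	struct lib_ring_buffer_ctx ctx;
 *	struct my_payload payload = { 0 };
 *
 *	lib_ring_buffer_ctx_init(&ctx, ltt_chan->chan, NULL, sizeof(payload),
 *				 ltt_alignof(struct my_payload), -1);
 *	if (!ltt_event_reserve(&ctx, event_id)) {
 *		ltt_event_write(&ctx, &payload, sizeof(payload));
 *		ltt_event_commit(&ctx);
 *	}
 */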

static
wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
{
	return &chan->chan->read_wait;
}

static struct ltt_transport ltt_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = ltt_channel_destroy,
		.buffer_read_open = ltt_buffer_read_open,
		.buffer_read_close = ltt_buffer_read_close,
		.event_reserve = ltt_event_reserve,
		.event_commit = ltt_event_commit,
		.event_write = ltt_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_reader_wait_queue = ltt_get_reader_wait_queue,
	},
};

static int __init ltt_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync all also takes care of the lib ring buffer
	 * vmalloc'd module pages when the lib ring buffer is built as a
	 * module into LTTng.
	 */
	wrapper_vmalloc_sync_all();
	printk(KERN_INFO "LTT : ltt ring buffer client init\n");
	ltt_transport_register(&ltt_relay_transport);
	return 0;
}

module_init(ltt_ring_buffer_client_init);

static void __exit ltt_ring_buffer_client_exit(void)
{
	printk(KERN_INFO "LTT : ltt ring buffer client exit\n");
	ltt_transport_unregister(&ltt_relay_transport);
}

module_exit(ltt_ring_buffer_client_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
		   " client");