/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#include <errno.h>
#include <asm/atomic.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <linux/futex.h>

#ifndef futex
static inline __attribute__((no_instrument_function))
	_syscall6(long, futex, unsigned long, uaddr, int, op, int, val,
		unsigned long, timeout, unsigned long, uaddr2, int, val2)
#endif //futex


#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_CPU
#define LTT_SUBBUF_SIZE_CPU 1048576
#endif //LTT_SUBBUF_SIZE_CPU

#define LTT_BUF_SIZE_CPU (LTT_SUBBUF_SIZE_CPU * LTT_N_SUBBUFS)

#ifndef LTT_SUBBUF_SIZE_FACILITIES
#define LTT_SUBBUF_SIZE_FACILITIES 4096
#endif //LTT_SUBBUF_SIZE_FACILITIES

#define LTT_BUF_SIZE_FACILITIES (LTT_SUBBUF_SIZE_FACILITIES * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT
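
/* The sizing and path defaults above can be overridden at compile time.
 * Illustrative sketch only (the application name and the chosen values are
 * hypothetical, not taken from this project's build system) :
 *
 *	gcc -DLTT_N_SUBBUFS=4 -DLTT_SUBBUF_SIZE_CPU=262144 \
 *		-DLTT_USERTRACE_ROOT=\"/var/tmp/ltt-usertrace\" \
 *		-o app app.c
 *
 * Each -D definition is only used when the corresponding macro is not
 * already defined, thanks to the #ifndef guards above. */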


/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) ((offset) & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) ((offset) & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)

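/* Worked example (a sketch, assuming the default LTT_SUBBUF_SIZE_CPU of
 * 1048576 bytes and LTT_N_SUBBUFS of 2, so alloc_size == 2097152) :
 *
 *	BUFFER_OFFSET(2097160, buf) ==       8  (wraps around the whole buffer)
 *	SUBBUF_OFFSET(1048584, buf) ==       8  (offset within the sub-buffer)
 *	SUBBUF_TRUNC(1048584, buf)  == 1048576  (start of the current sub-buffer)
 *	SUBBUF_ALIGN(1048584, buf)  == 2097152  (start of the next sub-buffer)
 *	SUBBUF_INDEX(1048584, buf)  ==       1  (second sub-buffer)
 *
 * These identities only hold because the sub-buffer size and the buffer size
 * are powers of two. */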

#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 7

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

/* Fallback in case min() is not provided by another header : it is used by
 * ltt_align() and ltt_write_event_header() below. */
#ifndef min
#define min(a,b) (((a)<(b))?(a):(b))
#endif //min

typedef unsigned int ltt_facility_t;

struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order; /* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment; /* Event header alignment */
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute__((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} end;
	uint32_t lost_size; /* Size unused at the end of the buffer */
	uint32_t buf_size; /* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute__((packed));


struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	atomic_t full;	/* futex on which the writer waits : 1 : full */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};

struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	atomic_t nesting;
	struct {
		struct ltt_buf facilities;
		struct ltt_buf cpu;
		char facilities_buf[LTT_BUF_SIZE_FACILITIES] __attribute__ ((aligned (8)));
		char cpu_buf[LTT_BUF_SIZE_CPU] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute__((packed));

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

#ifndef LTT_PACK
/* Calculate the offset needed to align the type */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_align(size_t align_drift,
		size_t size_of_type)
{
	size_t alignment = min(sizeof(void*), size_of_type);

	return ((alignment - align_drift) & (alignment-1));
}
#define LTT_ALIGN
#else
static inline unsigned int __attribute__((no_instrument_function))
	ltt_align(size_t align_drift,
		size_t size_of_type)
{
	return 0;
}
#define LTT_ALIGN __attribute__((packed))
#endif //LTT_PACK
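
/* Example of how ltt_align() behaves when LTT_PACK is not defined (a sketch,
 * assuming sizeof(void*) == 8) :
 *
 *	ltt_align(13, sizeof(uint32_t)) == 3	4-byte type at drift 13 : 3 bytes
 *						of padding reach offset 16
 *	ltt_align(13, sizeof(uint64_t)) == 3	8-byte types are capped at
 *						sizeof(void*), same result
 *	ltt_align(16, sizeof(uint64_t)) == 0	already aligned
 */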

/* Get the offset of the channel in the struct ltt_trace_info */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get channel index from facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so we pass through this logic only once per trace call (not
 * for every trace).
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
		uint8_t eID)
{
	if(fID == ltt_facility_core) {
		switch(eID) {
			case event_core_facility_load:
			case event_core_facility_unload:
			case event_core_state_dump_facility_load:
				return GET_CHANNEL_INDEX(facilities);
			default:
				return GET_CHANNEL_INDEX(cpu);
		}
	}
	return GET_CHANNEL_INDEX(cpu);
}


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	/* index is a byte offset inside struct ltt_trace_info */
	return (struct ltt_buf *)((void*)trace+index);
}
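
/* Usage sketch (illustrative only ; the facility and event identifiers come
 * from generated facility headers, not from this file) :
 *
 *	unsigned int index = ltt_get_index_from_facility(fID, eID);
 *	struct ltt_buf *buf = ltt_get_channel_from_index(trace, index);
 *
 * Core-facility events describing facility loading go to the "facilities"
 * channel ; everything else goes to the per-cpu channel. */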


/*
 * ltt_get_header_size
 *
 * Calculate alignment offset for arch size void*. This is the
 * alignment offset of the event header.
 *
 * Important note :
 * The event header size must be a multiple of the void* size. This is
 * necessary to be able to calculate statically the alignment offset of the
 * variable length data fields that follow. The total offset calculated here :
 *
 *	Alignment of header struct on arch size
 *	+ sizeof(header struct)
 *	+ padding added to end of struct to align on arch size.
 * */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	unsigned int padding;
	unsigned int header;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after header, considering header aligned on ltt_align.
	 * Calculated statically if the header size is known. */
	*after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += *after_hdr_pad;

	*header_size = header;

	return header+padding;
}
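
/* Worked example (a sketch, assuming sizeof(void*) == 8).
 * sizeof(struct ltt_event_header_nohb) is 12 bytes (8 + 1 + 1 + 2, packed).
 * For an event whose write address ends in ...4 :
 *
 *	*before_hdr_pad = ltt_align(address, 12) = 4	align the header on 8
 *	*after_hdr_pad  = ltt_align(12, 8)       = 4	pad the header up to 16
 *	*header_size    = 12
 *	return value    = 12 + 4 + 4 = 20		total space taken before
 *							the variable length data
 */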


/* ltt_write_event_header
 *
 * Writes the event header to the pointer.
 *
 * @trace : trace information structure
 * @buf : pointer to the channel buffer
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @offset : offset of the beginning of the header, for alignment.
 *	Calculated by ltt_get_header_size.
 * @tsc : time stamp counter.
 */
static inline void __attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, ltt_facility_t fID, uint32_t eID, size_t event_size,
		size_t offset, uint64_t tsc)
{
	struct ltt_event_header_nohb *nohb;

	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr+offset);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
}


static inline uint64_t __attribute__((no_instrument_function))
ltt_get_timestamp()
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}


static inline void __attribute__((no_instrument_function))
ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}


static inline void __attribute__((no_instrument_function))
ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);

}


static inline void __attribute__((no_instrument_function))
ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here : never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1 */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset),
				buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}


static inline void __attribute__((no_instrument_function))
ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}


/* ltt_reserve_slot
 *
 * Atomic slot reservation in a LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header (out)
 * @after_hdr_pad : dynamic padding after the event header (out)
 * @header_size : size of the event header (out)
 *
 * Return : NULL if not enough space, else returns the pointer
 * to the beginning of the reserved slot. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		unsigned int *slot_size,
		uint64_t *tsc,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size)
					+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
								ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
								ltt_buf)]);
			if(reserve_commit_diff == 0) {
				/* Next buffer not corrupted. */
				if((SUBBUF_TRUNC(offset_begin, ltt_buf)
					- SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
					>= ltt_buf->alloc_size) {
					/* We block until the reader unblocks us */
					atomic_set(&ltt_buf->full, 1);
					/* We block until the reader tells us to wake up.
					   Signals will simply cause this loop to restart.
					 */
					do {
						ret = futex((unsigned long)&ltt_buf->full, FUTEX_WAIT, 1, 0, 0, 0);
					} while(ret != 0 && ret != EWOULDBLOCK);
					/* go on with the write */

				} else {
					/* next buffer not corrupted, we are either in overwrite mode or
					 * the buffer is not full. It's safe to write in this new subbuffer.*/
				}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in the
				 * new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch boundary.
			 * It's safe to write */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next subbuffer.
			 */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);


	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If the buffer is in overwrite mode, push the reader consumed count if
		   the write position has reached it and we are not at the first
		   iteration (don't push the reader farther than the writer).
		   This operation can be done concurrently by many writers in the
		   same buffer ; the writer with the farthest write position sub-buffer
		   index in the buffer is the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		   happens if a sub-buffer is corrupted */
		if((SUBBUF_TRUNC(offset_end, ltt_buf)
			- SUBBUF_TRUNC(consumed_old, ltt_buf))
			>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed : we are the winner of the push, we can therefore
		   re-equilibrate reserve and commit. Atomic increment of the commit
		   count permits other writers to play around with this variable
		   before us. We keep track of corrupted_subbuffers even in overwrite
		   mode : we never want to write over a sub-buffer that is not completely
		   committed. Possible causes : the buffer size is too low compared to the
		   unordered data input, or there is a writer who died between the reserve
		   and the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count : a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}


	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the delivery themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer will be declared corrupted, and
		   that the new subbuffer will be declared corrupted too because of the
		   commit count adjustment.
		   Note : offset_old should never be 0 here. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
				SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
			== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
							ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
					NULL);
		}
	}

	if(begin_switch) {
		/* New sub-buffer */
		/* This code can be executed unordered : writers may already have written
		   to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the deliver
		   of this sub-buffer */
		ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
			== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
							ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the delivery themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer will be declared corrupted, and
		   that the new subbuffer will be declared corrupted too because of the
		   commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
				SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
			== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
							ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
}


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_begin = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
		&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
			ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
		atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
	}
}
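
/* Usage sketch : how a tracing probe would typically combine
 * ltt_reserve_slot() / ltt_write_event_header() / ltt_commit_slot().
 * Illustrative only ; the facility/event identifiers (fID, eID) and the
 * payload layout are hypothetical, and the nesting handling done by the real
 * generated probes is omitted.
 *
 *	void trace_my_event(uint32_t payload)
 *	{
 *		struct ltt_trace_info *trace = thread_trace_info;
 *		struct ltt_buf *buf;
 *		unsigned int index, slot_size;
 *		size_t before_hdr_pad, after_hdr_pad, header_size;
 *		uint64_t tsc;
 *		void *slot;
 *
 *		if(!trace) {
 *			ltt_thread_init();
 *			trace = thread_trace_info;
 *		}
 *		index = ltt_get_index_from_facility(fID, eID);
 *		buf = ltt_get_channel_from_index(trace, index);
 *		slot = ltt_reserve_slot(trace, buf, sizeof(payload), &slot_size,
 *				&tsc, &before_hdr_pad, &after_hdr_pad, &header_size);
 *		if(slot == NULL)
 *			return;	// event lost
 *		ltt_write_event_header(trace, buf, slot, fID, eID,
 *				sizeof(payload), before_hdr_pad, tsc);
 *		memcpy(slot + before_hdr_pad + header_size + after_hdr_pad,
 *				&payload, sizeof(payload));
 *		ltt_commit_slot(buf, slot, slot_size);
 *	}
 */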




#endif //_LTT_USERTRACE_FAST_H