/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#ifdef LTT_TRACE

#include <errno.h>
#include <asm/atomic.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <linux/futex.h>

#include <ltt/ltt-facility-id-user_generic.h>
#include <ltt/ltt-generic.h>

#ifndef futex
static inline __attribute__((no_instrument_function))
	_syscall6(long, futex, unsigned long, uaddr, int, op, int, val,
		unsigned long, timeout, unsigned long, uaddr2, int, val2)
#endif //futex
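
/*
 * Illustration (added comment, not part of the original header) : the raw
 * futex syscall declared above is used further down so that a writer can
 * sleep on the ltt_buf->full word when every sub-buffer is in use
 * (FUTEX_WAIT, see ltt_reserve_slot()). The matching wake-up is assumed to
 * be issued by the reader side once space is available, roughly as:
 *
 *	atomic_set(&buf->full, 0);
 *	futex((unsigned long)&buf->full, FUTEX_WAKE, 1, 0, 0, 0);
 */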

#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_CPU
#define LTT_SUBBUF_SIZE_CPU 1048576
#endif //LTT_SUBBUF_SIZE_CPU

#define LTT_BUF_SIZE_CPU (LTT_SUBBUF_SIZE_CPU * LTT_N_SUBBUFS)

#ifndef LTT_SUBBUF_SIZE_FACILITIES
#define LTT_SUBBUF_SIZE_FACILITIES 4096
#endif //LTT_SUBBUF_SIZE_FACILITIES

#define LTT_BUF_SIZE_FACILITIES (LTT_SUBBUF_SIZE_FACILITIES * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT


/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)

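/*
 * Worked example (illustration only, not in the original header) : these
 * masks assume alloc_size and subbuf_size are powers of two. With
 * subbuf_size = 4096 and alloc_size = 8192 (two sub-buffers), an offset of
 * 5000 gives:
 *
 *	SUBBUF_OFFSET(5000, buf) == 5000 & 4095 == 904
 *	SUBBUF_INDEX(5000, buf)  == (5000 & 8191) / 4096 == 1
 *	SUBBUF_TRUNC(5000, buf)  == 5000 & ~4095 == 4096
 *	SUBBUF_ALIGN(5000, buf)  == (5000 + 4096) & ~4095 == 8192
 */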

#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 7

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

typedef unsigned int ltt_facility_t;

struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order; /* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment; /* Event header alignment */
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} end;
	uint32_t lost_size; /* Size unused at the end of the buffer */
	uint32_t buf_size; /* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute((packed));


struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	atomic_t full; /* futex on which the writer waits : 1 : full */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};
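
/*
 * Reader's note (added comment, interpretation of the code below) : offset
 * is the free-running write position and consumed the free-running read
 * position, both wrapped by the offset macros above. Each sub-buffer has a
 * reserve_count and a commit_count : space is accounted to reserve_count
 * when a slot is reserved and to commit_count when it is committed, and the
 * sub-buffer is delivered once the two counts match (see ltt_reserve_slot()
 * and ltt_commit_slot() below).
 */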

struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	int nesting;
	struct {
		struct ltt_buf facilities;
		struct ltt_buf cpu;
		char facilities_buf[LTT_BUF_SIZE_FACILITIES] __attribute__ ((aligned (8)));
		char cpu_buf[LTT_BUF_SIZE_CPU] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute((packed));

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

/* Get the byte offset of the channel within struct ltt_trace_info */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

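/*
 * Note (added for clarity, not in the original header) : GET_CHANNEL_INDEX()
 * is an open-coded offsetof(). Assuming <stddef.h>, it is equivalent to:
 *
 *	#define GET_CHANNEL_INDEX(chan) \
 *		offsetof(struct ltt_trace_info, channel.chan)
 */
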
/* ltt_get_index_from_facility
 *
 * Get channel index from facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We compute this structure offset as early as
 * possible and remember it, so that this logic runs only once per tracing
 * call rather than once per trace.
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
		uint8_t eID)
{
	return GET_CHANNEL_INDEX(cpu);
}
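
/*
 * Added note : as written, the function above ignores fID and eID and sends
 * every event to the per-cpu channel; the parameters are presumably kept for
 * a later per-facility channel selection.
 */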


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	/* The channels are embedded in struct ltt_trace_info : add the byte
	 * offset to the trace pointer to reach the struct ltt_buf. */
	return (struct ltt_buf *)((void*)trace+index);
}


/*
 * ltt_get_header_size
 *
 * Calculate alignment offset for arch size void*. This is the
 * alignment offset of the event header.
 *
 * Important note :
 * The event header must be a size multiple of the void* size. This is
 * necessary to be able to calculate statically the alignment offset of the
 * variable length data fields that follow. The total offset calculated here :
 *
 *	Alignment of header struct on arch size
 *	+ sizeof(header struct)
 *	+ padding added to end of struct to align on arch size.
 */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	unsigned int padding;
	unsigned int header;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after header, considering header aligned on ltt_align.
	 * Calculated statically if header size is known. */
	*after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += *after_hdr_pad;

	*header_size = header;

	return header+padding;
}

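/*
 * Worked example (illustration only, not in the original header) : on a
 * 64-bit target, sizeof(struct ltt_event_header_nohb) is 12 bytes
 * (8 + 1 + 1 + 2, packed), so the reserved slot is laid out as:
 *
 *	before_hdr_pad | 12-byte header | after_hdr_pad | event data
 *
 * with before_hdr_pad depending on where the write offset falls. Assuming
 * ltt_align() (from ltt/ltt-generic.h) returns the padding needed to reach
 * the requested alignment, after_hdr_pad = ltt_align(12, 8) = 4.
 */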

/* ltt_write_event_header
 *
 * Writes the event header to the pointer.
 *
 * @channel : pointer to the channel structure
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @offset : offset of the beginning of the header, for alignment.
 *	Calculated by ltt_get_header_size.
 * @tsc : time stamp counter.
 */
static inline void __attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, ltt_facility_t fID, uint32_t eID, size_t event_size,
		size_t offset, uint64_t tsc)
{
	struct ltt_event_header_nohb *nohb;

	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr+offset);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
}


static inline uint64_t __attribute__((no_instrument_function))
ltt_get_timestamp(void)
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}


static inline void __attribute__((no_instrument_function))
ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}


static inline void __attribute__((no_instrument_function))
ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);

}


static inline void __attribute__((no_instrument_function))
ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here : never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1 */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset),
			buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}


static inline void __attribute__((no_instrument_function))
ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}


/* ltt_reserve_slot
 *
 * Atomic slot reservation in an LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header.
 * @after_hdr_pad : dynamic padding after the event header.
 *
 * Return : NULL if not enough space, else returns the pointer
 * to the beginning of the reserved slot. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		unsigned int *slot_size,
		uint64_t *tsc,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size)
				+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
								ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
								ltt_buf)]);
			if(reserve_commit_diff == 0) {
				/* Next buffer not corrupted. */
				if((SUBBUF_TRUNC(offset_begin, ltt_buf)
					- SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
						>= ltt_buf->alloc_size) {
					/* We block until the reader unblocks us */
					atomic_set(&ltt_buf->full, 1);
					/* We block until the reader tells us to wake up.
					   Signals will simply cause this loop to restart.
					 */
					do {
						ret = futex((unsigned long)&ltt_buf->full, FUTEX_WAIT, 1, 0, 0, 0);
					} while(ret != 0 && ret != EWOULDBLOCK);
					/* go on with the write */

				} else {
					/* next buffer not corrupted, we are either in overwrite mode or
					 * the buffer is not full. It's safe to write in this new subbuffer.*/
				}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in the
				 * new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch boundary.
			 * It's safe to write */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next subbuffer.
			 */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);


	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If the buffer is in overwrite mode, push the reader consumed count if
		   the write position has reached it and we are not at the first
		   iteration (don't push the reader farther than the writer).
		   This operation can be done concurrently by many writers in the
		   same buffer ; the writer at the farthest write position sub-buffer
		   index is the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		   happens if a sub-buffer is corrupted */
		if((SUBBUF_TRUNC(offset_end, ltt_buf)
			- SUBBUF_TRUNC(consumed_old, ltt_buf))
				>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed : we are the winner of the push, we can therefore
		   re-equilibrate reserve and commit. Atomic increment of the commit
		   count permits other writers to play around with this variable
		   before us. We keep track of corrupted_subbuffers even in overwrite
		   mode : we never want to write over a not completely committed
		   sub-buffer. Possible causes : the buffer size is too low compared
		   to the unordered data input, or a writer died between the reserve
		   and the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count : a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}


	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer is declared corrupted, and
		   that the new subbuffer is declared corrupted too because of the
		   commit count adjustment.
		   Note : offset_old should never be 0 here. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
			SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
								ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
				NULL);
		}
	}

	if(begin_switch) {
		/* New sub-buffer */
		/* This code can be executed unordered : writers may already have written
		   to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the deliver
		   of this sub-buffer */
		ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
				== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
								ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer is declared corrupted, and
		   that the new subbuffer is declared corrupted too because of the
		   commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
			SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
								ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
}


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_begin = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
		&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
			atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
	}
}
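
/*
 * Usage sketch (illustration only, not part of the original header) : a
 * tracing call generated for a facility would typically combine the
 * primitives above roughly as follows. fID, eID and data_size stand for the
 * event being logged and are hypothetical here; a failed reservation simply
 * drops the event, which ltt_reserve_slot() already accounted in
 * events_lost.
 *
 *	struct ltt_trace_info *trace = thread_trace_info;
 *	struct ltt_buf *buf;
 *	unsigned int index, slot_size;
 *	size_t before_hdr_pad, after_hdr_pad, header_size;
 *	uint64_t tsc;
 *	void *slot;
 *
 *	index = ltt_get_index_from_facility(fID, eID);
 *	buf = ltt_get_channel_from_index(trace, index);
 *	slot = ltt_reserve_slot(trace, buf, data_size, &slot_size, &tsc,
 *			&before_hdr_pad, &after_hdr_pad, &header_size);
 *	if(slot != NULL) {
 *		ltt_write_event_header(trace, buf, slot, fID, eID, data_size,
 *				before_hdr_pad, tsc);
 *		... copy data_size bytes of payload at
 *		    slot + before_hdr_pad + header_size + after_hdr_pad ...
 *		ltt_commit_slot(buf, slot, slot_size);
 *	}
 *
 * Design note : reservation and commit are decoupled so that several threads
 * can interleave reservations in the same sub-buffer; whichever commit
 * brings commit_count up to reserve_count delivers the sub-buffer.
 */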


#endif //LTT_TRACE


#endif //_LTT_USERTRACE_FAST_H