update usertrace fast
[lttv.git] / ltt-usertrace / ltt / ltt-usertrace-fast.h

/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#ifdef LTT_TRACE
#ifdef LTT_TRACE_FAST

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <semaphore.h>
#include <signal.h>

#include <ltt/ltt-facility-id-user_generic.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_PROCESS
#define LTT_SUBBUF_SIZE_PROCESS 1048576
#endif //LTT_SUBBUF_SIZE_PROCESS

#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT


/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)

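/* Worked example (illustrative, using the defaults above: subbuf_size =
 * 1048576 and alloc_size = 2097152): for an offset of 1048580,
 * BUFFER_OFFSET = 1048580, SUBBUF_OFFSET = 4, SUBBUF_INDEX = 1,
 * SUBBUF_TRUNC = 1048576 and SUBBUF_ALIGN = 2097152 (the start of the next
 * sub-buffer). The sizes must be powers of two for these bit masks to work. */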

#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 8

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order; /* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment; /* Event header alignment */
	uint8_t tsc_lsb_truncate;
	uint8_t tscbits;
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute__((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} end;
	uint32_t lost_size; /* Size unused at the end of the buffer */
	uint32_t buf_size; /* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute__((packed));



struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	sem_t writer_sem; /* semaphore on which the writer waits */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};
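
/* Note on the counters above: offset is the current write position in the
 * buffer, consumed is the read position the reader is pushed to, and each
 * sub-buffer has a reserve_count/commit_count pair. A sub-buffer is
 * considered fully committed, and may be delivered, when its commit count
 * catches up with its reserve count (see ltt_reserve_slot and
 * ltt_commit_slot below). */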

struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	int nesting;
	struct {
		struct ltt_buf process;
		char process_buf[LTT_BUF_SIZE_PROCESS] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute__((packed));

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

/* Get the offset of the channel in the ltt_trace_info structure */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get channel index from facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so we pass through this logic only once per trace call (not
 * for every trace).
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
		uint8_t eID)
{
	return GET_CHANNEL_INDEX(process);
}


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	return (struct ltt_buf *)((void*)trace+index);
}
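
/* Illustrative note: GET_CHANNEL_INDEX computes the byte offset of a channel
 * member inside struct ltt_trace_info (the classic offsetof() trick through a
 * NULL pointer), and ltt_get_channel_from_index adds that offset back to a
 * real trace pointer. For example, assuming a valid trace pointer:
 *
 *   unsigned int index = ltt_get_index_from_facility(fID, eID);
 *   struct ltt_buf *buf = ltt_get_channel_from_index(trace, index);
 *
 * yields &trace->channel.process. */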


/*
 * ltt_get_header_size
 *
 * Calculate alignment offset for arch size void*. This is the
 * alignment offset of the event header.
 *
 * Important note :
 * The event header must be a size multiple of the void* size. This is
 * necessary to be able to calculate statically the alignment offset of the
 * variable length data fields that follow. The total offset calculated here :
 *
 *   Alignment of header struct on arch size
 *   + sizeof(header struct)
 *   + padding added to end of struct to align on arch size.
 */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	unsigned int padding;
	unsigned int header;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after header, considering header aligned on ltt_align.
	 * Calculated statically if header size is known. */
	*after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += *after_hdr_pad;

	*header_size = header;

	return header+padding;
}
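
/* Worked example (illustrative; exact figures depend on the ltt_align()
 * helper, which is defined elsewhere in ltt-usertrace): the packed
 * struct ltt_event_header_nohb is 12 bytes (8 + 1 + 1 + 2). On a 64-bit
 * target, writing at an 8-byte aligned address would typically give
 * before_hdr_pad = 0 and after_hdr_pad = 4, so the returned header size is
 * 16 bytes and the payload that follows starts on an 8-byte boundary. */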


/* ltt_write_event_header
 *
 * Writes the event header to the pointer.
 *
 * @channel : pointer to the channel structure
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @offset : offset of the beginning of the header, for alignment.
 *           Calculated by ltt_get_header_size.
 * @tsc : time stamp counter.
 */
static inline void __attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, ltt_facility_t fID, uint32_t eID, size_t event_size,
		size_t offset, uint64_t tsc)
{
	struct ltt_event_header_nohb *nohb;

	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr+offset);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
}



static inline uint64_t __attribute__((no_instrument_function))
	ltt_get_timestamp()
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
	ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}



static inline void __attribute__((no_instrument_function))
	ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}


static inline void __attribute__((no_instrument_function))
	ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);
}



static inline void __attribute__((no_instrument_function))
	ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here : never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1 */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset), buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}
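
/* Illustrative example: with subbuf_size = 1048576, if the last event of a
 * sub-buffer ends at offset 1048000 within that sub-buffer, the end callback
 * records lost_size = 576, the unused tail that the reader must skip. */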


static inline void __attribute__((no_instrument_function))
	ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}


/* ltt_reserve_slot
 *
 * Atomic slot reservation in an LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header.
 * @after_hdr_pad : dynamic padding after the event header.
 *
 * Return : NULL if not enough space, else returns the pointer
 * to the beginning of the reserved slot. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		size_t *slot_size,
		uint64_t *tsc,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;
	sigset_t oldset, set;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size)
					+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)]);

			if(reserve_commit_diff == 0) {
				/* Next buffer not corrupted. */
				//if((SUBBUF_TRUNC(offset_begin, ltt_buf)
				//		- SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
				//		>= ltt_buf->alloc_size) {
				{
					/* sem_wait is not signal safe. Disable signals around it.
					 * Signals are kept disabled to make sure we win the
					 * cmpxchg. */
					/* Disable signals */
					ret = sigfillset(&set);
					if(ret) perror("LTT Error in sigfillset\n");

					ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
					if(ret) perror("LTT Error in pthread_sigmask\n");

					/* We detect if a signal came between
					 * the offset read and signal disabling:
					 * if it is the case, then we restart
					 * the loop after reenabling signals. It
					 * means that it's a signal that has
					 * won the buffer switch. */
					if(offset_old != atomic_read(&ltt_buf->offset)) {
						ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
						if(ret) perror("LTT Error in pthread_sigmask\n");
						continue;
					}
					/* If the offset is still the same, then
					 * we can safely proceed to do the
					 * buffer switch without being
					 * interrupted by a signal. */
					sem_wait(&ltt_buf->writer_sem);
				}
				/* go on with the write */

				//} else {
				//	/* next buffer not corrupted, we are either in overwrite
				//	 * mode or the buffer is not full. It's safe to write in
				//	 * this new subbuffer. */
				//}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
				/* No sem_post is required because we fall through without doing
				 * a sem_wait. */
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				if(reserve_commit_diff == 0) {
					ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
					if(ret) perror("LTT Error in pthread_sigmask\n");
				}
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in
				 * the new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch
			 * boundary. It's safe to write. */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next
			 * subbuffer. */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);

	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If the buffer is in overwrite mode, push the reader consumed count
		   if the write position has reached it and we are not at the first
		   iteration (don't push the reader farther than the writer).
		   This operation can be done concurrently by many writers in the
		   same buffer, the writer at the farthest write position sub-buffer
		   index in the buffer being the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		   happens if a sub-buffer is corrupted. */
		if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
				- SUBBUF_TRUNC(consumed_old, ltt_buf))
				>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed : we are the winner of the push, we can therefore
		   re-equilibrate reserve and commit. Atomic increment of the commit
		   count permits other writers to play around with this variable
		   before us. We keep track of corrupted_subbuffers even in overwrite
		   mode : we never want to write over a non completely committed
		   sub-buffer. Possible causes : the buffer size is too low compared to
		   the unordered data input, or there is a writer who died between the
		   reserve and the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count : a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}


	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter
		   this sub-buffer. As long as it is not delivered and read, no other
		   thread can alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits.
		   They will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted
		   subbuffers with commit and reserve counts. We keep a corrupted
		   sub-buffers count and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result
		   will be that the old (uncommitted) subbuffer will be declared
		   corrupted, and that the new subbuffer will be declared corrupted too
		   because of the commit count adjustment.
		   Note : offset_old should never be 0 here. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
				SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered
		   by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
					NULL);
		}
	}

	if(begin_switch) {
		/* Enable signals : this is what guarantees that the same reserve which
		 * did the sem_wait does in fact win the cmpxchg for the offset. We
		 * only call these system calls on buffer boundaries because of their
		 * performance cost. */
		if(reserve_commit_diff == 0) {
			ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
			if(ret) perror("LTT Error in pthread_sigmask\n");
		}
		/* New sub-buffer */
		/* This code can be executed unordered : writers may already have
		   written to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the deliver
		   of this sub-buffer. */
		ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
				== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter
		   this sub-buffer. As long as it is not delivered and read, no other
		   thread can alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits.
		   They will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted
		   subbuffers with commit and reserve counts. We keep a corrupted
		   sub-buffers count and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result
		   will be that the old (uncommitted) subbuffer will be declared
		   corrupted, and that the new subbuffer will be declared corrupted too
		   because of the commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
				SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered
		   by the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
}
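
/* Summary of the reservation algorithm above (informal): the write position
 * (ltt_buf->offset) is claimed with a cmpxchg retry loop, so concurrent
 * reservations, including ones made from signal handlers on the same thread,
 * race safely. Crossing a sub-buffer boundary (begin_switch, end_switch_old,
 * end_switch_current) triggers the begin/end callbacks and accounts for the
 * sub-buffer header and the unused tail. Events that do not fit in a
 * sub-buffer, or reservations made while the timestamp read fails, are
 * counted in events_lost instead of blocking the traced application. */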


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_begin = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
		&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
				ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
			atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
	}
}
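
/* End-to-end usage sketch (illustrative only; trace, buf, fID, eID and
 * data_size are placeholders for what a generated tracing site would supply):
 *
 *   size_t slot_size, before_hdr_pad, after_hdr_pad, header_size;
 *   uint64_t tsc;
 *   void *slot = ltt_reserve_slot(trace, buf, data_size, &slot_size, &tsc,
 *                 &before_hdr_pad, &after_hdr_pad, &header_size);
 *   if(slot != NULL) {
 *           ltt_write_event_header(trace, buf, slot, fID, eID, data_size,
 *                           before_hdr_pad, tsc);
 *           ... copy data_size bytes of payload starting at
 *               slot + before_hdr_pad + header_size + after_hdr_pad ...
 *           ltt_commit_slot(buf, slot, slot_size);
 *   }
 *
 * If ltt_reserve_slot returns NULL, the event has already been accounted in
 * events_lost and nothing must be written or committed. */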

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif //LTT_TRACE_FAST
#endif //LTT_TRACE
#endif //_LTT_USERTRACE_FAST_H