/* lttv.git: ltt-usertrace/ltt/ltt-usertrace-fast.h */

/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#ifdef LTT_TRACE
#ifdef LTT_TRACE_FAST

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <asm/timex.h>
#include <semaphore.h>
#include <signal.h>
#ifdef powerpc
#define __KERNEL__ /* Ugly hack: atomic.h is broken */
#endif
#include <asm/atomic.h>
#ifdef powerpc
#include "ltt/ltt-usertrace-ppc.h"
#undef __KERNEL__ /* Ugly hack: atomic.h is broken */
#endif

#include <ltt/ltt-facility-id-user_generic.h>

#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_PROCESS
#define LTT_SUBBUF_SIZE_PROCESS 1048576
#endif //LTT_SUBBUF_SIZE_PROCESS

#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT


/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)

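/* Illustrative worked example (not part of the original header), assuming the
 * default sizes above: subbuf_size = 1048576 (1 MiB) and LTT_N_SUBBUFS = 2, so
 * alloc_size = 2097152. Both sizes must remain powers of two for these bit
 * masks to be valid. For offset = 3*1048576 + 42:
 *
 *   BUFFER_OFFSET(offset, buf) == 1048576 + 42  (position within the whole buffer)
 *   SUBBUF_OFFSET(offset, buf) == 42            (position within the sub-buffer)
 *   SUBBUF_INDEX(offset, buf)  == 1             (second of the two sub-buffers)
 *   SUBBUF_TRUNC(offset, buf)  == 3*1048576     (start of the current sub-buffer)
 *   SUBBUF_ALIGN(offset, buf)  == 4*1048576     (start of the next sub-buffer)
 */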

#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 7

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order; /* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment; /* Event header alignment */
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} end;
	uint32_t lost_size; /* Size unused at the end of the buffer */
	uint32_t buf_size; /* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute((packed));


struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	sem_t writer_sem; /* semaphore on which the writer waits */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};

struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	int nesting;
	struct {
		struct ltt_buf process;
		char process_buf[LTT_BUF_SIZE_PROCESS] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute((packed));

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

/* Get the offset of the channel in the struct ltt_trace_info */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get channel index from facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so that we pass through this logic only once per trace call
 * (not for every trace).
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
		uint8_t eID)
{
	return GET_CHANNEL_INDEX(process);
}


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	return (struct ltt_buf *)((void*)trace+index);
}
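
/* Illustrative sketch (not part of the original header): how the index and the
 * channel pointer relate. GET_CHANNEL_INDEX() computes the byte offset of the
 * "process" channel inside struct ltt_trace_info, and
 * ltt_get_channel_from_index() adds that offset back onto the trace pointer:
 *
 *   unsigned int index = ltt_get_index_from_facility(fID, eID);
 *   struct ltt_buf *buf = ltt_get_channel_from_index(trace, index);
 *   // buf == &trace->channel.process
 */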

/*
 * ltt_get_header_size
 *
 * Calculate the alignment offset for the arch-size void*. This is the
 * alignment offset of the event header.
 *
 * Important note:
 * The event header must be a multiple of the void* size in length. This is
 * necessary to be able to statically calculate the alignment offset of the
 * variable-length data fields that follow. The total offset calculated here is:
 *
 *   Alignment of the header struct on the arch size
 *   + sizeof(header struct)
 *   + padding added to the end of the struct to align it on the arch size.
 */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	unsigned int padding;
	unsigned int header;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically. */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after the header, considering the header aligned by ltt_align.
	 * Calculated statically if the header size is known. */
	*after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += *after_hdr_pad;

	*header_size = header;

	return header+padding;
}
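
/* Illustrative sketch (not part of the original header), assuming ltt_align()
 * returns the number of padding bytes needed to reach the requested alignment
 * (capped at sizeof(void*)): the packed struct ltt_event_header_nohb is
 * 8 + 1 + 1 + 2 = 12 bytes, so on a 64-bit target after_hdr_pad would be 4 and
 * header + trailing padding would total 16 bytes, plus whatever before_hdr_pad
 * is needed to align the header itself at the current write position.
 */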

/* ltt_write_event_header
 *
 * Writes the event header to the pointer.
 *
 * @channel : pointer to the channel structure
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @offset : offset of the beginning of the header, for alignment.
 *           Calculated by ltt_get_header_size.
 * @tsc : time stamp counter.
 */
static inline void __attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, ltt_facility_t fID, uint32_t eID, size_t event_size,
		size_t offset, uint64_t tsc)
{
	struct ltt_event_header_nohb *nohb;

	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr+offset);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
}
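
/* Illustrative layout sketch (not part of the original header): a reserved
 * slot, as sized by ltt_get_header_size() and filled by
 * ltt_write_event_header(), is laid out as:
 *
 *   slot start
 *   + before_hdr_pad   dynamic padding so the header lands aligned
 *   + header_size      struct ltt_event_header_nohb (packed)
 *   + after_hdr_pad    padding so the payload starts arch-aligned
 *   + event_size       variable-length event payload
 */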


static inline uint64_t __attribute__((no_instrument_function))
ltt_get_timestamp()
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}


static inline void __attribute__((no_instrument_function))
ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}


static inline void __attribute__((no_instrument_function))
ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);
}


static inline void __attribute__((no_instrument_function))
ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here: never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1 */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset),
			buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}


static inline void __attribute__((no_instrument_function))
ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}

/* ltt_reserve_slot
 *
 * Atomic slot reservation in a LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header (out)
 * @after_hdr_pad : dynamic padding after the event header (out)
 * @header_size : size of the event header (out)
 *
 * Return : NULL if not enough space, else returns the pointer
 * to the beginning of the reserved slot. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		size_t *slot_size,
		uint64_t *tsc,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size)
					+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)]);
			if(reserve_commit_diff == 0) {
				/* Next buffer not corrupted. */
				//if((SUBBUF_TRUNC(offset_begin, ltt_buf)
				//		- SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
				//		>= ltt_buf->alloc_size) {
				/* sem_wait is not signal safe. Disable signals around it. */
				{
					sigset_t oldset, set;

					/* Disable signals */
					ret = sigfillset(&set);
					if(ret) perror("LTT Error in sigfillset\n");

					ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
					if(ret) perror("LTT Error in pthread_sigmask\n");

					sem_wait(&ltt_buf->writer_sem);

					/* Enable signals */
					ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
					if(ret) perror("LTT Error in pthread_sigmask\n");
				}

				/* go on with the write */

				//} else {
				//	/* next buffer not corrupted, we are either in overwrite mode or
				//	 * the buffer is not full. It's safe to write in this new subbuffer.*/
				//}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
				sem_post(&ltt_buf->writer_sem);
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in the
				 * new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch boundary.
			 * It's safe to write. */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next
			 * subbuffer. */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);


	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If the buffer is in overwrite mode, push the reader consumed count if
		   the write position has reached it and we are not at the first
		   iteration (don't push the reader farther than the writer).
		   This operation can be done concurrently by many writers in the
		   same buffer; the writer at the farthest write position sub-buffer
		   index in the buffer is the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		   happens if a sub-buffer is corrupted. */
		if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
				- SUBBUF_TRUNC(consumed_old, ltt_buf))
				>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed: we are the winner of the push, so we can
		   rebalance reserve and commit. The atomic increment of the commit
		   count lets other writers play around with this variable before us.
		   We keep track of corrupted_subbuffers even in overwrite mode: we
		   never want to write over a sub-buffer that is not completely
		   committed. Possible causes: the buffer size is too low compared to
		   the unordered data input, or a writer died between the reserve and
		   the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count: a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}


	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer will be declared corrupted, and
		   that the new subbuffer will be declared corrupted too because of the
		   commit count adjustment.
		   Note: offset_old should never be 0 here. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
				SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
					NULL);
		}
	}

	if(begin_switch) {
		/* New sub-buffer */
		/* This code can be executed unordered: writers may already have written
		   to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the deliver
		   of this sub-buffer. */
		ltt_buffer_begin_callback(ltt_buf, *tsc,
				SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
				== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer will be declared corrupted, and
		   that the new subbuffer will be declared corrupted too because of the
		   commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
				SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
}


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_begin = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
		&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
			ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
			atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
	}
}
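
/* Illustrative usage sketch (not part of the original header): roughly how a
 * tracing probe would be expected to chain ltt_reserve_slot(),
 * ltt_write_event_header() and ltt_commit_slot() for a single 32-bit payload.
 * The function name is made up for this example; real callers are the probes
 * generated from the facility descriptions, which also handle nesting and
 * per-field alignment that this sketch omits. */
static inline void __attribute__((no_instrument_function))
	ltt_example_trace_u32(ltt_facility_t fID, uint8_t eID, uint32_t value)
{
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *ltt_buf;
	void *slot, *ptr;
	size_t slot_size, before_hdr_pad, after_hdr_pad, header_size;
	uint64_t tsc;
	unsigned int index;

	if(!trace) {
		ltt_thread_init();
		trace = thread_trace_info;
		if(!trace) return;
	}

	index = ltt_get_index_from_facility(fID, eID);
	ltt_buf = ltt_get_channel_from_index(trace, index);

	slot = ltt_reserve_slot(trace, ltt_buf, sizeof(uint32_t), &slot_size, &tsc,
			&before_hdr_pad, &after_hdr_pad, &header_size);
	if(!slot) return; /* no space or no timestamp: event counted as lost */

	ltt_write_event_header(trace, ltt_buf, slot, fID, eID, sizeof(uint32_t),
			before_hdr_pad, tsc);
	/* Payload starts after the (padded) event header. */
	ptr = slot + before_hdr_pad + header_size + after_hdr_pad;
	*(uint32_t *)ptr = value;

	ltt_commit_slot(ltt_buf, slot, slot_size);
}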


#endif //LTT_TRACE_FAST
#endif //LTT_TRACE
#endif //_LTT_USERTRACE_FAST_H