/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#ifdef LTT_TRACE
#ifdef LTT_TRACE_FAST

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <semaphore.h>
#include <signal.h>

#include <ltt/ltt-facility-id-user_generic.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_PROCESS
#define LTT_SUBBUF_SIZE_PROCESS 1048576
#endif //LTT_SUBBUF_SIZE_PROCESS

#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT


/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)
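
/*
 * Worked example with the default sizes above (illustrative only): with
 * LTT_SUBBUF_SIZE_PROCESS = 1048576 and LTT_N_SUBBUFS = 2, alloc_size is
 * 2097152.  For a write offset of 1048580:
 *
 *   BUFFER_OFFSET(1048580, buf) = 1048580 & (2097152-1) = 1048580
 *   SUBBUF_OFFSET(1048580, buf) = 1048580 & (1048576-1) = 4
 *   SUBBUF_INDEX(1048580, buf)  = 1048580 / 1048576     = 1
 *   SUBBUF_TRUNC(1048580, buf)  = 1048576  (start of sub-buffer 1)
 *   SUBBUF_ALIGN(1048580, buf)  = 2097152  (start of the next sub-buffer)
 *
 * These macros assume alloc_size and subbuf_size are powers of two.
 */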


#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 7

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order; /* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment; /* Event header alignment */
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute__((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} end;
	uint32_t lost_size; /* Size unused at the end of the buffer */
	uint32_t buf_size; /* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute__((packed));



struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	sem_t writer_sem; /* semaphore on which the writer waits */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};

struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	int nesting;
	struct {
		struct ltt_buf process;
		char process_buf[LTT_BUF_SIZE_PROCESS] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute__((packed));

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

/* Get the offset of the channel in struct ltt_trace_info */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get channel index from facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so we pass through this logic only once per trace call (not
 * for every trace).
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(ltt_facility_t fID,
		uint8_t eID)
{
	return GET_CHANNEL_INDEX(process);
}


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	return (struct ltt_buf *)((void*)trace+index);
}
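
/*
 * Example (sketch): the "index" is simply the byte offset of a channel inside
 * struct ltt_trace_info, so the two helpers above round-trip as follows,
 * assuming thread_trace_info is valid for the current thread.  The facility
 * and event identifiers are illustrative placeholders, not names guaranteed
 * by ltt-facility-id-user_generic.h:
 *
 *   unsigned int idx = ltt_get_index_from_facility(some_facility, some_event);
 *   struct ltt_buf *buf = ltt_get_channel_from_index(thread_trace_info, idx);
 *   // buf == &thread_trace_info->channel.process
 *
 * Every facility currently maps to the single "process" channel.
 */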


/*
 * ltt_get_header_size
 *
 * Calculate alignment offset for arch size void*. This is the
 * alignment offset of the event header.
 *
 * Important note :
 * The event header size must be a multiple of the void* size. This is
 * necessary to be able to calculate statically the alignment offset of the
 * variable length data fields that follow. The total size calculated here :
 *
 *   Alignment of header struct on arch size
 *   + sizeof(header struct)
 *   + padding added to end of struct to align on arch size.
 */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	unsigned int padding;
	unsigned int header;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after the header, considering the header alignment above.
	 * Calculated statically if the header size is known. */
	*after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += *after_hdr_pad;

	*header_size = header;

	return header+padding;
}
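
/*
 * Layout sketch of a single event slot, as reserved by ltt_reserve_slot()
 * below and filled by ltt_write_event_header():
 *
 *   | before_hdr_pad | struct ltt_event_header_nohb | after_hdr_pad | payload |
 *
 * before_hdr_pad depends on the write offset and is computed dynamically,
 * while after_hdr_pad only depends on the header size and the word size:
 * e.g. on a 64-bit target the 12-byte packed header would be padded to
 * 16 bytes so the payload starts void*-aligned (assuming ltt_align() returns
 * the number of padding bytes needed to reach the given alignment).
 */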


/* ltt_write_event_header
 *
 * Writes the event header to the pointer.
 *
 * @trace : trace information structure
 * @buf : pointer to the channel buffer structure
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @offset : offset of the beginning of the header, for alignment.
 *           Calculated by ltt_get_header_size.
 * @tsc : time stamp counter.
 */
static inline void __attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, ltt_facility_t fID, uint32_t eID, size_t event_size,
		size_t offset, uint64_t tsc)
{
	struct ltt_event_header_nohb *nohb;

	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr+offset);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
}



static inline uint64_t __attribute__((no_instrument_function))
ltt_get_timestamp()
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}



static inline void __attribute__((no_instrument_function))
ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}


static inline void __attribute__((no_instrument_function))
ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);

}



static inline void __attribute__((no_instrument_function))
ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here : never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1 */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset),
			buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}


static inline void __attribute__((no_instrument_function))
ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}


/* ltt_reserve_slot
 *
 * Atomic slot reservation in an LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 * @before_hdr_pad : dynamic padding before the event header (out)
 * @after_hdr_pad : dynamic padding after the event header (out)
 * @header_size : size of the event header (out)
 *
 * Return : NULL if not enough space, else returns the pointer
 * to the beginning of the reserved slot. */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		size_t *slot_size,
		uint64_t *tsc,
		size_t *before_hdr_pad,
		size_t *after_hdr_pad,
		size_t *header_size)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;
	sigset_t oldset, set;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size)
					+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)]);

			if(reserve_commit_diff == 0) {
				/* Next buffer not corrupted. */
				//if((SUBBUF_TRUNC(offset_begin, ltt_buf)
				//  - SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
				//  >= ltt_buf->alloc_size) {
				{
					/* sem_wait is not signal safe. Disable signals around it.
					 * Signals are kept disabled to make sure we win the cmpxchg. */
					/* Disable signals */
					ret = sigfillset(&set);
					if(ret) perror("LTT Error in sigfillset\n");

					ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
					if(ret) perror("LTT Error in pthread_sigmask\n");

					/* We detect if a signal came between
					 * the offset read and signal disabling:
					 * if that is the case, we restart
					 * the loop after re-enabling signals,
					 * because a signal handler has won
					 * the buffer switch. */
					if(offset_old != atomic_read(&ltt_buf->offset)) {
						ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
						if(ret) perror("LTT Error in pthread_sigmask\n");
						continue;
					}
					/* If the offset is still the same, then
					 * we can safely proceed to do the
					 * buffer switch without being
					 * interrupted by a signal. */
					sem_wait(&ltt_buf->writer_sem);

				}
				/* go on with the write */

				//} else {
				//	/* next buffer not corrupted, we are either in overwrite mode or
				//	 * the buffer is not full. It's safe to write in this new subbuffer.*/
				//}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
				/* No sem_post is required because we fall through without doing a
				 * sem_wait. */
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
					before_hdr_pad, after_hdr_pad, header_size) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				if(reserve_commit_diff == 0) {
					ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
					if(ret) perror("LTT Error in pthread_sigmask\n");
				}
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in the
				 * new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch boundary.
			 * It's safe to write */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next
			 * subbuffer. */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);

	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If the buffer is in overwrite mode, push the reader consumed count if
		   the write position has reached it and we are not at the first
		   iteration (don't push the reader farther than the writer).
		   This operation can be done concurrently by many writers in the
		   same buffer; the writer at the farthest write position sub-buffer
		   index in the buffer is the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		   happens if a sub-buffer is corrupted. */
		if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
				- SUBBUF_TRUNC(consumed_old, ltt_buf))
				>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed : we are the winner of the push, so we can
		   re-equilibrate reserve and commit. Atomic increment of the commit
		   count permits other writers to play around with this variable
		   before us. We keep track of corrupted_subbuffers even in overwrite
		   mode : we never want to write over a sub-buffer that is not
		   completely committed. Possible causes : the buffer size is too low
		   compared to the unordered data input, or there is a writer that
		   died between the reserve and the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count : a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
					&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}


	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer will be declared corrupted, and
		   that the new subbuffer will be declared corrupted too because of the
		   commit count adjustment.
		   Note : offset_old should never be 0 here. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
				SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
					NULL);
		}
	}

	if(begin_switch) {
		/* Re-enable signals : this is what guarantees that the same reserve
		 * which did the sem_wait does in fact win the cmpxchg for the offset.
		 * We only call these system calls on buffer boundaries because of
		 * their performance cost. */
		if(reserve_commit_diff == 0) {
			ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
			if(ret) perror("LTT Error in pthread_sigmask\n");
		}
		/* New sub-buffer */
		/* This code can be executed unordered : writers may already have written
		   to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the deliver
		   of this sub-buffer */
		ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
				== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer will be declared corrupted, and
		   that the new subbuffer will be declared corrupted too because of the
		   commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
				SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf);
}


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the beginning of the reserved slot.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_begin = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
					ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
			atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin, ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
	}
}
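
/*
 * Usage sketch (not compiled): how a tracing probe might drive the
 * reserve/write/commit API above.  The function name, the payload argument
 * and the memcpy-based payload copy are illustrative assumptions; real probes
 * are generated from the facility descriptions and also check trace->init and
 * trace->nesting before logging.
 */
#if 0
static inline void example_trace_event(ltt_facility_t fID, uint8_t eID,
		void *payload, size_t payload_size)
{
	struct ltt_trace_info *trace = thread_trace_info;
	struct ltt_buf *buf;
	size_t slot_size, before_hdr_pad, after_hdr_pad, header_size;
	uint64_t tsc;
	void *slot;

	if(!trace)
		return;
	buf = ltt_get_channel_from_index(trace,
			ltt_get_index_from_facility(fID, eID));

	/* Reserve space for header + payload; NULL means the event is lost. */
	slot = ltt_reserve_slot(trace, buf, payload_size, &slot_size, &tsc,
			&before_hdr_pad, &after_hdr_pad, &header_size);
	if(!slot)
		return;

	/* Write the event header after the dynamic pre-header padding. */
	ltt_write_event_header(trace, buf, slot, fID, eID, payload_size,
			before_hdr_pad, tsc);
	/* Copy the payload after the header and its trailing padding
	 * (memcpy requires <string.h>). */
	memcpy(slot + before_hdr_pad + header_size + after_hdr_pad,
			payload, payload_size);

	/* Commit the whole slot; the last committer delivers the sub-buffer. */
	ltt_commit_slot(buf, slot, slot_size);
}
#endif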

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif //LTT_TRACE_FAST
#endif //LTT_TRACE
#endif //_LTT_USERTRACE_FAST_H