/* LTTng user-space "fast" tracing header
 *
 * Copyright 2006 Mathieu Desnoyers
 *
 */

#ifndef _LTT_USERTRACE_FAST_H
#define _LTT_USERTRACE_FAST_H

#ifdef LTT_TRACE
#ifdef LTT_TRACE_FAST

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <syscall.h>
#include <semaphore.h>
#include <signal.h>

#include <ltt/ltt-facility-id-user_generic.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef LTT_N_SUBBUFS
#define LTT_N_SUBBUFS 2
#endif //LTT_N_SUBBUFS

#ifndef LTT_SUBBUF_SIZE_PROCESS
#define LTT_SUBBUF_SIZE_PROCESS 1048576
#endif //LTT_SUBBUF_SIZE_PROCESS

#define LTT_BUF_SIZE_PROCESS (LTT_SUBBUF_SIZE_PROCESS * LTT_N_SUBBUFS)

#ifndef LTT_USERTRACE_ROOT
#define LTT_USERTRACE_ROOT "/tmp/ltt-usertrace"
#endif //LTT_USERTRACE_ROOT


/* Buffer offset macros */

#define BUFFER_OFFSET(offset, buf) (offset & (buf->alloc_size-1))
#define SUBBUF_OFFSET(offset, buf) (offset & (buf->subbuf_size-1))
#define SUBBUF_ALIGN(offset, buf) \
	(((offset) + buf->subbuf_size) & (~(buf->subbuf_size-1)))
#define SUBBUF_TRUNC(offset, buf) \
	((offset) & (~(buf->subbuf_size-1)))
#define SUBBUF_INDEX(offset, buf) \
	(BUFFER_OFFSET(offset,buf)/buf->subbuf_size)
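
/*
 * Worked example (added for illustration, not in the original header): with
 * the default LTT_SUBBUF_SIZE_PROCESS of 1048576 (1 MiB) and LTT_N_SUBBUFS
 * of 2, alloc_size is 2097152.  Both sizes are powers of two, which the
 * masks above rely on.  For a write offset of 1048600:
 *
 *   BUFFER_OFFSET(1048600, buf) == 1048600   (offset within the whole buffer)
 *   SUBBUF_OFFSET(1048600, buf) == 24        (24 bytes into its sub-buffer)
 *   SUBBUF_INDEX(1048600, buf)  == 1         (second sub-buffer)
 *   SUBBUF_TRUNC(1048600, buf)  == 1048576   (start of the current sub-buffer)
 *   SUBBUF_ALIGN(1048600, buf)  == 2097152   (start of the next sub-buffer)
 */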


#define LTT_TRACER_MAGIC_NUMBER 0x00D6B7ED
#define LTT_TRACER_VERSION_MAJOR 0
#define LTT_TRACER_VERSION_MINOR 8

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#endif //atomic_cmpxchg

struct ltt_trace_header {
	uint32_t magic_number;
	uint32_t arch_type;
	uint32_t arch_variant;
	uint32_t float_word_order; /* Only useful for user space traces */
	uint8_t arch_size;
	//uint32_t system_type;
	uint8_t major_version;
	uint8_t minor_version;
	uint8_t flight_recorder;
	uint8_t has_heartbeat;
	uint8_t has_alignment; /* Event header alignment */
	uint8_t tsc_lsb_truncate;
	uint8_t tscbits;
	uint32_t freq_scale;
	uint64_t start_freq;
	uint64_t start_tsc;
	uint64_t start_monotonic;
	uint64_t start_time_sec;
	uint64_t start_time_usec;
} __attribute((packed));


struct ltt_block_start_header {
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} begin;
	struct {
		uint64_t cycle_count;
		uint64_t freq; /* khz */
	} end;
	uint32_t lost_size; /* Size unused at the end of the buffer */
	uint32_t buf_size; /* The size of this sub-buffer */
	struct ltt_trace_header trace;
} __attribute((packed));



struct ltt_buf {
	void *start;
	atomic_t offset;
	atomic_t consumed;
	atomic_t reserve_count[LTT_N_SUBBUFS];
	atomic_t commit_count[LTT_N_SUBBUFS];

	atomic_t events_lost;
	atomic_t corrupted_subbuffers;
	sem_t writer_sem; /* semaphore on which the writer waits */
	unsigned int alloc_size;
	unsigned int subbuf_size;
};

struct ltt_trace_info {
	int init;
	int filter;
	pid_t daemon_id;
	int nesting;
	struct {
		struct ltt_buf process;
		char process_buf[LTT_BUF_SIZE_PROCESS] __attribute__ ((aligned (8)));
	} channel;
};


struct ltt_event_header_nohb {
	uint64_t timestamp;
	unsigned char facility_id;
	unsigned char event_id;
	uint16_t event_size;
} __attribute((packed));

extern __thread struct ltt_trace_info *thread_trace_info;

void ltt_thread_init(void);

void __attribute__((no_instrument_function))
	ltt_usertrace_fast_buffer_switch(void);

/* Get the offset of the channel in the ltt_trace_info structure */
#define GET_CHANNEL_INDEX(chan) \
	(unsigned int)&((struct ltt_trace_info*)NULL)->channel.chan

/* ltt_get_index_from_facility
 *
 * Get channel index from facility and event id.
 *
 * @fID : facility ID
 * @eID : event number
 *
 * Get the channel index into which events must be written for the given
 * facility and event number. We get this structure offset as soon as possible
 * and remember it so we pass through this logic only once per trace call (not
 * for every trace).
 */
static inline unsigned int __attribute__((no_instrument_function))
	ltt_get_index_from_facility(uint8_t fID,
		uint8_t eID)
{
	return GET_CHANNEL_INDEX(process);
}


static inline struct ltt_buf * __attribute__((no_instrument_function))
	ltt_get_channel_from_index(
		struct ltt_trace_info *trace, unsigned int index)
{
	return (struct ltt_buf *)((void*)trace+index);
}
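
/*
 * Usage sketch (added for illustration; fID and eID stand for any
 * facility/event pair): the index returned by ltt_get_index_from_facility()
 * is simply the byte offset of the "process" channel inside
 * struct ltt_trace_info, so adding it back to the trace pointer yields the
 * per-thread buffer:
 *
 *   unsigned int index = ltt_get_index_from_facility(fID, eID);
 *   struct ltt_buf *buf = ltt_get_channel_from_index(thread_trace_info, index);
 *   // buf == &thread_trace_info->channel.process
 */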


/*
 * ltt_get_header_size
 *
 * Calculate alignment offset for arch size void*. This is the
 * alignment offset of the event header.
 *
 * Important note :
 * The event header must be a size multiple of the void* size. This is necessary
 * to be able to calculate statically the alignment offset of the variable
 * length data fields that follow. The total offset calculated here is :
 *
 * Alignment of header struct on arch size
 * + sizeof(header struct)
 * + padding added to end of struct to align on arch size.
 * */
static inline unsigned char __attribute__((no_instrument_function))
	ltt_get_header_size(struct ltt_trace_info *trace,
		void *address,
		size_t data_size,
		size_t *before_hdr_pad)
{
	unsigned int padding;
	unsigned int header;
	size_t after_hdr_pad;

	header = sizeof(struct ltt_event_header_nohb);

	/* Padding before the header. Calculated dynamically */
	*before_hdr_pad = ltt_align((unsigned long)address, header);
	padding = *before_hdr_pad;

	/* Padding after header, considering header aligned on ltt_align.
	 * Calculated statically if header size is known. */
	after_hdr_pad = ltt_align(header, sizeof(void*));
	padding += after_hdr_pad;

	return header+padding;
}
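
/*
 * Worked example (added for illustration; assumes the ltt_align() helper from
 * the companion ltt-usertrace headers, which returns the number of padding
 * bytes needed to reach the alignment of its second argument, capped at
 * sizeof(void*)): the packed event header is 8 + 1 + 1 + 2 = 12 bytes, so on
 * a 64-bit target after_hdr_pad is ltt_align(12, 8) == 4 and the function
 * returns before_hdr_pad + 12 + 4.  Header plus trailing padding is then 16
 * bytes, a multiple of sizeof(void*), which is what lets the payload
 * alignment be computed statically.
 */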


/* ltt_write_event_header
 *
 * Writes the event header at the given pointer and returns a pointer to the
 * start of the event payload (just past the header and its alignment
 * padding).
 *
 * @trace : trace info structure
 * @buf : pointer to the channel buffer
 * @ptr : buffer pointer
 * @fID : facility ID
 * @eID : event ID
 * @event_size : size of the event, excluding the event header.
 * @tsc : time stamp counter.
 */
static inline char *__attribute__((no_instrument_function))
	ltt_write_event_header(
		struct ltt_trace_info *trace, struct ltt_buf *buf,
		void *ptr, uint8_t fID, uint32_t eID, size_t event_size,
		uint64_t tsc)
{
	size_t after_hdr_pad;
	struct ltt_event_header_nohb *nohb;

	event_size = min(event_size, 0xFFFFU);
	nohb = (struct ltt_event_header_nohb *)(ptr);
	nohb->timestamp = (uint64_t)tsc;
	nohb->facility_id = fID;
	nohb->event_id = eID;
	nohb->event_size = (uint16_t)event_size;
	after_hdr_pad = ltt_align(sizeof(*nohb), sizeof(void*));
	return ptr + sizeof(*nohb) + after_hdr_pad;
}
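
/*
 * Resulting slot layout (added for illustration):
 *
 *   [ before_hdr_pad | event header | after_hdr_pad | payload ]
 *                    ^                              ^
 *          pointer returned by           pointer returned by
 *          ltt_reserve_slot()            ltt_write_event_header()
 *
 * A writer therefore reserves a slot, writes the header through the first
 * pointer, and copies its payload at the second one.
 */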



static inline uint64_t __attribute__((no_instrument_function))
ltt_get_timestamp()
{
	return get_cycles();
}

static inline unsigned int __attribute__((no_instrument_function))
ltt_subbuf_header_len(struct ltt_buf *buf)
{
	return sizeof(struct ltt_block_start_header);
}



static inline void __attribute__((no_instrument_function))
ltt_write_trace_header(struct ltt_trace_header *header)
{
	header->magic_number = LTT_TRACER_MAGIC_NUMBER;
	header->major_version = LTT_TRACER_VERSION_MAJOR;
	header->minor_version = LTT_TRACER_VERSION_MINOR;
	header->float_word_order = 0; //FIXME
	header->arch_type = 0; //FIXME LTT_ARCH_TYPE;
	header->arch_size = sizeof(void*);
	header->arch_variant = 0; //FIXME LTT_ARCH_VARIANT;
	header->flight_recorder = 0;
	header->has_heartbeat = 0;
	header->tsc_lsb_truncate = 0;
	header->tscbits = 0;

#ifndef LTT_PACK
	header->has_alignment = sizeof(void*);
#else
	header->has_alignment = 0;
#endif

	//FIXME
	header->freq_scale = 0;
	header->start_freq = 0;
	header->start_tsc = 0;
	header->start_monotonic = 0;
	header->start_time_sec = 0;
	header->start_time_usec = 0;
}


static inline void __attribute__((no_instrument_function))
ltt_buffer_begin_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));

	header->begin.cycle_count = tsc;
	header->begin.freq = 0; //ltt_frequency();

	header->lost_size = 0xFFFFFFFF; // for debugging...

	header->buf_size = buf->subbuf_size;

	ltt_write_trace_header(&header->trace);

}



static inline void __attribute__((no_instrument_function))
ltt_buffer_end_callback(struct ltt_buf *buf,
		uint64_t tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_block_start_header *header =
		(struct ltt_block_start_header*)
			(buf->start + (subbuf_idx*buf->subbuf_size));
	/* offset is assumed to never be 0 here : never deliver a completely
	 * empty subbuffer. */
	/* The lost size is between 0 and subbuf_size-1 */
	header->lost_size = SUBBUF_OFFSET((buf->subbuf_size - offset),
				buf);
	header->end.cycle_count = tsc;
	header->end.freq = 0; //ltt_frequency();
}


static inline void __attribute__((no_instrument_function))
ltt_deliver_callback(struct ltt_buf *buf,
		unsigned subbuf_idx,
		void *subbuf)
{
	ltt_usertrace_fast_buffer_switch();
}

/* ltt_reserve_slot
 *
 * Atomic slot reservation in an LTTng buffer. It will take care of
 * sub-buffer switching.
 *
 * Parameters:
 *
 * @trace : the trace structure to log to.
 * @buf : the buffer to reserve space into.
 * @data_size : size of the variable length data to log.
 * @slot_size : pointer to total size of the slot (out)
 * @tsc : pointer to the tsc at the slot reservation (out)
 *
 * Return : NULL if not enough space, else returns a pointer to where the
 * event header must be written (the beginning of the reserved slot, past the
 * dynamic pre-header padding). */
static inline void * __attribute__((no_instrument_function)) ltt_reserve_slot(
		struct ltt_trace_info *trace,
		struct ltt_buf *ltt_buf,
		unsigned int data_size,
		size_t *slot_size,
		uint64_t *tsc)
{
	int offset_begin, offset_end, offset_old;
	//int has_switch;
	int begin_switch, end_switch_current, end_switch_old;
	int reserve_commit_diff = 0;
	unsigned int size;
	size_t before_hdr_pad;
	int consumed_old, consumed_new;
	int commit_count, reserve_count;
	int ret;
	sigset_t oldset, set;

	do {
		offset_old = atomic_read(&ltt_buf->offset);
		offset_begin = offset_old;
		//has_switch = 0;
		begin_switch = 0;
		end_switch_current = 0;
		end_switch_old = 0;
		*tsc = ltt_get_timestamp();
		if(*tsc == 0) {
			/* Error in getting the timestamp, event lost */
			atomic_inc(&ltt_buf->events_lost);
			return NULL;
		}

		if(SUBBUF_OFFSET(offset_begin, ltt_buf) == 0) {
			begin_switch = 1; /* For offset_begin */
		} else {
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
						data_size, &before_hdr_pad)
					+ data_size;

			if((SUBBUF_OFFSET(offset_begin, ltt_buf)+size)>ltt_buf->subbuf_size) {
				//has_switch = 1;
				end_switch_old = 1; /* For offset_old */
				begin_switch = 1; /* For offset_begin */
			}
		}

		if(begin_switch) {
			if(end_switch_old) {
				offset_begin = SUBBUF_ALIGN(offset_begin, ltt_buf);
			}
			offset_begin = offset_begin + ltt_subbuf_header_len(ltt_buf);
			/* Test new buffer integrity */
			reserve_commit_diff =
				atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])
				- atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)]);

			if(reserve_commit_diff == 0) {
				/* Next buffer not corrupted. */
				//if((SUBBUF_TRUNC(offset_begin, ltt_buf)
				//		- SUBBUF_TRUNC(atomic_read(&ltt_buf->consumed), ltt_buf))
				//		>= ltt_buf->alloc_size) {
				{
					/* sem_wait is not signal safe. Disable signals around it.
					 * Signals are kept disabled to make sure we win the cmpxchg. */
					/* Disable signals */
					ret = sigfillset(&set);
					if(ret) perror("LTT Error in sigfillset\n");

					ret = pthread_sigmask(SIG_BLOCK, &set, &oldset);
					if(ret) perror("LTT Error in pthread_sigmask\n");

					/* We detect if a signal came between
					 * the offset read and signal disabling:
					 * if it is the case, then we restart
					 * the loop after reenabling signals. It
					 * means that it's a signal that has
					 * won the buffer switch.*/
					if(offset_old != atomic_read(&ltt_buf->offset)) {
						ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
						if(ret) perror("LTT Error in pthread_sigmask\n");
						continue;
					}
					/* If the offset is still the same, then
					 * we can safely proceed to do the
					 * buffer switch without being
					 * interrupted by a signal. */
					sem_wait(&ltt_buf->writer_sem);

				}
				/* go on with the write */

				//} else {
				//	/* next buffer not corrupted, we are either in overwrite mode or
				//	 * the buffer is not full. It's safe to write in this new subbuffer.*/
				//}
			} else {
				/* Next subbuffer corrupted. Force pushing reader even in normal
				 * mode. It's safe to write in this new subbuffer. */
				/* No sem_post is required because we fall through without doing a
				 * sem_wait. */
			}
			size = ltt_get_header_size(trace, ltt_buf->start + offset_begin,
						data_size, &before_hdr_pad) + data_size;
			if((SUBBUF_OFFSET(offset_begin,ltt_buf)+size)>ltt_buf->subbuf_size) {
				/* Event too big for subbuffers, report error, don't complete
				 * the sub-buffer switch. */
				atomic_inc(&ltt_buf->events_lost);
				if(reserve_commit_diff == 0) {
					ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
					if(ret) perror("LTT Error in pthread_sigmask\n");
				}
				return NULL;
			} else {
				/* We just made a successful buffer switch and the event fits in the
				 * new subbuffer. Let's write. */
			}
		} else {
			/* Event fits in the current buffer and we are not on a switch boundary.
			 * It's safe to write */
		}
		offset_end = offset_begin + size;

		if((SUBBUF_OFFSET(offset_end, ltt_buf)) == 0) {
			/* The offset_end will fall at the very beginning of the next
			 * subbuffer. */
			end_switch_current = 1; /* For offset_begin */
		}

	} while(atomic_cmpxchg(&ltt_buf->offset, offset_old, offset_end)
			!= offset_old);

	/* Push the reader if necessary */
	do {
		consumed_old = atomic_read(&ltt_buf->consumed);
		/* If buffer is in overwrite mode, push the reader consumed count if
		   the write position has reached it and we are not at the first
		   iteration (don't push the reader farther than the writer).
		   This operation can be done concurrently by many writers in the
		   same buffer, the writer at the farthest write position sub-buffer
		   index in the buffer being the one which will win this loop. */
		/* If the buffer is not in overwrite mode, pushing the reader only
		   happens if a sub-buffer is corrupted */
		if((SUBBUF_TRUNC(offset_end-1, ltt_buf)
				- SUBBUF_TRUNC(consumed_old, ltt_buf))
				>= ltt_buf->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, ltt_buf);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while(atomic_cmpxchg(&ltt_buf->consumed, consumed_old, consumed_new)
			!= consumed_old);

	if(consumed_old != consumed_new) {
		/* Reader pushed : we are the winner of the push, we can therefore
		   re-equilibrate reserve and commit. Atomic increment of the commit
		   count permits other writers to play around with this variable
		   before us. We keep track of corrupted_subbuffers even in overwrite mode :
		   we never want to write over a sub-buffer that is not completely
		   committed : possible causes : the buffer size is too low compared to
		   the unordered data input, or there is a writer who died between the
		   reserve and the commit. */
		if(reserve_commit_diff) {
			/* We have to alter the sub-buffer commit count : a sub-buffer is
			   corrupted. We do not deliver it. */
			atomic_add(reserve_commit_diff,
				&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
			atomic_inc(&ltt_buf->corrupted_subbuffers);
		}
	}


	if(end_switch_old) {
		/* old subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer will be declared corrupted, and
		   that the new subbuffer will be declared corrupted too because of the
		   commit count adjustment.
		   Note : offset_old should never be 0 here.*/
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_old,
			SUBBUF_INDEX((offset_old-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_old-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_old-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_old-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_old-1), ltt_buf),
				NULL);
		}
	}

	if(begin_switch) {
		/* Enable signals : this is what guarantees that the same reserve which
		 * did the sem_wait does in fact win the cmpxchg for the offset. We only
		 * call these system calls on buffer boundaries because of their
		 * performance cost. */
		if(reserve_commit_diff == 0) {
			ret = pthread_sigmask(SIG_SETMASK, &oldset, NULL);
			if(ret) perror("LTT Error in pthread_sigmask\n");
		}
		/* New sub-buffer */
		/* This code can be executed unordered : writers may already have written
		   to the sub-buffer before this code gets executed, caution. */
		/* The commit makes sure that this code is executed before the deliver
		   of this sub-buffer */
		ltt_buffer_begin_callback(ltt_buf, *tsc, SUBBUF_INDEX(offset_begin, ltt_buf));
		commit_count = atomic_add_return(ltt_subbuf_header_len(ltt_buf),
			&ltt_buf->commit_count[SUBBUF_INDEX(offset_begin, ltt_buf)]);
		/* Check if the written buffer has to be delivered */
		if(commit_count
				== atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_begin,
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_begin, ltt_buf), NULL);
		}
	}

	if(end_switch_current) {
		/* current subbuffer */
		/* Concurrency safe because we are the last and only thread to alter this
		   sub-buffer. As long as it is not delivered and read, no other thread can
		   alter the offset, alter the reserve_count or call the
		   client_buffer_end_callback on this sub-buffer.
		   The only remaining threads could be the ones with pending commits. They
		   will have to do the deliver themselves.
		   Not concurrency safe in overwrite mode. We detect corrupted subbuffers
		   with commit and reserve counts. We keep a corrupted sub-buffers count
		   and push the readers across these sub-buffers.
		   Not concurrency safe if a writer is stalled in a subbuffer and
		   another writer switches in, finding out it's corrupted. The result will
		   be that the old (uncommitted) subbuffer will be declared corrupted, and
		   that the new subbuffer will be declared corrupted too because of the
		   commit count adjustment. */
		ltt_buffer_end_callback(ltt_buf, *tsc, offset_end,
			SUBBUF_INDEX((offset_end-1), ltt_buf));
		/* Setting this reserve_count will allow the sub-buffer to be delivered by
		   the last committer. */
		reserve_count =
			atomic_add_return((SUBBUF_OFFSET((offset_end-1), ltt_buf)+1),
				&ltt_buf->reserve_count[SUBBUF_INDEX((offset_end-1), ltt_buf)]);
		if(reserve_count
				== atomic_read(&ltt_buf->commit_count[SUBBUF_INDEX((offset_end-1),
						ltt_buf)])) {
			ltt_deliver_callback(ltt_buf, SUBBUF_INDEX((offset_end-1), ltt_buf), NULL);
		}
	}

	*slot_size = size;

	//BUG_ON(*slot_size != (data_size + *before_hdr_pad + *after_hdr_pad + *header_size));
	//BUG_ON(*slot_size != (offset_end - offset_begin));

	return ltt_buf->start + BUFFER_OFFSET(offset_begin, ltt_buf) + before_hdr_pad;
}


/* ltt_commit_slot
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @buf : the buffer to commit to.
 * @reserved : address of the end of the event header.
 * @slot_size : size of the reserved slot.
 *
 */
static inline void __attribute__((no_instrument_function)) ltt_commit_slot(
		struct ltt_buf *ltt_buf,
		void *reserved,
		unsigned int slot_size)
{
	unsigned int offset_end = reserved - ltt_buf->start;
	int commit_count;

	commit_count = atomic_add_return(slot_size,
		&ltt_buf->commit_count[SUBBUF_INDEX(offset_end-1,
			ltt_buf)]);

	/* Check if all commits have been done */
	if(commit_count ==
			atomic_read(&ltt_buf->reserve_count[SUBBUF_INDEX(offset_end-1, ltt_buf)])) {
		ltt_deliver_callback(ltt_buf, SUBBUF_INDEX(offset_end-1, ltt_buf), NULL);
	}
}

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif //LTT_TRACE_FAST
#endif //LTT_TRACE
#endif //_LTT_USERTRACE_FAST_H