X-Git-Url: https://git.lttng.org/?p=ust.git;a=blobdiff_plain;f=libust%2Fbuffers.h;h=afeaef35897e7d7b0c71e3fe28950452bf5364cc;hp=a2ad83efca42884b1bba21e1674d14f304b52135;hb=9edd34bd25f52dd39b354a84f02697254121aefd;hpb=4723ca096d740ff93da400df304c9902e9834e5f

diff --git a/libust/buffers.h b/libust/buffers.h
index a2ad83e..afeaef3 100644
--- a/libust/buffers.h
+++ b/libust/buffers.h
@@ -26,12 +26,12 @@
 #include 
 #include 
+#include 
 
-#include "usterr.h"
+#include "usterr_signal_safe.h"
 #include "channels.h"
 #include "tracerconst.h"
 #include "tracercore.h"
-#include "header-inline.h"
 
 /***** FIXME: SHOULD BE REMOVED ***** */
 
@@ -86,7 +86,7 @@ struct ust_buffer {
 	 * List of buffers with an open pipe, used for fork and forced subbuffer
 	 * switch.
 	 */
-	struct list_head open_buffers_list;
+	struct cds_list_head open_buffers_list;
 
 	unsigned int finalized;
 //ust//	struct timer_list switch_timer; /* timer for periodical switch */
@@ -94,7 +94,7 @@ struct ust_buffer {
 
 	struct ust_channel *chan;
 
-	struct kref kref;
+	struct urcu_ref urcu_ref;
 	void *buf_data;
 	size_t buf_size;
 	int shmid;
@@ -121,6 +121,40 @@ extern int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
 extern void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
 		enum force_switch_mode mode);
 
+#ifndef HAVE_EFFICIENT_UNALIGNED_ACCESS
+
+/*
+ * Calculate the offset needed to align the type.
+ * size_of_type must be non-zero.
+ */
+static inline unsigned int ltt_align(size_t align_drift, size_t size_of_type)
+{
+	size_t alignment = min(sizeof(void *), size_of_type);
+	return (alignment - align_drift) & (alignment - 1);
+}
+/* Default arch alignment */
+#define LTT_ALIGN
+
+static inline int ltt_get_alignment(void)
+{
+	return sizeof(void *);
+}
+
+#else /* HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+static inline unsigned int ltt_align(size_t align_drift,
+		size_t size_of_type)
+{
+	return 0;
+}
+
+#define LTT_ALIGN __attribute__((packed))
+
+static inline int ltt_get_alignment(void)
+{
+	return 0;
+}
+#endif /* HAVE_EFFICIENT_UNALIGNED_ACCESS */
 
 static __inline__ void ust_buffers_do_copy(void *dest, const void *src, size_t len)
 {
@@ -193,6 +227,63 @@ static __inline__ int last_tsc_overflow(struct ust_buffer *ltt_buf,
 }
 #endif
 
+/*
+ * ust_get_header_size
+ *
+ * Calculate alignment offset to 32-bits. This is the alignment offset of the
+ * event header.
+ *
+ * Important note :
+ * The event header must be 32-bits. The total offset calculated here :
+ *
+ *   Alignment of header struct on 32 bits (min arch size, header size)
+ *   + sizeof(header struct)  (32-bits)
+ *   + (opt) u16 (ext. event id)
+ *   + (opt) u16 (event_size) (if event_size == 0xFFFFUL, has ext. event size)
+ *   + (opt) u32 (ext. event size)
+ *   + (opt) u64 full TSC (aligned on min(64-bits, arch size))
+ *
+ * The payload must itself determine its own alignment from the biggest type it
+ * contains.
+ * */
+static __inline__ unsigned char ust_get_header_size(
+		struct ust_channel *channel,
+		size_t offset,
+		size_t data_size,
+		size_t *before_hdr_pad,
+		unsigned int rflags)
+{
+	size_t orig_offset = offset;
+	size_t padding;
+
+	padding = ltt_align(offset, sizeof(struct ltt_event_header));
+	offset += padding;
+	offset += sizeof(struct ltt_event_header);
+
+	if(unlikely(rflags)) {
+		switch (rflags) {
+		case LTT_RFLAG_ID_SIZE_TSC:
+			offset += sizeof(u16) + sizeof(u16);
+			if (data_size >= 0xFFFFU)
+				offset += sizeof(u32);
+			offset += ltt_align(offset, sizeof(u64));
+			offset += sizeof(u64);
+			break;
+		case LTT_RFLAG_ID_SIZE:
+			offset += sizeof(u16) + sizeof(u16);
+			if (data_size >= 0xFFFFU)
+				offset += sizeof(u32);
+			break;
+		case LTT_RFLAG_ID:
+			offset += sizeof(u16);
+			break;
+		}
+	}
+
+	*before_hdr_pad = padding;
+	return offset - orig_offset;
+}
+
 static __inline__ void ltt_reserve_push_reader(
 		struct ust_channel *rchan,
 		struct ust_buffer *buf,
@@ -276,7 +367,7 @@ static __inline__ int ltt_poll_deliver(struct ust_channel *chan, struct ust_buff
 	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
 	commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
 	/*
-	 * No memory barrier here, since we are only interested
+	 * No memory cmm_barrier here, since we are only interested
 	 * in a statistically correct polling result. The next poll will
 	 * get the data is we are racing. The mb() that ensures correct
 	 * memory order is in get_subbuf.
@@ -375,7 +466,7 @@ static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
 	 * Perform retryable operations.
 	 */
 	/* FIXME: make this really per cpu? */
-	if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
+	if (unlikely(CMM_LOAD_SHARED(ltt_nesting) > 4)) {
 		DBG("Dropping event because nesting is too deep.");
 		uatomic_inc(&buf->events_lost);
 		return -EPERM;
@@ -488,7 +579,7 @@ static __inline__ void ltt_commit_slot(
 	long endidx = SUBBUF_INDEX(offset_end - 1, chan);
 	long commit_count;
 
-	smp_wmb();
+	cmm_smp_wmb();
 	uatomic_add(&buf->commit_count[endidx].cc, slot_size);
 
 	/*
@@ -574,7 +665,7 @@ size_t ust_buffers_do_strncpy(void *dest, const void *src, size_t len,
 	 * don't have constants, so gcc generally uses a function call.
 	 */
 	for (; len > 0; len--) {
-		*(u8 *)dest = LOAD_SHARED(*(const u8 *)src);
+		*(u8 *)dest = CMM_LOAD_SHARED(*(const u8 *)src);
 		/* Check with dest, because src may be modified concurrently */
 		if (*(const u8 *)dest == '\0') {
 			len--;
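
For reference, the sketch below walks through the header-size arithmetic introduced above for the LTT_RFLAG_ID_SIZE_TSC case: align the 32-bit event header, add the two optional u16 fields, then align and add the full 64-bit TSC. It assumes the !HAVE_EFFICIENT_UNALIGNED_ACCESS variant of ltt_align(), a 4-byte event header struct, and a payload small enough to skip the extended u32 event size; demo_align() and the hard-coded sizes are illustrative stand-ins, not UST API.

/* Standalone illustration of the padding/header-size computation above.
 * demo_align() mirrors ltt_align(): padding needed so a field of the given
 * size (capped at the architecture word size) starts naturally aligned. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int demo_align(size_t align_drift, size_t size_of_type)
{
	size_t alignment = size_of_type < sizeof(void *) ?
			   size_of_type : sizeof(void *);

	return (alignment - align_drift) & (alignment - 1);
}

int main(void)
{
	size_t offset = 13;	/* hypothetical write offset in the sub-buffer */
	size_t orig_offset = offset;
	size_t before_hdr_pad;

	before_hdr_pad = demo_align(offset, 4);	/* align the 32-bit header */
	offset += before_hdr_pad;
	offset += 4;				/* sizeof(header struct) */
	offset += sizeof(uint16_t) * 2;		/* ext. event id + event size */
	offset += demo_align(offset, sizeof(uint64_t));
	offset += sizeof(uint64_t);		/* full 64-bit TSC */

	printf("pad %zu, header size %zu\n", before_hdr_pad,
	       offset - orig_offset);
	return 0;
}

Compiled and run, this prints "pad 3, header size 19" for a write offset of 13, i.e. what ust_get_header_size() would report through *before_hdr_pad and its return value before the payload applies its own ltt_align() rule based on its largest member.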