X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libust%2Fbuffers.h;h=ddacdffe68ac5c1c4ae0745b4f31b15f773cdeba;hb=8d6300d3b3cb0219e1109e931a2219dbd812b24d;hp=63449d6c024194fca1341769083a55cdfb93ff4b;hpb=bb3132c8c50b09bd3ca54dc88f9ddb3c0847ba41;p=ust.git

diff --git a/libust/buffers.h b/libust/buffers.h
index 63449d6..ddacdff 100644
--- a/libust/buffers.h
+++ b/libust/buffers.h
@@ -26,6 +26,7 @@
 #include
 #include
+#include
 
 #include "usterr.h"
 #include "channels.h"
 
@@ -82,6 +83,11 @@ struct ust_buffer {
 	int data_ready_fd_write;
 	/* the reading end of the pipe */
 	int data_ready_fd_read;
+	/*
+	 * List of buffers with an open pipe, used for fork and forced subbuffer
+	 * switch.
+	 */
+	struct cds_list_head open_buffers_list;
 
 	unsigned int finalized;
 //ust//	struct timer_list switch_timer; /* timer for periodical switch */
@@ -89,14 +95,14 @@ struct ust_buffer {
 
 	struct ust_channel *chan;
 
-	struct kref kref;
+	struct urcu_ref urcu_ref;
 	void *buf_data;
 	size_t buf_size;
 	int shmid;
 	unsigned int cpu;
 
 	/* commit count per subbuffer; must be at end of struct */
-	long commit_seq[0] ____cacheline_aligned; /* ATOMIC */
+	long commit_seq[0]; /* ATOMIC */
 } ____cacheline_aligned;
 
 /*
@@ -271,7 +277,7 @@ static __inline__ int ltt_poll_deliver(struct ust_channel *chan, struct ust_buff
 	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
 	commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
 	/*
-	 * No memory barrier here, since we are only interested
+	 * No memory cmm_barrier here, since we are only interested
 	 * in a statistically correct polling result. The next poll will
 	 * get the data is we are racing. The mb() that ensures correct
 	 * memory order is in get_subbuf.
@@ -370,7 +376,7 @@ static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
 	 * Perform retryable operations.
 	 */
 	/* FIXME: make this really per cpu? */
-	if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
+	if (unlikely(CMM_LOAD_SHARED(ltt_nesting) > 4)) {
 		DBG("Dropping event because nesting is too deep.");
 		uatomic_inc(&buf->events_lost);
 		return -EPERM;
@@ -483,7 +489,7 @@ static __inline__ void ltt_commit_slot(
 	long endidx = SUBBUF_INDEX(offset_end - 1, chan);
 	long commit_count;
 
-	smp_wmb();
+	cmm_smp_wmb();
 	uatomic_add(&buf->commit_count[endidx].cc, slot_size);
 
 	/*
@@ -522,7 +528,8 @@ static __inline__ int ust_buffers_write(struct ust_buffer *buf, size_t offset,
 	size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
 
 	assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
-	assert(buf_offset + len < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+	assert(buf_offset + len
+	       <= buf->chan->subbuf_size*buf->chan->subbuf_cnt);
 
 	ust_buffers_do_copy(buf->buf_data + buf_offset, src, len);
 
@@ -568,7 +575,7 @@ size_t ust_buffers_do_strncpy(void *dest, const void *src, size_t len,
 	 * don't have constants, so gcc generally uses a function call.
 	 */
 	for (; len > 0; len--) {
-		*(u8 *)dest = LOAD_SHARED(*(const u8 *)src);
+		*(u8 *)dest = CMM_LOAD_SHARED(*(const u8 *)src);
 		/* Check with dest, because src may be modified concurrently */
 		if (*(const u8 *)dest == '\0') {
 			len--;
@@ -590,7 +597,8 @@ int ust_buffers_strncpy(struct ust_buffer *buf, size_t offset, const void *src,
 	int terminated;
 
 	assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
-	assert(buf_offset + len < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+	assert(buf_offset + len
+	       <= buf->chan->subbuf_size*buf->chan->subbuf_cnt);
 
 	copied = ust_buffers_do_strncpy(buf->buf_data + buf_offset, src, len,
 			&terminated);