X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libust%2Fbuffers.h;h=4017964c7ea7410967ef610e7d6477163d7f608f;hb=8161463975e218e0833d31ab1577a7ceb9e8e9f3;hp=ca78796fbc9aa7e2aa28f2c4a0fcb9db73cbc7c7;hpb=12e81b07455a1aef2e2bcc73004f14a7b73596fa;p=ust.git

diff --git a/libust/buffers.h b/libust/buffers.h
index ca78796..4017964 100644
--- a/libust/buffers.h
+++ b/libust/buffers.h
@@ -24,8 +24,11 @@
 #define _UST_BUFFERS_H
 
 #include <assert.h>
-#include <ust/kernelcompat.h>
-#include "usterr.h"
+
+#include <urcu/list.h>
+#include <urcu/ref.h>
+
+#include "usterr_signal_safe.h"
 #include "channels.h"
 #include "tracerconst.h"
 #include "tracercore.h"
@@ -80,6 +83,11 @@ struct ust_buffer {
 	int data_ready_fd_write;
 	/* the reading end of the pipe */
 	int data_ready_fd_read;
+	/*
+	 * List of buffers with an open pipe, used for fork and forced subbuffer
+	 * switch.
+	 */
+	struct cds_list_head open_buffers_list;
 
 	unsigned int finalized;
 //ust//	struct timer_list switch_timer; /* timer for periodical switch */
@@ -87,14 +95,14 @@ struct ust_buffer {
 
 	struct ust_channel *chan;
 
-	struct kref kref;
+	struct urcu_ref urcu_ref;
 	void *buf_data;
 	size_t buf_size;
 	int shmid;
 	unsigned int cpu;
 
 	/* commit count per subbuffer; must be at end of struct */
-	long commit_seq[0] ____cacheline_aligned; /* ATOMIC */
+	long commit_seq[0]; /* ATOMIC */
 } ____cacheline_aligned;
 
 /*
@@ -269,7 +277,7 @@ static __inline__ int ltt_poll_deliver(struct ust_channel *chan, struct ust_buff
 	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
 	commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
 	/*
-	 * No memory barrier here, since we are only interested
+	 * No memory cmm_barrier here, since we are only interested
 	 * in a statistically correct polling result. The next poll will
 	 * get the data if we are racing. The mb() that ensures correct
 	 * memory order is in get_subbuf.
@@ -367,8 +375,8 @@ static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
 	/*
 	 * Perform retryable operations.
 	 */
-	/* FIXME: make this rellay per cpu? */
-	if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
+	/* FIXME: make this really per cpu? */
+	if (unlikely(CMM_LOAD_SHARED(ltt_nesting) > 4)) {
 		DBG("Dropping event because nesting is too deep.");
 		uatomic_inc(&buf->events_lost);
 		return -EPERM;
@@ -481,16 +489,8 @@ static __inline__ void ltt_commit_slot(
 	long endidx = SUBBUF_INDEX(offset_end - 1, chan);
 	long commit_count;
 
-#ifdef LTT_NO_IPI_BARRIER
-	smp_wmb();
-#else
-	/*
-	 * Must write slot data before incrementing commit count.
-	 * This compiler barrier is upgraded into a smp_mb() by the IPI
-	 * sent by get_subbuf().
-	 */
-	barrier();
-#endif
+	cmm_smp_wmb();
+
 	uatomic_add(&buf->commit_count[endidx].cc, slot_size);
 	/*
 	 * commit count read can race with concurrent OOO commit count updates.
@@ -519,22 +519,92 @@ static __inline__ void ltt_commit_slot(
 	ltt_write_commit_counter(chan, buf, endidx,
 		buf_offset, commit_count, data_size);
 }
 
-void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
-		const void *src, size_t len, ssize_t cpy);
+void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
+		size_t len, size_t copied, int terminated);
 
 static __inline__ int ust_buffers_write(struct ust_buffer *buf, size_t offset,
 		const void *src, size_t len)
 {
-	size_t cpy;
 	size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
 
 	assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+	assert(buf_offset + len
+		<= buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+
+	ust_buffers_do_copy(buf->buf_data + buf_offset, src, len);
+
+	return len;
+}
+
+/*
+ * ust_buffers_do_memset - write character into dest.
+ * @dest: destination
+ * @src: source character
+ * @len: length to write
+ */
+static __inline__
+void ust_buffers_do_memset(void *dest, char src, size_t len)
+{
+	/*
+	 * What we really want here is an __inline__ memset, but we
+	 * don't have constants, so gcc generally uses a function call.
+	 */
+	for (; len > 0; len--)
+		*(u8 *)dest++ = src;
+}
 
-	cpy = min_t(size_t, len, buf->buf_size - buf_offset);
-	ust_buffers_do_copy(buf->buf_data + buf_offset, src, cpy);
+/*
+ * ust_buffers_do_strncpy - copy a string up to a certain number of bytes
+ * @dest: destination
+ * @src: source
+ * @len: max. length to copy
+ * @terminated: output string ends with \0 (output)
+ *
+ * returns the number of bytes copied. Does not finalize with \0 if len is
+ * reached.
+ */
+static __inline__
+size_t ust_buffers_do_strncpy(void *dest, const void *src, size_t len,
+		int *terminated)
+{
+	size_t orig_len = len;
 
-	if (unlikely(len != cpy))
-		_ust_buffers_write(buf, buf_offset, src, len, cpy);
+	*terminated = 0;
+	/*
+	 * What we really want here is an __inline__ strncpy, but we
+	 * don't have constants, so gcc generally uses a function call.
+	 */
+	for (; len > 0; len--) {
+		*(u8 *)dest = CMM_LOAD_SHARED(*(const u8 *)src);
+		/* Check with dest, because src may be modified concurrently */
+		if (*(const u8 *)dest == '\0') {
+			len--;
+			*terminated = 1;
+			break;
+		}
+		dest++;
+		src++;
+	}
+	return orig_len - len;
+}
+
+static __inline__
+int ust_buffers_strncpy(struct ust_buffer *buf, size_t offset, const void *src,
+		size_t len)
+{
+	size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
+	ssize_t copied;
+	int terminated;
+
+	assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+	assert(buf_offset + len
+		<= buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+
+	copied = ust_buffers_do_strncpy(buf->buf_data + buf_offset,
+		src, len, &terminated);
+	if (unlikely(copied < len || !terminated))
+		_ust_buffers_strncpy_fixup(buf, offset, len, copied,
+			terminated);
 	return len;
 }
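Note on the last hunk: the new ust_buffers_strncpy() copies byte-wise with CMM_LOAD_SHARED() and tests the byte it just stored rather than the source byte, because the traced program may modify the source string concurrently; only a short or unterminated copy takes the _ust_buffers_strncpy_fixup() slow path. The following standalone sketch demonstrates that copied/terminated contract; do_strncpy() is a local re-implementation for illustration, not the ust.git function:

/* strncpy_contract.c: demo of the copied/terminated semantics used above.
 * Build (liburcu headers installed): gcc strncpy_contract.c
 */
#include <stdio.h>
#include <urcu/system.h>	/* CMM_LOAD_SHARED() */

typedef unsigned char u8;

/* Local stand-in for ust_buffers_do_strncpy(), same loop structure. */
static size_t do_strncpy(void *dest, const void *src, size_t len,
		int *terminated)
{
	size_t orig_len = len;

	*terminated = 0;
	for (; len > 0; len--) {
		/* volatile read: src may change under our feet */
		*(u8 *)dest = CMM_LOAD_SHARED(*(const u8 *)src);
		/* check dest, not src: src may have changed since the load */
		if (*(const u8 *)dest == '\0') {
			len--;
			*terminated = 1;
			break;
		}
		dest = (u8 *)dest + 1;
		src = (const u8 *)src + 1;
	}
	return orig_len - len;
}

int main(void)
{
	char out[8];
	int terminated;
	size_t copied;

	/* short string: NUL is copied, terminated == 1, copied counts the NUL */
	copied = do_strncpy(out, "hi", sizeof(out), &terminated);
	printf("copied=%zu terminated=%d\n", copied, terminated);	/* 3 1 */

	/* overlong string: buffer filled, terminated == 0 -> fixup path */
	copied = do_strncpy(out, "overlong string", sizeof(out), &terminated);
	printf("copied=%zu terminated=%d\n", copied, terminated);	/* 8 0 */
	return 0;
}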
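Earlier, in ltt_commit_slot(), the LTT_NO_IPI_BARRIER conditional collapses to an unconditional cmm_smp_wmb(): userspace cannot count on an IPI from get_subbuf() to upgrade a compiler barrier into a full fence, so the writer always orders its slot-data stores before the commit-count store. A minimal sketch of that publish ordering with the liburcu primitives used in the diff; commit_slot(), slot and payload are invented names, not ust.git identifiers:

/* publish_order.c: store-side ordering as in ltt_commit_slot() above. */
#include <string.h>
#include <urcu/arch.h>		/* cmm_smp_wmb() */
#include <urcu/uatomic.h>	/* uatomic_add(), uatomic_read() */

static long commit_count;	/* stands in for buf->commit_count[idx].cc */

static void commit_slot(char *slot, const char *payload, size_t len)
{
	memcpy(slot, payload, len);		/* 1) fill the reserved slot */
	cmm_smp_wmb();				/* 2) data stores before count store */
	uatomic_add(&commit_count, (long)len);	/* 3) publish the slot */
}

int main(void)
{
	char slot[16];

	commit_slot(slot, "event", 6);
	/* a reader pairs this with cmm_smp_rmb() before touching the data */
	return uatomic_read(&commit_count) == 6 ? 0 : 1;
}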
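Finally, the struct kref to struct urcu_ref change in struct ust_buffer swaps the kernel-compat refcount for liburcu's, which follows the same init/get/put pattern. A minimal sketch of that pattern; struct buf_example and buffer_release() are hypothetical, not ust.git code:

/* ref_pattern.c: the kref -> urcu_ref migration pattern. */
#include <stdlib.h>
#include <urcu/ref.h>		/* urcu_ref_init/get/put */
#include <urcu/compiler.h>	/* caa_container_of() */

struct buf_example {
	struct urcu_ref urcu_ref;	/* was: struct kref kref */
	/* ... payload ... */
};

/* Called by urcu_ref_put() when the count drops to zero. */
static void buffer_release(struct urcu_ref *ref)
{
	struct buf_example *buf =
		caa_container_of(ref, struct buf_example, urcu_ref);

	free(buf);
}

int main(void)
{
	struct buf_example *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return 1;
	urcu_ref_init(&buf->urcu_ref);			/* count = 1 */
	urcu_ref_get(&buf->urcu_ref);			/* count = 2 */
	urcu_ref_put(&buf->urcu_ref, buffer_release);	/* count = 1 */
	urcu_ref_put(&buf->urcu_ref, buffer_release);	/* 0 -> release */
	return 0;
}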