X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=ltt-ring-buffer-client.h;h=39587dd0b15f2054454a57bda420cc78ec680a62;hb=2db1399a47bc5a86dade078994cd1060d6d56f64;hp=55d972ac18dbbd22d17806ae87162f596aa0b7d1;hpb=881833e349e275ac324fc3abf8a34d76f91047ea;p=lttng-modules.git

diff --git a/ltt-ring-buffer-client.h b/ltt-ring-buffer-client.h
index 55d972ac..39587dd0 100644
--- a/ltt-ring-buffer-client.h
+++ b/ltt-ring-buffer-client.h
@@ -10,78 +10,137 @@
 #include <linux/module.h>
 #include <linux/types.h>
+#include "lib/bitfield.h"
 #include "wrapper/vmalloc.h"	/* for wrapper_vmalloc_sync_all() */
 #include "wrapper/trace-clock.h"
 #include "ltt-events.h"
 #include "ltt-tracer.h"
+#include "wrapper/ringbuffer/frontend_types.h"
+
+/*
+ * Keep the natural field alignment for _each field_ within this structure if
+ * you ever add/remove a field from this header. Packed attribute is not used
+ * because gcc generates poor code on at least powerpc and mips. Don't ever
+ * let gcc add padding between the structure elements.
+ */
+struct packet_header {
+        /* Trace packet header */
+        uint32_t magic;                 /*
+                                         * Trace magic number.
+                                         * Contains endianness information.
+                                         */
+        uint8_t uuid[16];
+        uint32_t stream_id;
+
+        struct {
+                /* Stream packet context */
+                uint64_t timestamp_begin;       /* Cycle count at subbuffer start */
+                uint64_t timestamp_end;         /* Cycle count at subbuffer end */
+                uint32_t events_discarded;      /*
+                                                 * Events lost in this subbuffer since
+                                                 * the beginning of the trace.
+                                                 * (may overflow)
+                                                 */
+                uint32_t content_size;          /* Size of data in subbuffer */
+                uint32_t packet_size;           /* Subbuffer size (includes padding) */
+                uint32_t cpu_id;                /* CPU id associated with stream */
+                uint8_t header_end;             /* End of header */
+        } ctx;
+};
+
 
 static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
 {
         return trace_clock_read64();
 }
 
+static inline
+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+{
+        int i;
+        size_t orig_offset = offset;
+
+        if (likely(!ctx))
+                return 0;
+        for (i = 0; i < ctx->nr_fields; i++)
+                offset += ctx->fields[i].get_size(offset);
+        return offset - orig_offset;
+}
+
+static inline
+void ctx_record(struct lib_ring_buffer_ctx *bufctx,
+                struct ltt_channel *chan,
+                struct lttng_ctx *ctx)
+{
+        int i;
+
+        if (likely(!ctx))
+                return;
+        for (i = 0; i < ctx->nr_fields; i++)
+                ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+}
+
 /*
  * record_header_size - Calculate the header size and padding necessary.
  * @config: ring buffer instance configuration
  * @chan: channel
  * @offset: offset in the write buffer
- * @data_size: size of the payload
  * @pre_header_padding: padding to add before the header (output)
- * @rflags: reservation flags
  * @ctx: reservation context
  *
  * Returns the event header size (including padding).
  *
- * Important note :
- * The event header must be 32-bits. The total offset calculated here :
- *
- *   Alignment of header struct on 32 bits (min arch size, header size)
- * + sizeof(header struct)  (32-bits)
- * + (opt) u16 (ext. event id)
- * + (opt) u16 (event_size)
- *             (if event_size == LTT_MAX_SMALL_SIZE, has ext. event size)
- * + (opt) u32 (ext. event size)
- * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
- *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
 static __inline__
 unsigned char record_header_size(const struct lib_ring_buffer_config *config,
                                  struct channel *chan, size_t offset,
-                                 size_t data_size, size_t *pre_header_padding,
-                                 unsigned int rflags,
+                                 size_t *pre_header_padding,
                                  struct lib_ring_buffer_ctx *ctx)
 {
+        struct ltt_channel *ltt_chan = channel_get_private(chan);
+        struct ltt_event *event = ctx->priv;
         size_t orig_offset = offset;
         size_t padding;
 
-        BUILD_BUG_ON(sizeof(struct event_header) != sizeof(u32));
-
-        padding = lib_ring_buffer_align(offset,
-                                        sizeof(struct event_header));
-        offset += padding;
-        offset += sizeof(struct event_header);
-
-        if (unlikely(rflags)) {
-                switch (rflags) {
-                case LTT_RFLAG_ID_SIZE_TSC:
-                        offset += sizeof(u16) + sizeof(u16);
-                        if (data_size >= LTT_MAX_SMALL_SIZE)
-                                offset += sizeof(u32);
-                        offset += lib_ring_buffer_align(offset, sizeof(u64));
-                        offset += sizeof(u64);
-                        break;
-                case LTT_RFLAG_ID_SIZE:
-                        offset += sizeof(u16) + sizeof(u16);
-                        if (data_size >= LTT_MAX_SMALL_SIZE)
-                                offset += sizeof(u32);
-                        break;
-                case LTT_RFLAG_ID:
-                        offset += sizeof(u16);
-                        break;
+        switch (ltt_chan->header_type) {
+        case 1: /* compact */
+                padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
+                offset += padding;
+                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
+                        offset += sizeof(uint32_t);     /* id and timestamp */
+                } else {
+                        /* Minimum space taken by 5-bit id */
+                        offset += sizeof(uint8_t);
+                        /* Align extended struct on largest member */
+                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+                        offset += sizeof(uint32_t);     /* id */
+                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+                        offset += sizeof(uint64_t);     /* timestamp */
+                }
+                break;
+        case 2: /* large */
+                padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
+                offset += padding;
+                offset += sizeof(uint16_t);
+                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
+                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
+                        offset += sizeof(uint32_t);     /* timestamp */
+                } else {
+                        /* Align extended struct on largest member */
+                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+                        offset += sizeof(uint32_t);     /* id */
+                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
+                        offset += sizeof(uint64_t);     /* timestamp */
                 }
+                break;
+        default:
+                WARN_ON_ONCE(1);
         }
+        offset += ctx_get_size(offset, event->ctx);
+        offset += ctx_get_size(offset, ltt_chan->ctx);
 
         *pre_header_padding = padding;
         return offset - orig_offset;
@@ -92,7 +151,7 @@ unsigned char record_header_size(const struct lib_ring_buffer_config *config,
 extern
 void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
                                  struct lib_ring_buffer_ctx *ctx,
-                                 u16 eID, u32 event_size);
+                                 uint32_t event_id);
 
 /*
  * ltt_write_event_header
@@ -101,98 +160,107 @@ void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
  *
  * @config: ring buffer instance configuration
 * @ctx: reservation context
- * @eID : event ID
- * @event_size : size of the event, excluding the event header.
+ * @event_id: event ID
 */
 static __inline__
 void ltt_write_event_header(const struct lib_ring_buffer_config *config,
                             struct lib_ring_buffer_ctx *ctx,
-                            u16 eID, u32 event_size)
+                            uint32_t event_id)
 {
-        struct event_header header;
+        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+        struct ltt_event *event = ctx->priv;
 
         if (unlikely(ctx->rflags))
                 goto slow_path;
 
-        header.id_time = eID << LTT_TSC_BITS;
-        header.id_time |= (u32)ctx->tsc & LTT_TSC_MASK;
-        lib_ring_buffer_write(config, ctx, &header, sizeof(header));
+        switch (ltt_chan->header_type) {
+        case 1: /* compact */
+        {
+                uint32_t id_time = 0;
 
-slow_path:
-        ltt_write_event_header_slow(config, ctx, eID, event_size);
-}
+                bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
+                bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
+                lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+                break;
+        }
+        case 2: /* large */
+        {
+                uint32_t timestamp = (uint32_t) ctx->tsc;
+                uint16_t id = event_id;
+
+                lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+                lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
+                lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+                break;
+        }
+        default:
+                WARN_ON_ONCE(1);
+        }
-/**
- * ltt_write_trace_header - Write trace header
- * @priv: Private data (struct trace)
- * @header: Memory address where the information must be written to
- */
-static __inline__
-void write_trace_header(const struct lib_ring_buffer_config *config,
-                        struct packet_header *header)
-{
-        header->magic = CTF_MAGIC_NUMBER;
-#if 0
-        /* TODO: move start time to metadata */
-        header->major_version = LTT_TRACER_VERSION_MAJOR;
-        header->minor_version = LTT_TRACER_VERSION_MINOR;
-        header->arch_size = sizeof(void *);
-        header->alignment = lib_ring_buffer_get_alignment(config);
-        header->start_time_sec = ltt_chan->session->start_time.tv_sec;
-        header->start_time_usec = ltt_chan->session->start_time.tv_usec;
-        header->start_freq = ltt_chan->session->start_freq;
-        header->freq_scale = ltt_chan->session->freq_scale;
-#endif //0
+        ctx_record(ctx, ltt_chan, event->ctx);
+        ctx_record(ctx, ltt_chan, ltt_chan->ctx);
+
+        return;
+
+slow_path:
+        ltt_write_event_header_slow(config, ctx, event_id);
 }
 
 void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
-                                 struct lib_ring_buffer_ctx *ctx,
-                                 u16 eID, u32 event_size)
+                                   struct lib_ring_buffer_ctx *ctx,
+                                   uint32_t event_id)
 {
-        struct event_header header;
-        u16 small_size;
-
-        switch (ctx->rflags) {
-        case LTT_RFLAG_ID_SIZE_TSC:
-                header.id_time = 29 << LTT_TSC_BITS;
-                break;
-        case LTT_RFLAG_ID_SIZE:
-                header.id_time = 30 << LTT_TSC_BITS;
+        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+        struct ltt_event *event = ctx->priv;
+
+        switch (ltt_chan->header_type) {
+        case 1: /* compact */
+                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
+                        uint32_t id_time = 0;
+
+                        bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
+                        bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
+                        lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+                } else {
+                        uint8_t id = 0;
+                        uint64_t timestamp = ctx->tsc;
+
+                        bt_bitfield_write(&id, uint8_t, 0, 5, 31);
+                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+                        /* Align extended struct on largest member */
+                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
+                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
+                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+                }
                 break;
-        case LTT_RFLAG_ID:
-                header.id_time = 31 << LTT_TSC_BITS;
+        case 2: /* large */
+        {
+                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
+                        uint32_t timestamp = (uint32_t) ctx->tsc;
+                        uint16_t id = event_id;
+
+                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
+                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+                } else {
+                        uint16_t id = 65535;
+                        uint64_t timestamp = ctx->tsc;
+
+                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+                        /* Align extended struct on largest member */
+                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
+                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
+                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+                }
                 break;
+        }
         default:
                 WARN_ON_ONCE(1);
-                header.id_time = 0;
-        }
-
-        header.id_time |= (u32)ctx->tsc & LTT_TSC_MASK;
-        lib_ring_buffer_write(config, ctx, &header, sizeof(header));
-
-        switch (ctx->rflags) {
-        case LTT_RFLAG_ID_SIZE_TSC:
-                small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
-                lib_ring_buffer_write(config, ctx, &eID, sizeof(u16));
-                lib_ring_buffer_write(config, ctx, &small_size, sizeof(u16));
-                if (small_size == LTT_MAX_SMALL_SIZE)
-                        lib_ring_buffer_write(config, ctx, &event_size,
-                                              sizeof(u32));
-                lib_ring_buffer_align_ctx(ctx, sizeof(u64));
-                lib_ring_buffer_write(config, ctx, &ctx->tsc, sizeof(u64));
-                break;
-        case LTT_RFLAG_ID_SIZE:
-                small_size = (u16)min_t(u32, event_size, LTT_MAX_SMALL_SIZE);
-                lib_ring_buffer_write(config, ctx, &eID, sizeof(u16));
-                lib_ring_buffer_write(config, ctx, &small_size, sizeof(u16));
-                if (small_size == LTT_MAX_SMALL_SIZE)
-                        lib_ring_buffer_write(config, ctx, &event_size,
-                                              sizeof(u32));
-                break;
-        case LTT_RFLAG_ID:
-                lib_ring_buffer_write(config, ctx, &eID, sizeof(u16));
-                break;
         }
+        ctx_record(ctx, ltt_chan, event->ctx);
+        ctx_record(ctx, ltt_chan, ltt_chan->ctx);
 }
 
 static const struct lib_ring_buffer_config client_config;
@@ -205,13 +273,11 @@ static u64 client_ring_buffer_clock_read(struct channel *chan)
 static
 size_t client_record_header_size(const struct lib_ring_buffer_config *config,
                                  struct channel *chan, size_t offset,
-                                 size_t data_size,
                                  size_t *pre_header_padding,
-                                 unsigned int rflags,
                                  struct lib_ring_buffer_ctx *ctx)
 {
-        return record_header_size(config, chan, offset, data_size,
-                                  pre_header_padding, rflags, ctx);
+        return record_header_size(config, chan, offset,
+                                  pre_header_padding, ctx);
 }
 
 /**
@@ -223,7 +289,7 @@ size_t client_record_header_size(const struct lib_ring_buffer_config *config,
 */
 static size_t client_packet_header_size(void)
 {
-        return offsetof(struct packet_header, header_end);
+        return offsetof(struct packet_header, ctx.header_end);
 }
 
 static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
@@ -234,10 +300,18 @@ static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
                        (struct packet_header *)
                                lib_ring_buffer_offset_address(&buf->backend,
                                        subbuf_idx * chan->backend.subbuf_size);
+        struct ltt_channel *ltt_chan = channel_get_private(chan);
+        struct ltt_session *session = ltt_chan->session;
 
-        header->timestamp_begin = tsc;
-        header->content_size = 0xFFFFFFFF; /* for debugging */
-        write_trace_header(&client_config, header);
+        header->magic = CTF_MAGIC_NUMBER;
+        memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
+        header->stream_id = ltt_chan->id;
+        header->ctx.timestamp_begin = tsc;
+        header->ctx.timestamp_end = 0;
+        header->ctx.events_discarded = 0;
+        header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
+        header->ctx.packet_size = 0xFFFFFFFF;
+        header->ctx.cpu_id = buf->backend.cpu;
 }
 
 /*
@@ -254,13 +328,13 @@ static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
                                        subbuf_idx * chan->backend.subbuf_size);
         unsigned long records_lost = 0;
 
-        header->content_size = data_size;
-        header->packet_size = PAGE_ALIGN(data_size);
-        header->timestamp_end = tsc;
+        header->ctx.timestamp_end = tsc;
+        header->ctx.content_size = data_size * CHAR_BIT;                /* in bits */
+        header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT;     /* in bits */
         records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
         records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
         records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
-        header->events_lost = records_lost;
+        header->ctx.events_discarded = records_lost;
 }
 
 static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
@@ -287,7 +361,7 @@ static const struct lib_ring_buffer_config client_config = {
         .sync = RING_BUFFER_SYNC_PER_CPU,
         .mode = RING_BUFFER_MODE_TEMPLATE,
         .backend = RING_BUFFER_PAGE,
-        .output = RING_BUFFER_SPLICE,
+        .output = RING_BUFFER_OUTPUT_TEMPLATE,
         .oops = RING_BUFFER_OOPS_CONSISTENCY,
         .ipi = RING_BUFFER_IPI_BARRIER,
         .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
 };
@@ -295,12 +369,12 @@ static
 struct channel *_channel_create(const char *name,
-                                struct ltt_session *session, void *buf_addr,
+                                struct ltt_channel *ltt_chan, void *buf_addr,
                                 size_t subbuf_size, size_t num_subbuf,
                                 unsigned int switch_timer_interval,
                                 unsigned int read_timer_interval)
 {
-        return channel_create(&client_config, name, session, buf_addr,
+        return channel_create(&client_config, name, ltt_chan, buf_addr,
                               subbuf_size, num_subbuf, switch_timer_interval,
                               read_timer_interval);
 }
@@ -329,11 +403,13 @@ static void ltt_buffer_read_close(struct lib_ring_buffer *buf)
 {
         lib_ring_buffer_release_read(buf);
-
 }
 
-int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx)
+static
+int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
+                      uint32_t event_id)
 {
+        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
         int ret, cpu;
 
         cpu = lib_ring_buffer_get_cpu(&client_config);
@@ -341,28 +417,67 @@ int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx)
                 return -EPERM;
         ctx->cpu = cpu;
 
+        switch (ltt_chan->header_type) {
+        case 1: /* compact */
+                if (event_id > 30)
+                        ctx->rflags |= LTT_RFLAG_EXTENDED;
+                break;
+        case 2: /* large */
+                if (event_id > 65534)
+                        ctx->rflags |= LTT_RFLAG_EXTENDED;
+                break;
+        default:
+                WARN_ON_ONCE(1);
+        }
+
         ret = lib_ring_buffer_reserve(&client_config, ctx);
         if (ret)
                 goto put;
-        return ret;
-
+        ltt_write_event_header(&client_config, ctx, event_id);
+        return 0;
 put:
         lib_ring_buffer_put_cpu(&client_config);
         return ret;
 }
 
+static
 void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
 {
         lib_ring_buffer_commit(&client_config, ctx);
         lib_ring_buffer_put_cpu(&client_config);
 }
 
+static
 void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src, size_t len)
 {
         lib_ring_buffer_write(&client_config, ctx, src, len);
 }
 
+static
+wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
+{
+        return &chan->read_wait;
+}
+
+static
+wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
+{
+        return &chan->hp_wait;
+}
+
+static
+int ltt_is_finalized(struct channel *chan)
+{
+        return lib_ring_buffer_channel_is_finalized(chan);
+}
+
+static
+int ltt_is_disabled(struct channel *chan)
+{
+        return lib_ring_buffer_channel_is_disabled(chan);
+}
+
 static struct ltt_transport ltt_relay_transport = {
         .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
         .owner = THIS_MODULE,
@@ -374,6 +489,11 @@ static struct ltt_transport ltt_relay_transport = {
                 .event_reserve = ltt_event_reserve,
                 .event_commit = ltt_event_commit,
                 .event_write = ltt_event_write,
+                .packet_avail_size = NULL,      /* Would be racy anyway */
+                .get_reader_wait_queue = ltt_get_reader_wait_queue,
+                .get_hp_wait_queue = ltt_get_hp_wait_queue,
+                .is_finalized = ltt_is_finalized,
+                .is_disabled = ltt_is_disabled,
         },
 };
 
@@ -384,7 +504,6 @@ static int __init ltt_ring_buffer_client_init(void)
         * vmalloc'd module pages when it is built as a module into LTTng.
         */
         wrapper_vmalloc_sync_all();
-        printk(KERN_INFO "LTT : ltt ring buffer client init\n");
         ltt_transport_register(&ltt_relay_transport);
         return 0;
 }
@@ -393,7 +512,6 @@ module_init(ltt_ring_buffer_client_init);
 
 static void __exit ltt_ring_buffer_client_exit(void)
 {
-        printk(KERN_INFO "LTT : ltt ring buffer client exit\n");
         ltt_transport_unregister(&ltt_relay_transport);
 }
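The compact event header introduced by this patch packs a 5-bit event ID and the low 27 bits of the timestamp into a single 32-bit word; event IDs above 30 set LTT_RFLAG_EXTENDED, and the slow path then writes the escape ID 31 followed by the full 32-bit ID and the 64-bit timestamp. The stand-alone user-space sketch below illustrates the compact packing with plain shifts and masks. It assumes LSB-first bit packing and does not use the kernel's bt_bitfield_write() macro from lib/bitfield.h, so the helper name, constants, and main() driver are illustrative only.

/*
 * Sketch of the compact event header layout: bits [0,5) hold the event ID,
 * bits [5,32) hold the low 27 bits of the timestamp (LSB-first convention).
 */
#include <stdint.h>
#include <stdio.h>

#define COMPACT_ID_BITS         5U
#define COMPACT_TSC_BITS        27U

static uint32_t pack_compact_header(uint32_t event_id, uint64_t tsc)
{
        uint32_t id_time = 0;

        /* IDs 0..30 fit in 5 bits; 31 is reserved as the extended-header escape. */
        id_time |= event_id & ((1U << COMPACT_ID_BITS) - 1);
        id_time |= (uint32_t)(tsc & ((1ULL << COMPACT_TSC_BITS) - 1)) << COMPACT_ID_BITS;
        return id_time;
}

int main(void)
{
        uint32_t header = pack_compact_header(7, 0x123456789ULL);

        printf("compact header word: 0x%08x\n", (unsigned int)header);
        return 0;
}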
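client_buffer_end() now records the CTF packet context sizes in bits rather than bytes: content_size is the number of bytes actually written scaled by CHAR_BIT, and packet_size is the page-aligned sub-buffer size scaled the same way. A minimal sketch of that accounting follows; it assumes a 4096-byte page and open-codes the alignment, whereas the kernel code uses the PAGE_ALIGN() macro.

#include <limits.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE       4096UL
#define EXAMPLE_PAGE_ALIGN(x)   (((x) + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1))

int main(void)
{
        unsigned long data_size = 5000; /* bytes written into the sub-buffer */
        unsigned long content_size = data_size * CHAR_BIT;                     /* in bits */
        unsigned long packet_size = EXAMPLE_PAGE_ALIGN(data_size) * CHAR_BIT;  /* in bits */

        printf("content_size=%lu bits, packet_size=%lu bits\n",
               content_size, packet_size);
        return 0;
}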