X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;ds=sidebyside;f=ltt-ring-buffer-client.h;h=1c9308e04f7151e9d38db7aeb83609bdca8c51c3;hb=bef96e480c8b83dc90b22803b865ad2a40bdcb67;hp=0177d48b6cfab82926dce3ccb0330879eea6647e;hpb=05d32c64199816da32cd95929a8ff0d0d38d7f60;p=lttng-modules.git

diff --git a/ltt-ring-buffer-client.h b/ltt-ring-buffer-client.h
index 0177d48b..1c9308e0 100644
--- a/ltt-ring-buffer-client.h
+++ b/ltt-ring-buffer-client.h
@@ -55,14 +55,38 @@ static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
 	return trace_clock_read64();
 }
 
+static inline
+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+{
+	int i;
+	size_t orig_offset = offset;
+
+	if (likely(!ctx))
+		return 0;
+	for (i = 0; i < ctx->nr_fields; i++)
+		offset += ctx->fields[i].get_size(offset);
+	return offset - orig_offset;
+}
+
+static inline
+void ctx_record(struct lib_ring_buffer_ctx *bufctx,
+		struct ltt_channel *chan,
+		struct lttng_ctx *ctx)
+{
+	int i;
+
+	if (likely(!ctx))
+		return;
+	for (i = 0; i < ctx->nr_fields; i++)
+		ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+}
+
 /*
  * record_header_size - Calculate the header size and padding necessary.
  * @config: ring buffer instance configuration
  * @chan: channel
  * @offset: offset in the write buffer
- * @data_size: size of the payload
  * @pre_header_padding: padding to add before the header (output)
- * @rflags: reservation flags
  * @ctx: reservation context
  *
  * Returns the event header size (including padding).
@@ -73,11 +97,11 @@ static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
 static __inline__
 unsigned char record_header_size(const struct lib_ring_buffer_config *config,
 				 struct channel *chan, size_t offset,
-				 size_t data_size, size_t *pre_header_padding,
-				 unsigned int rflags,
+				 size_t *pre_header_padding,
 				 struct lib_ring_buffer_ctx *ctx)
 {
 	struct ltt_channel *ltt_chan = channel_get_private(chan);
+	struct ltt_event *event = ctx->priv;
 	size_t orig_offset = offset;
 	size_t padding;
 
@@ -85,7 +109,7 @@ unsigned char record_header_size(const struct lib_ring_buffer_config *config,
 	case 1:	/* compact */
 		padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
 		offset += padding;
-		if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
 			offset += sizeof(uint32_t);	/* id and timestamp */
 		} else {
 			/* Minimum space taken by 5-bit id */
@@ -101,7 +125,7 @@ unsigned char record_header_size(const struct lib_ring_buffer_config *config,
 		padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
 		offset += padding;
 		offset += sizeof(uint16_t);
-		if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
 			offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
 			offset += sizeof(uint32_t);	/* timestamp */
 		} else {
@@ -110,12 +134,13 @@ unsigned char record_header_size(const struct lib_ring_buffer_config *config,
 			offset += sizeof(uint32_t);	/* id */
 			offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
 			offset += sizeof(uint64_t);	/* timestamp */
-		}
 		break;
 	default:
-		WARN_ON(1);
+		WARN_ON_ONCE(1);
 	}
+	offset += ctx_get_size(offset, event->ctx);
+	offset += ctx_get_size(offset, ltt_chan->ctx);
 
 	*pre_header_padding = padding;
 	return offset - orig_offset;
@@ -126,7 +151,7 @@ unsigned char record_header_size(const struct lib_ring_buffer_config *config,
 extern
 void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
 				 struct lib_ring_buffer_ctx *ctx,
-				 u16 eID, u32 event_size);
+				 uint32_t event_id);
 
 /*
  * ltt_write_event_header
@@ -135,15 +160,15 @@ void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
  *
  * @config: ring buffer instance configuration
  * @ctx: reservation context
- * @eID : event ID
- * @event_size : size of the event, excluding the event header.
+ * @event_id: event ID
  */
 static __inline__
 void ltt_write_event_header(const struct lib_ring_buffer_config *config,
 			    struct lib_ring_buffer_ctx *ctx,
-			    u16 eID, u32 event_size)
+			    uint32_t event_id)
 {
 	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+	struct ltt_event *event = ctx->priv;
 
 	if (unlikely(ctx->rflags))
 		goto slow_path;
@@ -153,50 +178,51 @@ void ltt_write_event_header(const struct lib_ring_buffer_config *config,
 	{
 		uint32_t id_time = 0;
 
-		bt_bitfield_write(&id_time, uint32_t, 0, 5, eID);
+		bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
 		bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
 		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
 		break;
 	}
 	case 2:	/* large */
 	{
-		uint16_t event_id = eID;
 		uint32_t timestamp = (uint32_t) ctx->tsc;
+		uint16_t id = event_id;
 
-		lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
 		lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
 		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
 		break;
 	}
 	default:
-		WARN_ON(1);
+		WARN_ON_ONCE(1);
 	}
+
+	ctx_record(ctx, ltt_chan, event->ctx);
+	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
+
 	return;
 
 slow_path:
-	ltt_write_event_header_slow(config, ctx, eID, event_size);
+	ltt_write_event_header_slow(config, ctx, event_id);
 }
 
-/*
- * TODO: For now, we only support 65536 event ids per channel.
- */
 void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
-			       struct lib_ring_buffer_ctx *ctx,
-			       u16 eID, u32 event_size)
+			       struct lib_ring_buffer_ctx *ctx,
+			       uint32_t event_id)
 {
 	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+	struct ltt_event *event = ctx->priv;
 
 	switch (ltt_chan->header_type) {
 	case 1:	/* compact */
-		if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
+		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
 			uint32_t id_time = 0;
 
-			bt_bitfield_write(&id_time, uint32_t, 0, 5, eID);
+			bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
 			bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
 			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
 		} else {
 			uint8_t id = 0;
-			uint32_t event_id = (uint32_t) eID;
 			uint64_t timestamp = ctx->tsc;
 
 			bt_bitfield_write(&id, uint8_t, 0, 5, 31);
@@ -210,30 +236,31 @@ void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
 		break;
 	case 2:	/* large */
 	{
-		if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
-			uint16_t event_id = eID;
+		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
 			uint32_t timestamp = (uint32_t) ctx->tsc;
+			uint16_t id = event_id;
 
-			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
 			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
 			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
 		} else {
-			uint16_t event_id = 65535;
-			uint32_t event_id_ext = (uint32_t) eID;
 			uint64_t timestamp = ctx->tsc;
+			uint16_t id = 65535;
 
-			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
 			/* Align extended struct on largest member */
 			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
-			lib_ring_buffer_write(config, ctx, &event_id_ext, sizeof(event_id_ext));
+			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
 			lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
 			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
 		}
 		break;
 	}
 	default:
-		WARN_ON(1);
+		WARN_ON_ONCE(1);
 	}
+	ctx_record(ctx, ltt_chan, event->ctx);
+	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
 }
 
 static const struct lib_ring_buffer_config client_config;
@@ -246,13 +273,11 @@ static u64 client_ring_buffer_clock_read(struct channel *chan)
 static
 size_t client_record_header_size(const struct lib_ring_buffer_config *config,
 				 struct channel *chan, size_t offset,
-				 size_t data_size,
 				 size_t *pre_header_padding,
-				 unsigned int rflags,
 				 struct lib_ring_buffer_ctx *ctx)
 {
-	return record_header_size(config, chan, offset, data_size,
-				  pre_header_padding, rflags, ctx);
+	return record_header_size(config, chan, offset,
+				  pre_header_padding, ctx);
 }
 
 /**
@@ -378,12 +403,13 @@ static void ltt_buffer_read_close(struct lib_ring_buffer *buf)
 {
 	lib_ring_buffer_release_read(buf);
-
 }
 
 static
-int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx)
+int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
+		      uint32_t event_id)
 {
+	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
 	int ret, cpu;
 
 	cpu = lib_ring_buffer_get_cpu(&client_config);
@@ -391,11 +417,24 @@ int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx)
 		return -EPERM;
 	ctx->cpu = cpu;
 
+	switch (ltt_chan->header_type) {
+	case 1:	/* compact */
+		if (event_id > 30)
+			ctx->rflags |= LTT_RFLAG_EXTENDED;
+		break;
+	case 2:	/* large */
+		if (event_id > 65534)
+			ctx->rflags |= LTT_RFLAG_EXTENDED;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+
 	ret = lib_ring_buffer_reserve(&client_config, ctx);
 	if (ret)
 		goto put;
-	return ret;
-
+	ltt_write_event_header(&client_config, ctx, event_id);
+	return 0;
 put:
 	lib_ring_buffer_put_cpu(&client_config);
 	return ret;
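The new ctx_get_size()/ctx_record() helpers only iterate over per-field callbacks; the fields themselves are registered elsewhere in struct lttng_ctx, which is not part of this diff. A minimal sketch of one such context field is given below purely to illustrate the get_size()/record() contract this header relies on. The struct lttng_ctx_field layout, the chan->ops->event_write() helper and the "vpid" example are assumptions made for illustration, not code from this patch.

/*
 * Illustrative sketch only: a hypothetical "vpid" context field matching
 * the get_size()/record() callbacks iterated by ctx_get_size() and
 * ctx_record() above.  The field type and channel write op are assumed.
 */
#include <linux/sched.h>	/* current, task_tgid_vnr() */

static
size_t vpid_get_size(size_t offset)
{
	size_t size = 0;

	/* Account for alignment padding plus one pid_t in the payload. */
	size += lib_ring_buffer_align(offset, ltt_alignof(pid_t));
	size += sizeof(pid_t);
	return size;
}

static
void vpid_record(struct lttng_ctx_field *field,
		 struct lib_ring_buffer_ctx *ctx,
		 struct ltt_channel *chan)
{
	pid_t vpid = task_tgid_vnr(current);	/* PID as seen from the current pid namespace */

	/* Write at the same alignment get_size() accounted for. */
	lib_ring_buffer_align_ctx(ctx, ltt_alignof(pid_t));
	chan->ops->event_write(ctx, &vpid, sizeof(vpid));	/* assumed channel write op */
}

With fields shaped like this, record_header_size() budgets their space through ctx_get_size() and ltt_write_event_header()/ltt_write_event_header_slow() serialize them through ctx_record() right after the event header, so each field only has to keep its two callbacks consistent with each other.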