#include <linux/cache.h>
#include <linux/timex.h>
#include <linux/wait.h>
-#include <linux/trace-clock.h>
#include <asm/atomic.h>
#include <asm/local.h>
+#include "wrapper/trace-clock.h"
#include "ltt-tracer-core.h"
#include "ltt-events.h"
+#define LTTNG_VERSION 0
+#define LTTNG_PATCHLEVEL 9
+#define LTTNG_SUBLEVEL 1
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8
+#endif
+
/* Number of bytes to log with a read/write event */
#define LTT_LOG_RW_SIZE 32L
* concerns.
*/
-#define LTT_RESERVED_EVENTS 3
-#define LTT_EVENT_BITS 5
-#define LTT_FREE_EVENTS ((1 << LTT_EVENT_BITS) - LTT_RESERVED_EVENTS)
-#define LTT_TSC_BITS 27
-#define LTT_TSC_MASK ((1 << LTT_TSC_BITS) - 1)
-
-struct event_header {
- u32 id_time; /* 5 bits event id (MSB); 27 bits time (LSB) */
-};
-
-/* Reservation flags */
-#define LTT_RFLAG_ID (1 << 0)
-#define LTT_RFLAG_ID_SIZE (1 << 1)
-#define LTT_RFLAG_ID_SIZE_TSC (1 << 2)
-
#define LTT_MAX_SMALL_SIZE 0xFFFFU
-/*
- * We use asm/timex.h : cpu_khz/HZ variable in here : we might have to deal
- * specifically with CPU frequency scaling someday, so using an interpolation
- * between the start and end of buffer values is not flexible enough. Using an
- * immediate frequency value permits to calculate directly the times for parts
- * of a buffer that would be before a frequency change.
- *
- * Keep the natural field alignment for _each field_ within this structure if
- * you ever add/remove a field from this header. Packed attribute is not used
- * because gcc generates poor code on at least powerpc and mips. Don't ever
- * let gcc add padding between the structure elements.
- */
-struct packet_header {
- uint32_t magic; /*
- * Trace magic number.
- * contains endianness information.
- */
- uint8_t trace_uuid[16];
- uint32_t stream_id;
- uint64_t timestamp_begin; /* Cycle count at subbuffer start */
- uint64_t timestamp_end; /* Cycle count at subbuffer end */
- uint32_t content_size; /* Size of data in subbuffer */
- uint32_t packet_size; /* Subbuffer size (include padding) */
- uint32_t events_lost; /*
- * Events lost in this subbuffer since
- * the beginning of the trace.
- * (may overflow)
- */
- /* TODO: move to metadata */
-#if 0
- uint8_t major_version;
- uint8_t minor_version;
- uint8_t arch_size; /* Architecture pointer size */
- uint8_t alignment; /* LTT data alignment */
- uint64_t start_time_sec; /* NTP-corrected start time */
- uint64_t start_time_usec;
- uint64_t start_freq; /*
- * Frequency at trace start,
- * used all along the trace.
- */
- uint32_t freq_scale; /* Frequency scaling (divisor) */
-#endif //0
- uint8_t header_end[0]; /* End of header */
-};
-
-static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
-{
- return trace_clock_read64();
-}
-
-/*
- * record_header_size - Calculate the header size and padding necessary.
- * @config: ring buffer instance configuration
- * @chan: channel
- * @offset: offset in the write buffer
- * @data_size: size of the payload
- * @pre_header_padding: padding to add before the header (output)
- * @rflags: reservation flags
- * @ctx: reservation context
- *
- * Returns the event header size (including padding).
- *
- * Important note :
- * The event header must be 32-bits. The total offset calculated here :
- *
- * Alignment of header struct on 32 bits (min arch size, header size)
- * + sizeof(header struct) (32-bits)
- * + (opt) u16 (ext. event id)
- * + (opt) u16 (event_size)
- * (if event_size == LTT_MAX_SMALL_SIZE, has ext. event size)
- * + (opt) u32 (ext. event size)
- * + (opt) u64 full TSC (aligned on min(64-bits, arch size))
- *
- * The payload must itself determine its own alignment from the biggest type it
- * contains.
- */
-static __inline__
-unsigned char record_header_size(const struct lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
- size_t data_size, size_t *pre_header_padding,
- unsigned int rflags,
- struct lib_ring_buffer_ctx *ctx)
-{
- size_t orig_offset = offset;
- size_t padding;
-
- BUILD_BUG_ON(sizeof(struct event_header) != sizeof(u32));
-
- padding = lib_ring_buffer_align(offset,
- sizeof(struct event_header));
- offset += padding;
- offset += sizeof(struct event_header);
-
- if (unlikely(rflags)) {
- switch (rflags) {
- case LTT_RFLAG_ID_SIZE_TSC:
- offset += sizeof(u16) + sizeof(u16);
- if (data_size >= LTT_MAX_SMALL_SIZE)
- offset += sizeof(u32);
- offset += lib_ring_buffer_align(offset, sizeof(u64));
- offset += sizeof(u64);
- break;
- case LTT_RFLAG_ID_SIZE:
- offset += sizeof(u16) + sizeof(u16);
- if (data_size >= LTT_MAX_SMALL_SIZE)
- offset += sizeof(u32);
- break;
- case LTT_RFLAG_ID:
- offset += sizeof(u16);
- break;
- }
- }
-
- *pre_header_padding = padding;
- return offset - orig_offset;
-}
-
-#include <linux/ringbuffer/api.h>
-
-extern
-void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
- u16 eID, u32 event_size);
-
-/*
- * ltt_write_event_header
- *
- * Writes the event header to the offset (already aligned on 32-bits).
- *
- * @config: ring buffer instance configuration
- * @ctx: reservation context
- * @eID : event ID
- * @event_size : size of the event, excluding the event header.
- */
-static __inline__
-void ltt_write_event_header(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
- u16 eID, u32 event_size)
-{
- struct event_header header;
-
- if (unlikely(ctx->rflags))
- goto slow_path;
-
- header.id_time = eID << LTT_TSC_BITS;
- header.id_time |= (u32)ctx->tsc & LTT_TSC_MASK;
- lib_ring_buffer_write(config, ctx, &header, sizeof(header));
-
-slow_path:
- ltt_write_event_header_slow(config, ctx, eID, event_size);
-}
+#ifdef RING_BUFFER_ALIGN
+#define ltt_alignof(type) __alignof__(type)
+#else
+#define ltt_alignof(type) 1
+#endif
/* Tracer properties */
#define CTF_MAGIC_NUMBER 0xC1FC1FC1
-#define LTT_TRACER_VERSION_MAJOR 3
-#define LTT_TRACER_VERSION_MINOR 0
+#define TSDL_MAGIC_NUMBER 0x75D11D57
+#define CTF_VERSION_MAJOR 0
+#define CTF_VERSION_MINOR 1
-/**
- * ltt_write_trace_header - Write trace header
- * @priv: Private data (struct trace)
- * @header: Memory address where the information must be written to
+/*
+ * Number of milliseconds to keep retrying metadata writes when the buffer
+ * is full before failing (10 seconds).
*/
-static __inline__
-void write_trace_header(const struct lib_ring_buffer_config *config,
- struct packet_header *header)
-{
- header->magic = CTF_MAGIC_NUMBER;
-#if 0
- /* TODO: move start time to metadata */
- header->major_version = LTT_TRACER_VERSION_MAJOR;
- header->minor_version = LTT_TRACER_VERSION_MINOR;
- header->arch_size = sizeof(void *);
- header->alignment = lib_ring_buffer_get_alignment(config);
- header->start_time_sec = ltt_chan->session->start_time.tv_sec;
- header->start_time_usec = ltt_chan->session->start_time.tv_usec;
- header->start_freq = ltt_chan->session->start_freq;
- header->freq_scale = ltt_chan->session->freq_scale;
-#endif //0
-}
+#define LTTNG_METADATA_TIMEOUT_MSEC 10000
/*
* Size reserved for high priority events (interrupts, NMI, BH) at the end of a
*/
#define LTT_RESERVE_CRITICAL 4096
+#define LTT_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
+#define LTT_RFLAG_END (LTT_RFLAG_EXTENDED << 1)
+
/* Register and unregister function pointers */
enum ltt_module_function {