/**
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

/* This file contains functions for tracepoint custom probes support. */
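
/*
 * Background note (an interpretation, not from the original file):
 * specialized probes avoid the generic serializer's per-event
 * format-string interpretation. The event payload is a structure whose
 * layout is fixed at compile time, so no format parsing happens on the
 * fast path.
 */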
#include <urcu/rculist.h>
#include <ust/type-serializer.h>
#include <ust/core.h>
#include <ust/clock.h>
#include "tracer.h"

notrace
void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
                void *serialize_private, unsigned int data_size,
                unsigned int largest_align)
{
        int ret;
        uint16_t eID;
        size_t slot_size;
        unsigned int chan_index;
        struct ust_buffer *buf;
        struct ust_channel *chan;
        struct ust_trace *trace;
        u64 tsc;
        long buf_offset;
        int cpu;
        unsigned int rflags;

        /*
         * If we get here, it's probably because we have useful work to do.
         */
        if (unlikely(ltt_traces.num_active_traces == 0))
                return;

        rcu_read_lock();
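        /*
         * The current CPU index selects the per-CPU buffer that
         * ltt_reserve_slot() will reserve space from below.
         */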
        cpu = ust_get_cpu();

        /* Force volatile access. */
        STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);

        /*
         * asm volatile and "memory" clobber prevent the compiler from moving
         * instructions out of the ltt nesting count. This is required to ensure
         * that probe side-effects which can cause recursion (e.g. unforeseen
         * traps, divisions by 0, ...) are triggered within the incremented
         * nesting count section.
         */
        barrier();
        eID = mdata->event_id;
        chan_index = mdata->channel_id;

        /*
         * Iterate on each trace. The number of active traces is typically
         * small, so plain list iteration is usually faster than iteration
         * with prefetch.
         */
        list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
                if (unlikely(!trace->active))
                        continue;
//ust//         if (unlikely(!ltt_run_filter(trace, eID)))
//ust//                 continue;
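                /*
                 * Event IDs that do not fit in the compact event header
                 * need an extended header carrying the full ID (and, with
                 * CONFIG_LTT_DEBUG_EVENT_SIZE, the event size as well).
                 */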
#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
                rflags = LTT_RFLAG_ID_SIZE;
#else
                if (unlikely(eID >= LTT_FREE_EVENTS))
                        rflags = LTT_RFLAG_ID;
                else
                        rflags = 0;
#endif
                /*
                 * Skip channels added after trace creation.
                 */
                if (unlikely(chan_index >= trace->nr_channels))
                        continue;
                chan = &trace->channels[chan_index];
                if (!chan->active)
                        continue;

                /* Reserve space: header and data. */
                ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
                                cpu, &buf, &slot_size, &buf_offset, &tsc,
                                &rflags);
                if (unlikely(ret < 0))
                        continue; /* buffer full */

                /* Out-of-order write: header and data. */
                buf_offset = ltt_write_event_header(chan, buf,
                                buf_offset, eID, data_size,
                                tsc, rflags);
                if (data_size) {
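                        /*
                         * Align the payload on its largest member so it can
                         * later be read back from the buffer without
                         * unaligned accesses.
                         */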
                        buf_offset += ltt_align(buf_offset, largest_align);
                        ust_buffers_write(buf, buf_offset,
                                        serialize_private, data_size);
                        buf_offset += data_size;
                }
                /* Out-of-order commit. */
                ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
        }
        /*
         * asm volatile and "memory" clobber prevent the compiler from moving
         * instructions out of the ltt nesting count. This is required to ensure
         * that probe side-effects which can cause recursion (e.g. unforeseen
         * traps, divisions by 0, ...) are triggered within the incremented
         * nesting count section.
         */
        barrier();
        STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
        rcu_read_unlock();
}
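
/*
 * A minimal sketch of a caller, kept under #if 0 as illustration only:
 * a specialized probe fills a fixed-layout structure and hands it to
 * _ltt_specialized_trace(). The payload struct and probe below are
 * hypothetical; "mdata" and "probe_data" would normally be supplied by
 * the marker registration machinery.
 */
#if 0
struct wakeup_payload {
        unsigned long pid;
        unsigned long prio;
};

static notrace void probe_sched_wakeup(const struct marker *mdata,
                void *probe_data, unsigned long pid, unsigned long prio)
{
        struct wakeup_payload payload = {
                .pid = pid,
                .prio = prio,
        };

        /* Largest payload member is an unsigned long. */
        _ltt_specialized_trace(mdata, probe_data, &payload, sizeof(payload),
                        sizeof(long));
}
#endif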