/**
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

/* This file contains the functions supporting tracepoint custom probes. */
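/*
 * Illustrative sketch: one way a custom tracepoint probe might pack its
 * payload and hand it to _ltt_specialized_trace(). The struct layout, the
 * field names and the probe function are hypothetical; only the
 * _ltt_specialized_trace() signature matches this file.
 *
 *	struct my_event_payload {
 *		uint64_t delay_ns;
 *		uint32_t fd;
 *		uint16_t flags;
 *	} __attribute__((packed));
 *
 *	static void my_probe(const struct ust_marker *mdata, void *probe_data,
 *			     uint64_t delay_ns, int fd, int flags)
 *	{
 *		struct my_event_payload payload;
 *
 *		payload.delay_ns = delay_ns;
 *		payload.fd = fd;
 *		payload.flags = flags;
 *		// Largest member is 8 bytes wide, hence largest_align = 8.
 *		_ltt_specialized_trace(mdata, probe_data, &payload,
 *				       sizeof(payload), sizeof(uint64_t));
 *	}
 */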

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <urcu/rculist.h>
#include <ust/core.h>
#include <ust/clock.h>
#include <urcu-bp.h>
#include "tracer.h"
#include "type-serializer.h"

notrace
void _ltt_specialized_trace(const struct ust_marker *mdata, void *probe_data,
		void *serialize_private, unsigned int data_size,
		unsigned int largest_align)
{
	int ret;
	uint16_t eID;
	size_t slot_size;
	unsigned int chan_index;
	struct ust_buffer *buf;
	struct ust_channel *chan;
	struct ust_trace *trace;
	u64 tsc;
	long buf_offset;
	int cpu;
	unsigned int rflags;

	/*
	 * If we get here, it's probably because we have useful work to do.
	 */
	if (unlikely(ltt_traces.num_active_traces == 0))
		return;

	rcu_read_lock();
	cpu = ust_get_cpu();

	/* Force volatile access. */
	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);

	/*
	 * asm volatile and "memory" clobber prevent the compiler from moving
	 * instructions out of the ltt nesting count section. This is required
	 * to ensure that probe side-effects which can cause recursion (e.g.
	 * unforeseen traps, divisions by 0, ...) are triggered within the
	 * incremented nesting count section.
	 */
	cmm_barrier();
	eID = mdata->event_id;
	chan_index = mdata->channel_id;

	/*
	 * Iterate over each trace. The number of active traces is typically
	 * small, so list iteration with prefetch is usually slower.
	 */
	cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
		if (unlikely(!trace->active))
			continue;
//ust//		if (unlikely(!ltt_run_filter(trace, eID)))
//ust//			continue;
#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
		rflags = LTT_RFLAG_ID_SIZE;
#else
		if (unlikely(eID >= LTT_FREE_EVENTS))
			rflags = LTT_RFLAG_ID;
		else
			rflags = 0;
#endif
		/*
		 * Skip channels added after trace creation.
		 */
		if (unlikely(chan_index >= trace->nr_channels))
			continue;
		chan = &trace->channels[chan_index];
		if (!chan->active)
			continue;

		/*
		 * If a new CPU was plugged in since the trace was started, it
		 * was not added to the trace, so write the event to CPU 0.
		 */
		if (cpu >= chan->n_cpus)
			cpu = 0;

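		/*
		 * Write path for this event: reserve a slot, write the event
		 * header, align the payload on largest_align, copy the
		 * data_size bytes of serialize_private, then commit the slot.
		 */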
		/* Reserve space: header and data */
		ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
					cpu, &buf, &slot_size, &buf_offset,
					&tsc, &rflags);
		if (unlikely(ret < 0))
			continue; /* buffer full */

		/* Out-of-order write: header and data */
		buf_offset = ltt_write_event_header(chan, buf,
				buf_offset, eID, data_size,
				tsc, rflags);
		if (data_size) {
			buf_offset += ltt_align(buf_offset, largest_align);
			ust_buffers_write(buf, buf_offset,
					serialize_private, data_size);
			buf_offset += data_size;
		}
		/* Out-of-order commit */
		ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
	}
	/*
	 * asm volatile and "memory" clobber prevent the compiler from moving
	 * instructions out of the ltt nesting count section. This is required
	 * to ensure that probe side-effects which can cause recursion (e.g.
	 * unforeseen traps, divisions by 0, ...) are triggered within the
	 * incremented nesting count section.
	 */
	cmm_barrier();
	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
	rcu_read_unlock();
}