/*
 * ust: add kernel ltt-channels.[ch], removing the ltt- part
 * Source: ust.git, libtracing/channels.c (commit 99054cee, PMF)
 */
1/*
2 * ltt/ltt-channels.c
3 *
4 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
5 *
6 * LTTng channel management.
7 *
8 * Author:
9 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
10 */
11
12#include <linux/module.h>
13#include <linux/ltt-channels.h>
14#include <linux/mutex.h>
15#include <linux/vmalloc.h>
16
/*
 * ltt_channel_mutex may be nested inside the LTT trace mutex.
 * ltt_channel_mutex may be nested inside the markers mutex.
 */
21static DEFINE_MUTEX(ltt_channel_mutex);
22static LIST_HEAD(ltt_channels);
/*
 * Index of next channel in array. Makes sure that as long as a trace channel is
 * allocated, no array index will be re-used when a channel is freed and then
 * another channel is allocated. This index is cleared and the array indexes
 * get reassigned when the index_kref goes back to 0, which indicates that no
 * more trace channels are allocated.
 */
30static unsigned int free_index;
31static struct kref index_kref; /* Keeps track of allocated trace channels */
32
33static struct ltt_channel_setting *lookup_channel(const char *name)
34{
35 struct ltt_channel_setting *iter;
36
37 list_for_each_entry(iter, &ltt_channels, list)
38 if (strcmp(name, iter->name) == 0)
39 return iter;
40 return NULL;
41}
42
/*
 * Must be called when channel refcount falls to 0 _and_ also when the last
 * trace is freed. This function is responsible for compacting the channel and
 * event IDs when no users are active.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_channel_setting(struct kref *kref)
{
	struct ltt_channel_setting *setting = container_of(kref,
		struct ltt_channel_setting, kref);
	struct ltt_channel_setting *iter;

	/*
	 * Only tear down and compact once both this channel's own refcount
	 * and the global trace-channel refcount (index_kref) are zero:
	 * while any trace is still allocated, indices must stay stable.
	 */
	if (atomic_read(&index_kref.refcount) == 0
	    && atomic_read(&setting->kref.refcount) == 0) {
		list_del(&setting->list);
		kfree(setting);

		/* Reassign compact, gap-free indices to surviving channels. */
		free_index = 0;
		list_for_each_entry(iter, &ltt_channels, list) {
			iter->index = free_index++;
			iter->free_event_id = 0;
		}
		/*
		 * Event ID counters were reset above; renumber the marker
		 * event IDs to match (requires lock_markers(), held by
		 * the caller).
		 */
		markers_compact_event_ids();
	}
}
69
/*
 * Perform channel index compaction when the last trace channel is freed.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_trace_channel(struct kref *kref)
{
	struct ltt_channel_setting *iter, *n;

	/*
	 * Offer every setting for release; release_channel_setting() only
	 * frees entries whose own refcount is already zero, so channels
	 * that are still registered survive. The _safe iterator is
	 * required because entries may be unlinked during the walk.
	 */
	list_for_each_entry_safe(iter, n, &ltt_channels, list)
		release_channel_setting(&iter->kref);
}
82
83/**
84 * ltt_channels_register - Register a trace channel.
85 * @name: channel name
86 *
87 * Uses refcounting.
88 */
89int ltt_channels_register(const char *name)
90{
91 struct ltt_channel_setting *setting;
92 int ret = 0;
93
94 mutex_lock(&ltt_channel_mutex);
95 setting = lookup_channel(name);
96 if (setting) {
97 if (atomic_read(&setting->kref.refcount) == 0)
98 goto init_kref;
99 else {
100 kref_get(&setting->kref);
101 goto end;
102 }
103 }
104 setting = kzalloc(sizeof(*setting), GFP_KERNEL);
105 if (!setting) {
106 ret = -ENOMEM;
107 goto end;
108 }
109 list_add(&setting->list, &ltt_channels);
110 strncpy(setting->name, name, PATH_MAX-1);
111 setting->index = free_index++;
112init_kref:
113 kref_init(&setting->kref);
114end:
115 mutex_unlock(&ltt_channel_mutex);
116 return ret;
117}
118EXPORT_SYMBOL_GPL(ltt_channels_register);
119
120/**
121 * ltt_channels_unregister - Unregister a trace channel.
122 * @name: channel name
123 *
124 * Must be called with markers mutex held.
125 */
126int ltt_channels_unregister(const char *name)
127{
128 struct ltt_channel_setting *setting;
129 int ret = 0;
130
131 mutex_lock(&ltt_channel_mutex);
132 setting = lookup_channel(name);
133 if (!setting || atomic_read(&setting->kref.refcount) == 0) {
134 ret = -ENOENT;
135 goto end;
136 }
137 kref_put(&setting->kref, release_channel_setting);
138end:
139 mutex_unlock(&ltt_channel_mutex);
140 return ret;
141}
142EXPORT_SYMBOL_GPL(ltt_channels_unregister);
143
144/**
145 * ltt_channels_set_default - Set channel default behavior.
146 * @name: default channel name
147 * @subbuf_size: size of the subbuffers
148 * @subbuf_cnt: number of subbuffers
149 */
150int ltt_channels_set_default(const char *name,
151 unsigned int subbuf_size,
152 unsigned int subbuf_cnt)
153{
154 struct ltt_channel_setting *setting;
155 int ret = 0;
156
157 mutex_lock(&ltt_channel_mutex);
158 setting = lookup_channel(name);
159 if (!setting || atomic_read(&setting->kref.refcount) == 0) {
160 ret = -ENOENT;
161 goto end;
162 }
163 setting->subbuf_size = subbuf_size;
164 setting->subbuf_cnt = subbuf_cnt;
165end:
166 mutex_unlock(&ltt_channel_mutex);
167 return ret;
168}
169EXPORT_SYMBOL_GPL(ltt_channels_set_default);
170
171/**
172 * ltt_channels_get_name_from_index - get channel name from channel index
173 * @index: channel index
174 *
175 * Allows to lookup the channel name given its index. Done to keep the name
176 * information outside of each trace channel instance.
177 */
178const char *ltt_channels_get_name_from_index(unsigned int index)
179{
180 struct ltt_channel_setting *iter;
181
182 list_for_each_entry(iter, &ltt_channels, list)
183 if (iter->index == index && atomic_read(&iter->kref.refcount))
184 return iter->name;
185 return NULL;
186}
187EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
188
189static struct ltt_channel_setting *
190ltt_channels_get_setting_from_name(const char *name)
191{
192 struct ltt_channel_setting *iter;
193
194 list_for_each_entry(iter, &ltt_channels, list)
195 if (!strcmp(iter->name, name)
196 && atomic_read(&iter->kref.refcount))
197 return iter;
198 return NULL;
199}
200
201/**
202 * ltt_channels_get_index_from_name - get channel index from channel name
203 * @name: channel name
204 *
205 * Allows to lookup the channel index given its name. Done to keep the name
206 * information outside of each trace channel instance.
207 * Returns -1 if not found.
208 */
209int ltt_channels_get_index_from_name(const char *name)
210{
211 struct ltt_channel_setting *setting;
212
213 setting = ltt_channels_get_setting_from_name(name);
214 if (setting)
215 return setting->index;
216 else
217 return -1;
218}
219EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
220
221/**
222 * ltt_channels_trace_alloc - Allocate channel structures for a trace
223 * @subbuf_size: subbuffer size. 0 uses default.
224 * @subbuf_cnt: number of subbuffers per per-cpu buffers. 0 uses default.
225 * @flags: Default channel flags
226 *
227 * Use the current channel list to allocate the channels for a trace.
228 * Called with trace lock held. Does not perform the trace buffer allocation,
229 * because we must let the user overwrite specific channel sizes.
230 */
231struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
232 int overwrite,
233 int active)
234{
235 struct ltt_channel_struct *channel = NULL;
236 struct ltt_channel_setting *iter;
237
238 mutex_lock(&ltt_channel_mutex);
239 if (!free_index)
240 goto end;
241 if (!atomic_read(&index_kref.refcount))
242 kref_init(&index_kref);
243 else
244 kref_get(&index_kref);
245 *nr_channels = free_index;
246 channel = kzalloc(sizeof(struct ltt_channel_struct) * free_index,
247 GFP_KERNEL);
248 if (!channel)
249 goto end;
250 list_for_each_entry(iter, &ltt_channels, list) {
251 if (!atomic_read(&iter->kref.refcount))
252 continue;
253 channel[iter->index].subbuf_size = iter->subbuf_size;
254 channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
255 channel[iter->index].overwrite = overwrite;
256 channel[iter->index].active = active;
257 channel[iter->index].channel_name = iter->name;
258 }
259end:
260 mutex_unlock(&ltt_channel_mutex);
261 return channel;
262}
263EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
264
/**
 * ltt_channels_trace_free - Free one trace's channels
 * @channels: channels to free
 *
 * Called with trace lock held. The actual channel buffers must be freed before
 * this function is called.
 */
void ltt_channels_trace_free(struct ltt_channel_struct *channels)
{
	/*
	 * Lock ordering: markers mutex outside ltt_channel_mutex (see the
	 * nesting comment at the top of this file). Both are required
	 * because dropping the last index_kref runs
	 * release_trace_channel(), which compacts channel indices and
	 * marker event IDs.
	 */
	lock_markers();
	mutex_lock(&ltt_channel_mutex);
	kfree(channels);
	kref_put(&index_kref, release_trace_channel);
	mutex_unlock(&ltt_channel_mutex);
	unlock_markers();
}
EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
282
/**
 * _ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error.
 * Must be called with channels mutex held.
 */
int _ltt_channels_get_event_id(const char *channel, const char *name)
{
	struct ltt_channel_setting *setting;
	int ret;

	setting = ltt_channels_get_setting_from_name(channel);
	if (!setting) {
		ret = -ENOENT;
		goto end;
	}
	/*
	 * The metadata channel uses fixed, well-known event IDs instead of
	 * the per-channel counter; these do not consume free_event_id.
	 */
	if (strcmp(channel, "metadata") == 0) {
		if (strcmp(name, "core_marker_id") == 0)
			ret = 0;
		else if (strcmp(name, "core_marker_format") == 0)
			ret = 1;
		else
			ret = -ENOENT;
		goto end;
	}
	/*
	 * NOTE(review): this check refuses to hand out the final ID
	 * (EVENTS_PER_CHANNEL - 1), allocating only 0..EVENTS_PER_CHANNEL-2.
	 * Possibly the last ID is reserved; if not, the comparison looks
	 * off by one (">= EVENTS_PER_CHANNEL" would use the full range) —
	 * confirm against the event ID encoding before changing.
	 */
	if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
		ret = -ENOSPC;
		goto end;
	}
	ret = setting->free_event_id++;
end:
	return ret;
}
318
319/**
320 * ltt_channels_get_event_id - get next event ID for a marker
321 * @channel: channel name
322 * @name: event name
323 *
324 * Returns a unique event ID (for this channel) or < 0 on error.
325 */
326int ltt_channels_get_event_id(const char *channel, const char *name)
327{
328 int ret;
329
330 mutex_lock(&ltt_channel_mutex);
331 ret = _ltt_channels_get_event_id(channel, name);
332 mutex_unlock(&ltt_channel_mutex);
333 return ret;
334}
335
336MODULE_LICENSE("GPL");
337MODULE_AUTHOR("Mathieu Desnoyers");
338MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");
/* (git-web page footer removed: "This page took 0.033599 seconds and 4 git commands to generate.") */