libust: change some licences to LGPL
ust.git: libust/channels.c
/*
 * ltt/ltt-channels.c
 *
 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * LTTng channel management.
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

//ust// #include <linux/module.h>
//ust// #include <linux/ltt-channels.h>
//ust// #include <linux/mutex.h>
//ust// #include <linux/vmalloc.h>

#include "kernelcompat.h"
#include "channels.h"
#include "usterr.h"

/*
 * ltt_channel_mutex may be nested inside the LTT trace mutex.
 * ltt_channel_mutex may be nested inside the markers mutex.
 */
static DEFINE_MUTEX(ltt_channel_mutex);
static LIST_HEAD(ltt_channels);
/*
 * Index of the next channel in the array. Ensures that as long as any trace
 * channel is allocated, no array index is reused when a channel is freed and
 * another one is allocated. The index is cleared and the array indexes get
 * reassigned when index_kref drops back to 0, which indicates that no more
 * trace channels are allocated.
 */
static unsigned int free_index;
static struct kref index_kref;	/* Keeps track of allocated trace channels */
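
/*
 * Worked example (illustrative, with hypothetical channel names): registering
 * "ust", "metadata" and "interrupts" assigns them indices 0, 1 and 2. If
 * "metadata" is then unregistered while a trace still holds index_kref, its
 * setting and index stay reserved so per-trace channel arrays indexed by it
 * remain valid. Only when the last trace drops index_kref does
 * release_channel_setting() free the unused setting, reset free_index and
 * renumber the remaining channels 0 and 1.
 */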

static struct ltt_channel_setting *lookup_channel(const char *name)
{
	struct ltt_channel_setting *iter;

	list_for_each_entry(iter, &ltt_channels, list)
		if (strcmp(name, iter->name) == 0)
			return iter;
	return NULL;
}

/*
 * Must be called when channel refcount falls to 0 _and_ also when the last
 * trace is freed. This function is responsible for compacting the channel and
 * event IDs when no users are active.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_channel_setting(struct kref *kref)
{
	struct ltt_channel_setting *setting = container_of(kref,
		struct ltt_channel_setting, kref);
	struct ltt_channel_setting *iter;

	if (atomic_read(&index_kref.refcount) == 0
	    && atomic_read(&setting->kref.refcount) == 0) {
		list_del(&setting->list);
		kfree(setting);

		free_index = 0;
		list_for_each_entry(iter, &ltt_channels, list) {
			iter->index = free_index++;
			iter->free_event_id = 0;
		}
//ust//		markers_compact_event_ids();
	}
}

/*
 * Perform channel index compaction when the last trace channel is freed.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_trace_channel(struct kref *kref)
{
	struct ltt_channel_setting *iter, *n;

	list_for_each_entry_safe(iter, n, &ltt_channels, list)
		release_channel_setting(&iter->kref);
}

/**
 * ltt_channels_register - Register a trace channel.
 * @name: channel name
 *
 * Uses refcounting: registering an already-registered channel only takes an
 * additional reference on its setting.
 */
int ltt_channels_register(const char *name)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (setting) {
		if (atomic_read(&setting->kref.refcount) == 0)
			goto init_kref;
		else {
			kref_get(&setting->kref);
			goto end;
		}
	}
	setting = kzalloc(sizeof(*setting), GFP_KERNEL);
	if (!setting) {
		ret = -ENOMEM;
		goto end;
	}
	list_add(&setting->list, &ltt_channels);
	strncpy(setting->name, name, PATH_MAX - 1);
	setting->index = free_index++;
init_kref:
	kref_init(&setting->kref);
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);

/**
 * ltt_channels_unregister - Unregister a trace channel.
 * @name: channel name
 *
 * Must be called with markers mutex held.
 */
int ltt_channels_unregister(const char *name)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (!setting || atomic_read(&setting->kref.refcount) == 0) {
		ret = -ENOENT;
		goto end;
	}
	kref_put(&setting->kref, release_channel_setting);
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);
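
/*
 * Usage sketch (illustrative; the channel name is hypothetical): a probe
 * provider registers its channel once at load time and drops the reference
 * when it unloads. Calls must be balanced, since re-registering an existing
 * name only takes an extra reference.
 *
 *	int ret;
 *
 *	ret = ltt_channels_register("ust");
 *	if (ret)
 *		return ret;
 *	...
 *	ltt_channels_unregister("ust");
 */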

/**
 * ltt_channels_set_default - Set channel default behavior.
 * @name: default channel name
 * @subbuf_size: size of the subbuffers
 * @subbuf_cnt: number of subbuffers
 */
int ltt_channels_set_default(const char *name,
			     unsigned int subbuf_size,
			     unsigned int subbuf_cnt)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (!setting || atomic_read(&setting->kref.refcount) == 0) {
		ret = -ENOENT;
		goto end;
	}
	setting->subbuf_size = subbuf_size;
	setting->subbuf_cnt = subbuf_cnt;
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
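
/*
 * Usage sketch (illustrative; the channel name and sizes are arbitrary
 * example values): defaults can be tuned after the channel has been
 * registered and before the trace buffers are allocated. The values are only
 * stored in the ltt_channel_setting; they take effect when
 * ltt_channels_trace_alloc() copies them into the per-trace
 * ltt_channel_struct array.
 *
 *	ret = ltt_channels_set_default("ust", 4096, 8);
 *	if (ret)
 *		WARN("channel not registered, cannot set defaults");
 */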

/**
 * ltt_channels_get_name_from_index - get channel name from channel index
 * @index: channel index
 *
 * Allows looking up the channel name given its index. Done to keep the name
 * information outside of each trace channel instance.
 */
const char *ltt_channels_get_name_from_index(unsigned int index)
{
	struct ltt_channel_setting *iter;

	list_for_each_entry(iter, &ltt_channels, list)
		if (iter->index == index && atomic_read(&iter->kref.refcount))
			return iter->name;
	return NULL;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);

static struct ltt_channel_setting *
ltt_channels_get_setting_from_name(const char *name)
{
	struct ltt_channel_setting *iter;

	list_for_each_entry(iter, &ltt_channels, list)
		if (!strcmp(iter->name, name)
		    && atomic_read(&iter->kref.refcount))
			return iter;
	return NULL;
}

/**
 * ltt_channels_get_index_from_name - get channel index from channel name
 * @name: channel name
 *
 * Allows looking up the channel index given its name. Done to keep the name
 * information outside of each trace channel instance.
 * Returns -1 if not found.
 */
int ltt_channels_get_index_from_name(const char *name)
{
	struct ltt_channel_setting *setting;

	setting = ltt_channels_get_setting_from_name(name);
	if (setting)
		return setting->index;
	else
		return -1;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
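
/*
 * Usage sketch (illustrative; "ust" is a hypothetical channel name): the
 * index <-> name mapping lets a trace keep only an array of
 * ltt_channel_struct and recover the channel name on demand. Both lookups
 * skip settings whose refcount is 0, so an unregistered channel is never
 * returned even while its index is still reserved.
 *
 *	int idx = ltt_channels_get_index_from_name("ust");
 *	const char *name = NULL;
 *
 *	if (idx >= 0)
 *		name = ltt_channels_get_name_from_index(idx);
 */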

/**
 * ltt_channels_trace_alloc - Allocate channel structures for a trace
 * @nr_channels: output parameter, set to the number of channels allocated
 * @overwrite: overwrite flag applied to every channel
 * @active: active flag applied to every channel
 *
 * Use the current channel list to allocate the channels for a trace.
 * Called with trace lock held. Does not perform the trace buffer allocation,
 * because we must let the user overwrite specific channel sizes.
 */
struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
						    int overwrite,
						    int active)
{
	struct ltt_channel_struct *channel = NULL;
	struct ltt_channel_setting *iter;

	mutex_lock(&ltt_channel_mutex);
	if (!free_index) {
		WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
		goto end;
	}
	if (!atomic_read(&index_kref.refcount))
		kref_init(&index_kref);
	else
		kref_get(&index_kref);
	*nr_channels = free_index;
	channel = kzalloc(sizeof(struct ltt_channel_struct) * free_index,
			  GFP_KERNEL);
	if (!channel) {
		WARN("ltt_channel_struct: channel null after alloc");
		goto end;
	}
	list_for_each_entry(iter, &ltt_channels, list) {
		if (!atomic_read(&iter->kref.refcount))
			continue;
		channel[iter->index].subbuf_size = iter->subbuf_size;
		channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
		channel[iter->index].overwrite = overwrite;
		channel[iter->index].active = active;
		channel[iter->index].channel_name = iter->name;
	}
end:
	mutex_unlock(&ltt_channel_mutex);
	return channel;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
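
/*
 * Lifecycle sketch (illustrative; the overwrite/active values are arbitrary):
 * a tracer allocates the per-trace channel array once the probes have
 * registered their channels, sets up its buffers from the returned settings,
 * and frees the array after those buffers are released. The kref_get()/
 * kref_put() pair on index_kref pins the channel indices for the whole
 * lifetime of the trace.
 *
 *	unsigned int nr_channels;
 *	struct ltt_channel_struct *chan;
 *
 *	chan = ltt_channels_trace_alloc(&nr_channels, 0, 1);
 *	if (!chan)
 *		return -ENOMEM;
 *	... allocate and use the buffers described by chan[0..nr_channels-1] ...
 *	ltt_channels_trace_free(chan);
 */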

/**
 * ltt_channels_trace_free - Free one trace's channels
 * @channels: channels to free
 *
 * Called with trace lock held. The actual channel buffers must be freed before
 * this function is called.
 */
void ltt_channels_trace_free(struct ltt_channel_struct *channels)
{
	lock_markers();
	mutex_lock(&ltt_channel_mutex);
	kfree(channels);
	kref_put(&index_kref, release_trace_channel);
	mutex_unlock(&ltt_channel_mutex);
	unlock_markers();
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);

/**
 * _ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error.
 * Must be called with channels mutex held.
 */
int _ltt_channels_get_event_id(const char *channel, const char *name)
{
	struct ltt_channel_setting *setting;
	int ret;

	setting = ltt_channels_get_setting_from_name(channel);
	if (!setting) {
		ret = -ENOENT;
		goto end;
	}
	if (strcmp(channel, "metadata") == 0) {
		if (strcmp(name, "core_marker_id") == 0)
			ret = 0;
		else if (strcmp(name, "core_marker_format") == 0)
			ret = 1;
		else if (strcmp(name, "testev") == 0)
			ret = 2;
		else
			ret = -ENOENT;
		goto end;
	}
	if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
		ret = -ENOSPC;
		goto end;
	}
	ret = setting->free_event_id++;
end:
	return ret;
}

/**
 * ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error.
 */
int ltt_channels_get_event_id(const char *channel, const char *name)
{
	int ret;

	mutex_lock(&ltt_channel_mutex);
	ret = _ltt_channels_get_event_id(channel, name);
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
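
/*
 * Usage sketch (illustrative; "ust" and "my_event" are hypothetical names):
 * a marker being armed asks for its event ID within its channel. IDs are
 * handed out sequentially per channel, except for the fixed IDs reserved in
 * the "metadata" channel above.
 *
 *	int ev_id = ltt_channels_get_event_id("ust", "my_event");
 *	if (ev_id < 0)
 *		WARN("cannot allocate event ID");
 */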

//ust// MODULE_LICENSE("GPL");
//ust// MODULE_AUTHOR("Mathieu Desnoyers");
//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");