Ongoing transition to the generic ring buffer
[lttng-modules.git] / ltt-channels.c
1 /*
2 * ltt/ltt-channels.c
3 *
4 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
5 *
6 * LTTng channel management.
7 *
8 * Author:
9 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <linux/slab.h>
17 #include <linux/vmalloc.h>
18 #include "ltt-channels.h"
19
20 /*
21 * ltt_channel_mutex may be nested inside the LTT trace mutex.
22 * ltt_channel_mutex may be nested inside the markers mutex.
23 */
24 static DEFINE_MUTEX(ltt_channel_mutex);
25 static LIST_HEAD(ltt_channels);
26 /*
27 * Index of next channel in array. Makes sure that as long as a trace channel is
28 * allocated, no array index will be re-used when a channel is freed and then
29 * another channel is allocated. This index is cleared and the array indexes
30 * get reassigned when the index_kref goes back to 0, which indicates that no
31 * more trace channels are allocated.
32 */
33 static unsigned int free_index;
34 /* index_kref is protected by both ltt_channel_mutex and lock_markers */
35 static struct kref index_kref; /* Keeps track of allocated trace channels */
36
37 static struct ltt_channel_setting *lookup_channel(const char *name)
38 {
39 struct ltt_channel_setting *iter;
40
41 list_for_each_entry(iter, &ltt_channels, list)
42 if (strcmp(name, iter->name) == 0)
43 return iter;
44 return NULL;
45 }
46
/*
 * Must be called when channel refcount falls to 0 _and_ also when the last
 * trace is freed. This function is responsible for compacting the channel and
 * event IDs when no users are active.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_channel_setting(struct kref *kref)
{
	struct ltt_channel_setting *setting = container_of(kref,
		struct ltt_channel_setting, kref);
	struct ltt_channel_setting *iter;

	/*
	 * Only reclaim when no trace session pins the index space
	 * (index_kref == 0) and this channel itself is unreferenced.
	 * Otherwise the entry is kept so its index stays stable.
	 */
	if (atomic_read(&index_kref.refcount) == 0
		&& atomic_read(&setting->kref.refcount) == 0) {
		list_del(&setting->list);
		kfree(setting);

		/* Compact: renumber surviving channels densely from 0
		 * and restart their per-channel event ID allocation. */
		free_index = 0;
		list_for_each_entry(iter, &ltt_channels, list) {
			iter->index = free_index++;
			iter->free_event_id = 0;
		}
	}
}
72
/*
 * Perform channel index compaction when the last trace channel is freed.
 *
 * Walks every channel setting and lets release_channel_setting() reclaim
 * those whose own refcount already dropped to zero (it checks the counts
 * internally — no reference is dropped here), then compacts marker event
 * IDs once no trace session remains.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_trace_channel(struct kref *kref)
{
	struct ltt_channel_setting *iter, *n;

	/* _safe variant: release_channel_setting() may list_del(iter). */
	list_for_each_entry_safe(iter, n, &ltt_channels, list)
		release_channel_setting(&iter->kref);
	if (atomic_read(&index_kref.refcount) == 0)
		markers_compact_event_ids();
}
87
88 /*
89 * ltt_channel_trace_ref : Is there an existing trace session ?
90 *
91 * Must be called with lock_markers() held.
92 */
93 int ltt_channels_trace_ref(void)
94 {
95 return !!atomic_read(&index_kref.refcount);
96 }
97 EXPORT_SYMBOL_GPL(ltt_channels_trace_ref);
98
/**
 * ltt_channels_register - Register a trace channel.
 * @name: channel name (at most PATH_MAX - 1 bytes are copied)
 *
 * Uses refcounting: registering an already-live channel only takes an
 * extra reference. An entry still on the list with a zero refcount
 * (kept so its index stays stable) is revived by re-initializing its
 * kref rather than reallocated.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int ltt_channels_register(const char *name)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (setting) {
		if (atomic_read(&setting->kref.refcount) == 0)
			goto init_kref;	/* dead entry: revive it */
		else {
			kref_get(&setting->kref);
			goto end;
		}
	}
	setting = kzalloc(sizeof(*setting), GFP_KERNEL);
	if (!setting) {
		ret = -ENOMEM;
		goto end;
	}
	list_add(&setting->list, &ltt_channels);
	/* kzalloc zeroed the struct, so the copy stays NUL-terminated
	 * even though strncpy does not guarantee termination itself. */
	strncpy(setting->name, name, PATH_MAX-1);
	setting->index = free_index++;
init_kref:
	kref_init(&setting->kref);
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(ltt_channels_register);
135
/**
 * ltt_channels_unregister - Unregister a trace channel.
 * @name: channel name
 * @compacting: non-zero when called from the compaction path; locking of
 *              ltt_channel_mutex and the event-ID compaction are then
 *              skipped (presumably handled by that caller — NOTE(review):
 *              confirm against the compaction call site).
 *
 * Drops one reference; release_channel_setting() reclaims the entry once
 * it is unreferenced and no trace session remains.
 * Must be called with markers mutex held.
 * Returns 0 on success, -ENOENT if the channel is unknown or already dead.
 */
int ltt_channels_unregister(const char *name, int compacting)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	if (!compacting)
		mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	/* A zero refcount means the channel was already unregistered. */
	if (!setting || atomic_read(&setting->kref.refcount) == 0) {
		ret = -ENOENT;
		goto end;
	}
	kref_put(&setting->kref, release_channel_setting);
	/* Last trace gone: renumber marker event IDs to compacted indexes. */
	if (!compacting && atomic_read(&index_kref.refcount) == 0)
		markers_compact_event_ids();
end:
	if (!compacting)
		mutex_unlock(&ltt_channel_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(ltt_channels_unregister);
164
165 /**
166 * ltt_channels_set_default - Set channel default behavior.
167 * @name: default channel name
168 * @sb_size: size of the subbuffers
169 * @n_sb: number of subbuffers
170 */
171 int ltt_channels_set_default(const char *name,
172 unsigned int sb_size,
173 unsigned int n_sb)
174 {
175 struct ltt_channel_setting *setting;
176 int ret = 0;
177
178 mutex_lock(&ltt_channel_mutex);
179 setting = lookup_channel(name);
180 if (!setting || atomic_read(&setting->kref.refcount) == 0) {
181 ret = -ENOENT;
182 goto end;
183 }
184 setting->sb_size = sb_size;
185 setting->n_sb = n_sb;
186 end:
187 mutex_unlock(&ltt_channel_mutex);
188 return ret;
189 }
190 EXPORT_SYMBOL_GPL(ltt_channels_set_default);
191
192 /**
193 * ltt_channels_get_name_from_index - get channel name from channel index
194 * @index: channel index
195 *
196 * Allows to lookup the channel name given its index. Done to keep the name
197 * information outside of each trace channel instance.
198 */
199 const char *ltt_channels_get_name_from_index(unsigned int index)
200 {
201 struct ltt_channel_setting *iter;
202
203 list_for_each_entry(iter, &ltt_channels, list)
204 if (iter->index == index && atomic_read(&iter->kref.refcount))
205 return iter->name;
206 return NULL;
207 }
208 EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
209
210 static struct ltt_channel_setting *
211 ltt_channels_get_setting_from_name(const char *name)
212 {
213 struct ltt_channel_setting *iter;
214
215 list_for_each_entry(iter, &ltt_channels, list)
216 if (!strcmp(iter->name, name)
217 && atomic_read(&iter->kref.refcount))
218 return iter;
219 return NULL;
220 }
221
222 /**
223 * ltt_channels_get_index_from_name - get channel index from channel name
224 * @name: channel name
225 *
226 * Allows to lookup the channel index given its name. Done to keep the name
227 * information outside of each trace channel instance.
228 * Returns -1 if not found.
229 */
230 int ltt_channels_get_index_from_name(const char *name)
231 {
232 struct ltt_channel_setting *setting;
233
234 setting = ltt_channels_get_setting_from_name(name);
235 if (setting)
236 return setting->index;
237 else
238 return -1;
239 }
240 EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
241
242 /**
243 * ltt_channels_trace_alloc - Allocate channel structures for a trace
244 *
245 * Use the current channel list to allocate the channels for a trace.
246 * Called with trace lock held. Does not perform the trace buffer allocation,
247 * because we must let the user overwrite specific channel sizes.
248 */
249 int ltt_channels_trace_alloc(struct ltt_trace *trace, int overwrite)
250 {
251 struct channel **chan = NULL;
252 struct ltt_channel_setting *chans, *iter;
253 int ret = 0;
254
255 lock_markers();
256 mutex_lock(&ltt_channel_mutex);
257 if (!free_index)
258 goto end;
259 if (!atomic_read(&index_kref.refcount))
260 kref_init(&index_kref);
261 else
262 kref_get(&index_kref);
263 trace->nr_channels = free_index;
264 chan = kzalloc(sizeof(struct channel *) * free_index, GFP_KERNEL);
265 if (!chan)
266 goto end;
267 chans = kzalloc(sizeof(struct ltt_channel_setting) * free_index,
268 GFP_KERNEL);
269 if (!chan_settings)
270 goto free_chan;
271 list_for_each_entry(iter, &ltt_channels, list) {
272 if (!atomic_read(&iter->kref.refcount))
273 continue;
274 chans[iter->index].sb_size = iter->sb_size;
275 chans[iter->index].n_sb = iter->n_sb;
276 chans[iter->index].overwrite = overwrite;
277 strncpy(chans[iter->index].filename, iter->name,
278 NAME_MAX - 1);
279 chans[iter->index].switch_timer_interval = 0;
280 chans[iter->index].read_timer_interval = LTT_READ_TIMER_INTERVAL;
281 }
282 trace->channels = chan;
283 trace->settings = chans;
284 end:
285 mutex_unlock(&ltt_channel_mutex);
286 unlock_markers();
287 return ret;
288
289 free_chan:
290 kfree(chan);
291 ret = -ENOMEM;
292 goto end;
293 }
294 EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
295
/**
 * ltt_channels_trace_free - Free one trace's channels
 * @trace: trace whose settings and channel-pointer arrays are freed
 *
 * Called with trace lock held. The actual channel buffers must be freed
 * before this function is called. Dropping the last index_kref reference
 * triggers release_trace_channel(), which reclaims dead channel settings
 * and compacts marker event IDs.
 */
void ltt_channels_trace_free(struct ltt_trace *trace)
{
	/* Same lock order as ltt_channels_trace_alloc(): markers first. */
	lock_markers();
	mutex_lock(&ltt_channel_mutex);
	kfree(trace->settings);
	kfree(trace->channels);
	kref_put(&index_kref, release_trace_channel);
	mutex_unlock(&ltt_channel_mutex);
	unlock_markers();
	/* NOTE(review): presumably re-syncs marker probes after the
	 * potential ID compaction above — confirm with markers code. */
	marker_update_probes();
}
EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
315
/**
 * ltt_channels_trace_set_timer - set switch timer
 * @chan: channel to configure
 * @interval: interval of timer interrupt, in jiffies. 0 inhibits timer.
 *
 * Only records the interval; arming the timer happens elsewhere.
 */

void ltt_channels_trace_set_timer(struct ltt_chan *chan,
		unsigned long interval)
{
	chan->switch_timer_interval = interval;
}
EXPORT_SYMBOL_GPL(ltt_channels_trace_set_timer);
328
/**
 * _ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error:
 * -ENOENT for an unknown/dead channel or unknown metadata event,
 * -ENOSPC when the channel's ID space is exhausted.
 * Must be called with channels mutex held.
 */
int _ltt_channels_get_event_id(const char *channel, const char *name)
{
	struct ltt_channel_setting *setting;
	int ret;

	setting = ltt_channels_get_setting_from_name(channel);
	if (!setting) {
		ret = -ENOENT;
		goto end;
	}
	/* The metadata channel hands out fixed, well-known IDs. */
	if (strcmp(channel, "metadata") == 0) {
		if (strcmp(name, "core_marker_id") == 0)
			ret = 0;
		else if (strcmp(name, "core_marker_format") == 0)
			ret = 1;
		else
			ret = -ENOENT;
		goto end;
	}
	/*
	 * NOTE(review): ID EVENTS_PER_CHANNEL - 1 is never handed out —
	 * presumably reserved (e.g. for extended headers); confirm before
	 * treating this as an off-by-one.
	 */
	if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
		ret = -ENOSPC;
		goto end;
	}
	ret = setting->free_event_id++;
end:
	return ret;
}
364
365 /**
366 * ltt_channels_get_event_id - get next event ID for a marker
367 * @channel: channel name
368 * @name: event name
369 *
370 * Returns a unique event ID (for this channel) or < 0 on error.
371 */
372 int ltt_channels_get_event_id(const char *channel, const char *name)
373 {
374 int ret;
375
376 mutex_lock(&ltt_channel_mutex);
377 ret = _ltt_channels_get_event_id(channel, name);
378 mutex_unlock(&ltt_channel_mutex);
379 return ret;
380 }
381
382 /**
383 * ltt_channels_reset_event_ids - reset event IDs at compaction
384 *
385 * Called with lock marker and channel mutex held.
386 */
387 void _ltt_channels_reset_event_ids(void)
388 {
389 struct ltt_channel_setting *iter;
390
391 list_for_each_entry(iter, &ltt_channels, list)
392 iter->free_event_id = 0;
393 }
394
395 MODULE_LICENSE("GPL and additional rights");
396 MODULE_AUTHOR("Mathieu Desnoyers");
397 MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");
This page took 0.035961 seconds and 4 git commands to generate.