lttng-modules v0.19-stable: setup_trace_write: Fix recursive locking
[lttng-modules.git] / ltt-channels.c
CommitLineData
1c8284eb
MD
1 /*
2 * ltt/ltt-channels.c
3 *
4 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
5 *
6 * LTTng channel management.
7 *
8 * Author:
9 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
10 *
11 * Dual LGPL v2.1/GPL v2 license.
12 */
13
14#include <linux/module.h>
15#include <linux/mutex.h>
16#include <linux/slab.h>
17#include <linux/vmalloc.h>
18#include <linux/ltt-channels.h>
19
20/*
21 * ltt_channel_mutex may be nested inside the LTT trace mutex.
 22 * ltt_channel_mutex may be nested inside the markers mutex.
23 */
24static DEFINE_MUTEX(ltt_channel_mutex);
25static LIST_HEAD(ltt_channels);
26/*
27 * Index of next channel in array. Makes sure that as long as a trace channel is
28 * allocated, no array index will be re-used when a channel is freed and then
 29 * another channel is allocated. This index is cleared and the array indexes
30 * get reassigned when the index_kref goes back to 0, which indicates that no
31 * more trace channels are allocated.
32 */
33static unsigned int free_index;
34/* index_kref is protected by both ltt_channel_mutex and lock_markers */
35static struct kref index_kref; /* Keeps track of allocated trace channels */
36
37static struct ltt_channel_setting *lookup_channel(const char *name)
38{
39 struct ltt_channel_setting *iter;
40
41 list_for_each_entry(iter, &ltt_channels, list)
42 if (strcmp(name, iter->name) == 0)
43 return iter;
44 return NULL;
45}
46
/*
 * Must be called when channel refcount falls to 0 _and_ also when the last
 * trace is freed. This function is responsible for compacting the channel and
 * event IDs when no users are active.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_channel_setting(struct kref *kref)
{
	struct ltt_channel_setting *setting = container_of(kref,
		struct ltt_channel_setting, kref);
	struct ltt_channel_setting *iter;

	/*
	 * Only free and compact once both the trace-channel index refcount
	 * and this setting's own refcount have dropped to 0.
	 * NOTE(review): peeks directly at kref.refcount; this relies on the
	 * two locks named above to avoid racing a concurrent kref_get().
	 */
	if (atomic_read(&index_kref.refcount) == 0
	    && atomic_read(&setting->kref.refcount) == 0) {
		list_del(&setting->list);
		kfree(setting);

		/*
		 * Renumber the surviving channels contiguously from 0 and
		 * restart their per-channel event ID allocation.
		 */
		free_index = 0;
		list_for_each_entry(iter, &ltt_channels, list) {
			iter->index = free_index++;
			iter->free_event_id = 0;
		}
	}
}
72
/*
 * Perform channel index compaction when the last trace channel is freed.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_trace_channel(struct kref *kref)
{
	struct ltt_channel_setting *iter, *n;

	/*
	 * Invoke release_channel_setting() directly (not via kref_put) on
	 * every entry; it frees only the settings whose own refcount is
	 * already 0 and renumbers the survivors. The _safe iterator is
	 * required because entries may be deleted while walking.
	 */
	list_for_each_entry_safe(iter, n, &ltt_channels, list)
		release_channel_setting(&iter->kref);
	/* No trace holds the index anymore: compact marker event IDs. */
	if (atomic_read(&index_kref.refcount) == 0)
		markers_compact_event_ids();
}
87
88/*
89 * ltt_channel_trace_ref : Is there an existing trace session ?
90 *
91 * Must be called with lock_markers() held.
92 */
93int ltt_channels_trace_ref(void)
94{
95 return !!atomic_read(&index_kref.refcount);
96}
97EXPORT_SYMBOL_GPL(ltt_channels_trace_ref);
98
/**
 * ltt_channels_register - Register a trace channel.
 * @name: channel name
 *
 * Uses refcounting. Returns 0 on success, -ENOMEM if the channel setting
 * cannot be allocated.
 */
int ltt_channels_register(const char *name)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (setting) {
		/* Dead entry kept for index stability: revive its kref. */
		if (atomic_read(&setting->kref.refcount) == 0)
			goto init_kref;
		else {
			kref_get(&setting->kref);
			goto end;
		}
	}
	setting = kzalloc(sizeof(*setting), GFP_KERNEL);
	if (!setting) {
		ret = -ENOMEM;
		goto end;
	}
	list_add(&setting->list, &ltt_channels);
	/*
	 * kzalloc() zeroed the buffer, so copying at most PATH_MAX-1 bytes
	 * leaves the name NUL-terminated (assumes the name field holds at
	 * least PATH_MAX bytes — TODO confirm against ltt-channels.h).
	 */
	strncpy(setting->name, name, PATH_MAX-1);
	setting->index = free_index++;
init_kref:
	kref_init(&setting->kref);
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(ltt_channels_register);
135
/**
 * ltt_channels_unregister - Unregister a trace channel.
 * @name: channel name
 * @compacting: non-zero when called from the compaction path, in which case
 *              the caller already holds ltt_channel_mutex and event ID
 *              compaction is deferred to the caller.
 *
 * Must be called with markers mutex held.
 * Returns 0 on success, -ENOENT if the channel is unknown or already dead.
 */
int ltt_channels_unregister(const char *name, int compacting)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	/* When compacting, the caller already owns ltt_channel_mutex. */
	if (!compacting)
		mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (!setting || atomic_read(&setting->kref.refcount) == 0) {
		ret = -ENOENT;
		goto end;
	}
	kref_put(&setting->kref, release_channel_setting);
	/* Compact marker event IDs once no trace references the index. */
	if (!compacting && atomic_read(&index_kref.refcount) == 0)
		markers_compact_event_ids();
end:
	if (!compacting)
		mutex_unlock(&ltt_channel_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(ltt_channels_unregister);
164
165/**
166 * ltt_channels_set_default - Set channel default behavior.
167 * @name: default channel name
168 * @sb_size: size of the subbuffers
169 * @n_sb: number of subbuffers
170 */
171int ltt_channels_set_default(const char *name,
172 unsigned int sb_size,
173 unsigned int n_sb)
174{
175 struct ltt_channel_setting *setting;
176 int ret = 0;
177
178 mutex_lock(&ltt_channel_mutex);
179 setting = lookup_channel(name);
180 if (!setting || atomic_read(&setting->kref.refcount) == 0) {
181 ret = -ENOENT;
182 goto end;
183 }
184 setting->sb_size = sb_size;
185 setting->n_sb = n_sb;
186end:
187 mutex_unlock(&ltt_channel_mutex);
188 return ret;
189}
190EXPORT_SYMBOL_GPL(ltt_channels_set_default);
191
192/**
193 * ltt_channels_get_name_from_index - get channel name from channel index
194 * @index: channel index
195 *
196 * Allows to lookup the channel name given its index. Done to keep the name
197 * information outside of each trace channel instance.
198 */
199const char *ltt_channels_get_name_from_index(unsigned int index)
200{
201 struct ltt_channel_setting *iter;
202
203 list_for_each_entry(iter, &ltt_channels, list)
204 if (iter->index == index && atomic_read(&iter->kref.refcount))
205 return iter->name;
206 return NULL;
207}
208EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);
209
210static struct ltt_channel_setting *
211ltt_channels_get_setting_from_name(const char *name)
212{
213 struct ltt_channel_setting *iter;
214
215 list_for_each_entry(iter, &ltt_channels, list)
216 if (!strcmp(iter->name, name)
217 && atomic_read(&iter->kref.refcount))
218 return iter;
219 return NULL;
220}
221
222/**
223 * ltt_channels_get_index_from_name - get channel index from channel name
224 * @name: channel name
225 *
226 * Allows to lookup the channel index given its name. Done to keep the name
227 * information outside of each trace channel instance.
228 * Returns -1 if not found.
229 */
230int ltt_channels_get_index_from_name(const char *name)
231{
232 struct ltt_channel_setting *setting;
233
234 setting = ltt_channels_get_setting_from_name(name);
235 if (setting)
236 return setting->index;
237 else
238 return -1;
239}
240EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
241
242/**
243 * ltt_channels_trace_alloc - Allocate channel structures for a trace
244 * @sb_size: subbuffer size. 0 uses default.
245 * @n_sb: number of subbuffers per per-cpu buffers. 0 uses default.
246 * @flags: Default channel flags
247 *
248 * Use the current channel list to allocate the channels for a trace.
249 * Called with trace lock held. Does not perform the trace buffer allocation,
250 * because we must let the user overwrite specific channel sizes.
251 */
252struct ltt_chan *ltt_channels_trace_alloc(unsigned int *nr_channels,
253 int overwrite, int active)
254{
255 struct ltt_chan *chan = NULL;
256 struct ltt_channel_setting *iter;
257
258 lock_markers();
259 mutex_lock(&ltt_channel_mutex);
260 if (!free_index)
261 goto end;
262 if (!atomic_read(&index_kref.refcount))
263 kref_init(&index_kref);
264 else
265 kref_get(&index_kref);
266 *nr_channels = free_index;
267 chan = kzalloc(sizeof(struct ltt_chan) * free_index, GFP_KERNEL);
268 if (!chan)
269 goto end;
270 list_for_each_entry(iter, &ltt_channels, list) {
271 if (!atomic_read(&iter->kref.refcount))
272 continue;
273 chan[iter->index].a.sb_size = iter->sb_size;
274 chan[iter->index].a.n_sb = iter->n_sb;
275 chan[iter->index].overwrite = overwrite;
276 chan[iter->index].active = active;
277 strncpy(chan[iter->index].a.filename, iter->name, NAME_MAX - 1);
278 chan[iter->index].switch_timer_interval = 0;
279 }
280end:
281 mutex_unlock(&ltt_channel_mutex);
282 unlock_markers();
283 return chan;
284}
285EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);
286
/**
 * ltt_channels_trace_free - Free one trace's channels
 * @channels: channels to free
 * @nr_channels: number of channels in the array (unused here; kept for
 *               API symmetry with ltt_channels_trace_alloc())
 *
 * Called with trace lock held. The actual channel buffers must be freed before
 * this function is called.
 */
void ltt_channels_trace_free(struct ltt_chan *channels,
		unsigned int nr_channels)
{
	/* Same lock order as ltt_channels_trace_alloc(): markers, then channels. */
	lock_markers();
	mutex_lock(&ltt_channel_mutex);
	kfree(channels);
	/* May trigger release_trace_channel() and thus index compaction. */
	kref_put(&index_kref, release_trace_channel);
	mutex_unlock(&ltt_channel_mutex);
	unlock_markers();
	/* NOTE(review): runs outside both locks — presumably required by the
	 * marker/channel lock nesting documented at the top of this file;
	 * confirm before reordering. */
	marker_update_probes();
}
EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
306
/**
 * ltt_channels_trace_set_timer - set switch timer
 * @chan: channel
 * @interval: interval of timer interrupt, in jiffies. 0 inhibits timer.
 *
 * Only records the interval; the timer itself is armed elsewhere.
 */

void ltt_channels_trace_set_timer(struct ltt_chan *chan,
		unsigned long interval)
{
	chan->switch_timer_interval = interval;
}
EXPORT_SYMBOL_GPL(ltt_channels_trace_set_timer);
319
/**
 * _ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error:
 * -ENOENT if the channel (or metadata event name) is unknown, -ENOSPC
 * when the channel's event ID space is exhausted.
 * Must be called with channels mutex held.
 */
int _ltt_channels_get_event_id(const char *channel, const char *name)
{
	struct ltt_channel_setting *setting;
	int ret;

	setting = ltt_channels_get_setting_from_name(channel);
	if (!setting) {
		ret = -ENOENT;
		goto end;
	}
	/*
	 * The metadata channel maps its two core events to fixed,
	 * well-known IDs instead of consuming the per-channel counter.
	 */
	if (strcmp(channel, "metadata") == 0) {
		if (strcmp(name, "core_marker_id") == 0)
			ret = 0;
		else if (strcmp(name, "core_marker_format") == 0)
			ret = 1;
		else
			ret = -ENOENT;
		goto end;
	}
	/*
	 * NOTE(review): this refuses to hand out ID EVENTS_PER_CHANNEL-1,
	 * so the last ID is never allocated — presumably reserved; confirm,
	 * otherwise the comparison should be == EVENTS_PER_CHANNEL.
	 */
	if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
		ret = -ENOSPC;
		goto end;
	}
	ret = setting->free_event_id++;
end:
	return ret;
}
355
356/**
357 * ltt_channels_get_event_id - get next event ID for a marker
358 * @channel: channel name
359 * @name: event name
360 *
361 * Returns a unique event ID (for this channel) or < 0 on error.
362 */
363int ltt_channels_get_event_id(const char *channel, const char *name)
364{
365 int ret;
366
367 mutex_lock(&ltt_channel_mutex);
368 ret = _ltt_channels_get_event_id(channel, name);
369 mutex_unlock(&ltt_channel_mutex);
370 return ret;
371}
372
373/**
374 * ltt_channels_reset_event_ids - reset event IDs at compaction
375 *
376 * Called with lock marker and channel mutex held.
377 */
378void _ltt_channels_reset_event_ids(void)
379{
380 struct ltt_channel_setting *iter;
381
382 list_for_each_entry(iter, &ltt_channels, list)
383 iter->free_event_id = 0;
384}
385
386MODULE_LICENSE("GPL and additional rights");
387MODULE_AUTHOR("Mathieu Desnoyers");
388MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");
This page took 0.036733 seconds and 4 git commands to generate.