/*
 * ltt/ltt-channels.c
 *
 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * LTTng channel management.
 *
 * Author:
 *	Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 */

//ust// #include <linux/module.h>
//ust// #include <linux/ltt-channels.h>
//ust// #include <linux/mutex.h>
//ust// #include <linux/vmalloc.h>

#include "kernelcompat.h"
#include "channels.h"

/*
 * ltt_channel_mutex may be nested inside the LTT trace mutex.
 * ltt_channel_mutex may be nested inside the markers mutex.
 */
static DEFINE_MUTEX(ltt_channel_mutex);
static LIST_HEAD(ltt_channels);
/*
 * Index of the next channel in the array. Makes sure that as long as a trace
 * channel is allocated, no array index will be re-used when a channel is freed
 * and then another channel is allocated. This index is cleared and the array
 * indexes get reassigned when index_kref goes back to 0, which indicates that
 * no more trace channels are allocated.
 */
static unsigned int free_index;
static struct kref index_kref;	/* Keeps track of allocated trace channels */

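/*
 * Illustrative scenario (editor's sketch, not from the original source; the
 * channel names are hypothetical): registering "metadata", "cpu" and "ust"
 * assigns indexes 0, 1 and 2. While a trace still holds index_kref,
 * unregistering "cpu" leaves indexes 0 and 2 untouched, so live trace buffers
 * keep valid indexes; only when the last trace is freed does
 * release_channel_setting() compact the remaining channels back to 0 and 1.
 */
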
static struct ltt_channel_setting *lookup_channel(const char *name)
{
	struct ltt_channel_setting *iter;

	list_for_each_entry(iter, &ltt_channels, list)
		if (strcmp(name, iter->name) == 0)
			return iter;
	return NULL;
}

/*
 * Must be called when channel refcount falls to 0 _and_ also when the last
 * trace is freed. This function is responsible for compacting the channel and
 * event IDs when no users are active.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_channel_setting(struct kref *kref)
{
	struct ltt_channel_setting *setting = container_of(kref,
		struct ltt_channel_setting, kref);
	struct ltt_channel_setting *iter;

	if (atomic_read(&index_kref.refcount) == 0
	    && atomic_read(&setting->kref.refcount) == 0) {
		list_del(&setting->list);
		kfree(setting);

		free_index = 0;
		list_for_each_entry(iter, &ltt_channels, list) {
			iter->index = free_index++;
			iter->free_event_id = 0;
		}
//ust//		markers_compact_event_ids();
	}
}

/*
 * Perform channel index compaction when the last trace channel is freed.
 *
 * Called with lock_markers() and channels mutex held.
 */
static void release_trace_channel(struct kref *kref)
{
	struct ltt_channel_setting *iter, *n;

	list_for_each_entry_safe(iter, n, &ltt_channels, list)
		release_channel_setting(&iter->kref);
}

/**
 * ltt_channels_register - Register a trace channel.
 * @name: channel name
 *
 * Uses refcounting.
 */
int ltt_channels_register(const char *name)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (setting) {
		if (atomic_read(&setting->kref.refcount) == 0)
			goto init_kref;
		else {
			kref_get(&setting->kref);
			goto end;
		}
	}
	setting = kzalloc(sizeof(*setting), GFP_KERNEL);
	if (!setting) {
		ret = -ENOMEM;
		goto end;
	}
	list_add(&setting->list, &ltt_channels);
	strncpy(setting->name, name, PATH_MAX - 1);
	setting->index = free_index++;
init_kref:
	kref_init(&setting->kref);
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_register);
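
/*
 * Illustrative usage (editor's sketch, not from the original source; the
 * channel name is hypothetical). Registration is refcounted, so each
 * successful ltt_channels_register() should be paired with an
 * ltt_channels_unregister(), which itself expects the markers mutex held:
 *
 *	int err = ltt_channels_register("ust");
 *	if (err)
 *		return err;
 *	...
 *	lock_markers();
 *	err = ltt_channels_unregister("ust");
 *	unlock_markers();
 */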

/**
 * ltt_channels_unregister - Unregister a trace channel.
 * @name: channel name
 *
 * Must be called with markers mutex held.
 */
int ltt_channels_unregister(const char *name)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (!setting || atomic_read(&setting->kref.refcount) == 0) {
		ret = -ENOENT;
		goto end;
	}
	kref_put(&setting->kref, release_channel_setting);
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_unregister);

/**
 * ltt_channels_set_default - Set channel default behavior.
 * @name: default channel name
 * @subbuf_size: size of the subbuffers
 * @subbuf_cnt: number of subbuffers
 */
int ltt_channels_set_default(const char *name,
			     unsigned int subbuf_size,
			     unsigned int subbuf_cnt)
{
	struct ltt_channel_setting *setting;
	int ret = 0;

	mutex_lock(&ltt_channel_mutex);
	setting = lookup_channel(name);
	if (!setting || atomic_read(&setting->kref.refcount) == 0) {
		ret = -ENOENT;
		goto end;
	}
	setting->subbuf_size = subbuf_size;
	setting->subbuf_cnt = subbuf_cnt;
end:
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_set_default);
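
/*
 * Illustrative usage (editor's sketch; the channel name and the size/count
 * values are hypothetical). Defaults can only be set on a channel that is
 * currently registered, since the setting's refcount is checked above:
 *
 *	if (!ltt_channels_register("ust"))
 *		ltt_channels_set_default("ust", 4096, 8);
 */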

/**
 * ltt_channels_get_name_from_index - get channel name from channel index
 * @index: channel index
 *
 * Allows looking up the channel name given its index. Done to keep the name
 * information outside of each trace channel instance.
 */
const char *ltt_channels_get_name_from_index(unsigned int index)
{
	struct ltt_channel_setting *iter;

	list_for_each_entry(iter, &ltt_channels, list)
		if (iter->index == index && atomic_read(&iter->kref.refcount))
			return iter->name;
	return NULL;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index);

static struct ltt_channel_setting *
ltt_channels_get_setting_from_name(const char *name)
{
	struct ltt_channel_setting *iter;

	list_for_each_entry(iter, &ltt_channels, list)
		if (!strcmp(iter->name, name)
		    && atomic_read(&iter->kref.refcount))
			return iter;
	return NULL;
}

/**
 * ltt_channels_get_index_from_name - get channel index from channel name
 * @name: channel name
 *
 * Allows looking up the channel index given its name. Done to keep the name
 * information outside of each trace channel instance.
 * Returns -1 if not found.
 */
int ltt_channels_get_index_from_name(const char *name)
{
	struct ltt_channel_setting *setting;

	setting = ltt_channels_get_setting_from_name(name);
	if (setting)
		return setting->index;
	else
		return -1;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name);
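
/*
 * Illustrative round trip (editor's sketch; assumes the channel is still
 * registered, otherwise the index lookup returns -1):
 *
 *	int idx = ltt_channels_get_index_from_name("ust");
 *	if (idx >= 0)
 *		assert(strcmp(ltt_channels_get_name_from_index(idx),
 *			      "ust") == 0);
 */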

/**
 * ltt_channels_trace_alloc - Allocate channel structures for a trace
 * @nr_channels: output parameter, set to the number of channel slots allocated
 * @overwrite: overwrite mode flag applied to each channel
 * @active: active flag applied to each channel
 *
 * Use the current channel list to allocate the channels for a trace.
 * Called with trace lock held. Does not perform the trace buffer allocation,
 * because we must let the user override specific channel sizes.
 */
struct ltt_channel_struct *ltt_channels_trace_alloc(unsigned int *nr_channels,
						    int overwrite,
						    int active)
{
	struct ltt_channel_struct *channel = NULL;
	struct ltt_channel_setting *iter;

	mutex_lock(&ltt_channel_mutex);
	if (!free_index)
		goto end;
	if (!atomic_read(&index_kref.refcount))
		kref_init(&index_kref);
	else
		kref_get(&index_kref);
	*nr_channels = free_index;
	channel = kzalloc(sizeof(struct ltt_channel_struct) * free_index,
			  GFP_KERNEL);
	if (!channel)
		goto end;
	list_for_each_entry(iter, &ltt_channels, list) {
		if (!atomic_read(&iter->kref.refcount))
			continue;
		channel[iter->index].subbuf_size = iter->subbuf_size;
		channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
		channel[iter->index].overwrite = overwrite;
		channel[iter->index].active = active;
		channel[iter->index].channel_name = iter->name;
	}
end:
	mutex_unlock(&ltt_channel_mutex);
	return channel;
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc);

/**
 * ltt_channels_trace_free - Free one trace's channels
 * @channels: channels to free
 *
 * Called with trace lock held. The actual channel buffers must be freed before
 * this function is called.
 */
void ltt_channels_trace_free(struct ltt_channel_struct *channels)
{
	lock_markers();
	mutex_lock(&ltt_channel_mutex);
	kfree(channels);
	kref_put(&index_kref, release_trace_channel);
	mutex_unlock(&ltt_channel_mutex);
	unlock_markers();
}
//ust// EXPORT_SYMBOL_GPL(ltt_channels_trace_free);
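
/*
 * Illustrative pairing (editor's sketch; error handling is elided and the
 * trace lock is assumed held, as both functions require). The second and
 * third arguments are the overwrite and active flags:
 *
 *	unsigned int nr_channels;
 *	struct ltt_channel_struct *chans;
 *
 *	chans = ltt_channels_trace_alloc(&nr_channels, 0, 1);
 *	if (chans) {
 *		... allocate, use and free the channel buffers ...
 *		ltt_channels_trace_free(chans);
 *	}
 */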

/**
 * _ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error.
 * Must be called with channels mutex held.
 */
int _ltt_channels_get_event_id(const char *channel, const char *name)
{
	struct ltt_channel_setting *setting;
	int ret;

	setting = ltt_channels_get_setting_from_name(channel);
	if (!setting) {
		ret = -ENOENT;
		goto end;
	}
	if (strcmp(channel, "metadata") == 0) {
		if (strcmp(name, "core_marker_id") == 0)
			ret = 0;
		else if (strcmp(name, "core_marker_format") == 0)
			ret = 1;
		else
			ret = -ENOENT;
		goto end;
	}
	if (setting->free_event_id == EVENTS_PER_CHANNEL - 1) {
		ret = -ENOSPC;
		goto end;
	}
	ret = setting->free_event_id++;
end:
	return ret;
}

/**
 * ltt_channels_get_event_id - get next event ID for a marker
 * @channel: channel name
 * @name: event name
 *
 * Returns a unique event ID (for this channel) or < 0 on error.
 */
int ltt_channels_get_event_id(const char *channel, const char *name)
{
	int ret;

	mutex_lock(&ltt_channel_mutex);
	ret = _ltt_channels_get_event_id(channel, name);
	mutex_unlock(&ltt_channel_mutex);
	return ret;
}

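/*
 * Illustrative usage (editor's sketch; the channel and event names are
 * hypothetical and the channel must already be registered). IDs are handed
 * out sequentially per channel, so two markers on the same channel receive
 * distinct, consecutive IDs:
 *
 *	int id1 = ltt_channels_get_event_id("ust", "my_event");
 *	int id2 = ltt_channels_get_event_id("ust", "other_event");
 *
 * Here id2 == id1 + 1 when both calls succeed. The "metadata" channel is
 * special-cased above: core_marker_id and core_marker_format always map to
 * event IDs 0 and 1.
 */
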
//ust// MODULE_LICENSE("GPL");
//ust// MODULE_AUTHOR("Mathieu Desnoyers");
//ust// MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");