/*
 * lttng-probes.c
 *
 * Holds LTTng probes registry.
 *
 * Copyright 2010-2012 (c) - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>
#include <errno.h>
#include <urcu/list.h>
#include <urcu/hlist.h>
#include <lttng/ust-events.h>
#include <lttng/tracepoint.h>
#include "tracepoint-internal.h"
#include <assert.h>
#include <helper.h>
#include <ctype.h>

#include "lttng-tracer-core.h"
#include "jhash.h"
#include "error.h"

/*
 * probe list is protected by ust_lock()/ust_unlock().
 */
static CDS_LIST_HEAD(_probe_list);

/*
 * List of probes registered but not yet processed.
 */
static CDS_LIST_HEAD(lazy_probe_init);

/*
 * lazy_nesting counter ensures we don't trigger lazy probe registration
 * fixup while we are performing the fixup. It is protected by the ust
 * mutex.
 */
static int lazy_nesting;

/*
 * Called under ust lock.
 */
static
int check_event_provider(struct lttng_probe_desc *desc)
{
	int i;
	size_t provider_name_len;

	provider_name_len = strnlen(desc->provider,
				LTTNG_UST_SYM_NAME_LEN - 1);
	for (i = 0; i < desc->nr_events; i++) {
		if (strncmp(desc->event_desc[i]->name,
				desc->provider,
				provider_name_len))
			return 0;	/* provider mismatch */
	}
	return 1;
}
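
/*
 * Illustrative note (not from the original source): with the usual
 * LTTng-UST "provider:event" naming convention, a descriptor whose
 * provider is "my_provider" is expected to contain only events named
 * like "my_provider:my_event" (hypothetical names), so the prefix
 * comparison above is enough to detect a mismatched provider.
 */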

/*
 * Called under ust lock.
 */
static
void lttng_lazy_probe_register(struct lttng_probe_desc *desc)
{
	struct lttng_probe_desc *iter;
	struct cds_list_head *probe_list;
	int i;

	/*
	 * Each provider enforces that every event name begins with the
	 * provider name. Check this in an assertion for extra
	 * carefulness. This ensures we cannot have duplicate event
	 * names across providers.
	 */
	assert(check_event_provider(desc));

	/*
	 * The provider ensures there are no duplicate event names.
	 * Duplicated TRACEPOINT_EVENT event names would generate a
	 * compile-time error due to duplicated symbol names.
	 */

	/*
	 * We sort the providers by struct lttng_probe_desc pointer
	 * address.
	 */
	probe_list = &_probe_list;
	cds_list_for_each_entry_reverse(iter, probe_list, head) {
		BUG_ON(iter == desc); /* Should never be in the list twice */
		if (iter < desc) {
			/* We belong to the location right after iter. */
			cds_list_add(&desc->head, &iter->head);
			goto desc_added;
		}
	}
	/* We should be added at the head of the list */
	cds_list_add(&desc->head, probe_list);
desc_added:
	DBG("just registered probe %s containing %u events",
		desc->provider, desc->nr_events);
	/*
	 * fix the events awaiting probe load.
	 */
	for (i = 0; i < desc->nr_events; i++) {
		const struct lttng_event_desc *ed;
		int ret;

		ed = desc->event_desc[i];
		DBG("Registered event probe \"%s\" with signature \"%s\"",
			ed->name, ed->signature);
		ret = lttng_fix_pending_event_desc(ed);
		assert(!ret);
	}
}

/*
 * Called under ust lock.
 */
static
void fixup_lazy_probes(void)
{
	struct lttng_probe_desc *iter, *tmp;

	lazy_nesting++;
	cds_list_for_each_entry_safe(iter, tmp,
			&lazy_probe_init, lazy_init_head) {
		lttng_lazy_probe_register(iter);
		iter->lazy = 0;
		cds_list_del(&iter->lazy_init_head);
	}
	lazy_nesting--;
}

/*
 * Called under ust lock.
 */
struct cds_list_head *lttng_get_probe_list_head(void)
{
	if (!lazy_nesting && !cds_list_empty(&lazy_probe_init))
		fixup_lazy_probes();
	return &_probe_list;
}

static
const struct lttng_probe_desc *find_provider(const char *provider)
{
	struct lttng_probe_desc *iter;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	cds_list_for_each_entry(iter, probe_list, head) {
		if (!strcmp(iter->provider, provider))
			return iter;
	}
	return NULL;
}

int lttng_probe_register(struct lttng_probe_desc *desc)
{
	int ret = 0;

	ust_lock();

	/*
	 * Check if the provider has already been registered.
	 */
	if (find_provider(desc->provider)) {
		ret = -EEXIST;
		goto end;
	}
	cds_list_add(&desc->lazy_init_head, &lazy_probe_init);
	desc->lazy = 1;
	DBG("adding probe %s containing %u events to lazy registration list",
		desc->provider, desc->nr_events);
	/*
	 * If there is at least one active session, we need to register
	 * the probe immediately, since event registration cannot be
	 * delayed: the events are needed right away.
	 */
	if (lttng_session_active())
		fixup_lazy_probes();
end:
	ust_unlock();
	return ret;
}

/* Backward compatibility with UST 2.0 */
int ltt_probe_register(struct lttng_probe_desc *desc)
{
	return lttng_probe_register(desc);
}

void lttng_probe_unregister(struct lttng_probe_desc *desc)
{
	ust_lock();
	if (!desc->lazy)
		cds_list_del(&desc->head);
	else
		cds_list_del(&desc->lazy_init_head);
	DBG("just unregistered probe %s", desc->provider);
	ust_unlock();
}

/* Backward compatibility with UST 2.0 */
void ltt_probe_unregister(struct lttng_probe_desc *desc)
{
	lttng_probe_unregister(desc);
}
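
/*
 * Illustrative sketch (not part of the original file): a probe
 * provider, for instance the code generated from TRACEPOINT_EVENT
 * definitions, typically registers its lttng_probe_desc once when the
 * instrumented library is loaded and unregisters it on unload. The
 * names below are hypothetical, and only descriptor fields used
 * elsewhere in this file are shown:
 *
 *	extern const struct lttng_event_desc *my_event_desc_array[];
 *
 *	static struct lttng_probe_desc my_probe_desc = {
 *		.provider = "my_provider",
 *		.event_desc = my_event_desc_array,
 *		.nr_events = 1,
 *	};
 *
 *	static void __attribute__((constructor)) my_provider_init(void)
 *	{
 *		int ret;
 *
 *		ret = lttng_probe_register(&my_probe_desc);
 *		assert(!ret || ret == -EEXIST);
 *	}
 *
 *	static void __attribute__((destructor)) my_provider_exit(void)
 *	{
 *		lttng_probe_unregister(&my_probe_desc);
 *	}
 */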

void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
{
	struct tp_list_entry *list_entry, *tmp;

	cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
		cds_list_del(&list_entry->head);
		free(list_entry);
	}
}

/*
 * called with UST lock held.
 */
int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
{
	struct lttng_probe_desc *probe_desc;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	CDS_INIT_LIST_HEAD(&list->head);
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			struct tp_list_entry *list_entry;

			list_entry = zmalloc(sizeof(*list_entry));
			if (!list_entry)
				goto err_nomem;
			cds_list_add(&list_entry->head, &list->head);
			strncpy(list_entry->tp.name,
				probe_desc->event_desc[i]->name,
				LTTNG_UST_SYM_NAME_LEN);
			list_entry->tp.name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
			if (!probe_desc->event_desc[i]->loglevel) {
				list_entry->tp.loglevel = TRACE_DEFAULT;
			} else {
				list_entry->tp.loglevel = *(*probe_desc->event_desc[i]->loglevel);
			}
		}
	}
	if (cds_list_empty(&list->head))
		list->iter = NULL;
	else
		list->iter =
			cds_list_first_entry(&list->head, struct tp_list_entry, head);
	return 0;

err_nomem:
	lttng_probes_prune_event_list(list);
	return -ENOMEM;
}

/*
 * Return current iteration position, advance internal iterator to next.
 * Return NULL if end of list.
 */
struct lttng_ust_tracepoint_iter *
lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
{
	struct tp_list_entry *entry;

	if (!list->iter)
		return NULL;
	entry = list->iter;
	if (entry->head.next == &list->head)
		list->iter = NULL;
	else
		list->iter = cds_list_entry(entry->head.next,
				struct tp_list_entry, head);
	return &entry->tp;
}
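
/*
 * Illustrative sketch (not part of the original file): a caller
 * holding the ust lock can walk the list built by
 * lttng_probes_get_event_list() roughly as follows (the "name" and
 * "loglevel" fields are the ones filled in above):
 *
 *	struct lttng_ust_tracepoint_list list;
 *	struct lttng_ust_tracepoint_iter *iter;
 *
 *	if (!lttng_probes_get_event_list(&list)) {
 *		while ((iter = lttng_ust_tracepoint_list_get_iter_next(&list)) != NULL)
 *			DBG("tracepoint: %s (loglevel %d)", iter->name, iter->loglevel);
 *		lttng_probes_prune_event_list(&list);
 *	}
 */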

void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
{
	struct tp_field_list_entry *list_entry, *tmp;

	cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
		cds_list_del(&list_entry->head);
		free(list_entry);
	}
}

/*
 * called with UST lock held.
 */
int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
{
	struct lttng_probe_desc *probe_desc;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	CDS_INIT_LIST_HEAD(&list->head);
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			const struct lttng_event_desc *event_desc =
				probe_desc->event_desc[i];
			int j;

			if (event_desc->nr_fields == 0) {
				/* Events without fields. */
				struct tp_field_list_entry *list_entry;

				list_entry = zmalloc(sizeof(*list_entry));
				if (!list_entry)
					goto err_nomem;
				cds_list_add(&list_entry->head, &list->head);
				strncpy(list_entry->field.event_name,
					event_desc->name,
					LTTNG_UST_SYM_NAME_LEN);
				list_entry->field.event_name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
				list_entry->field.field_name[0] = '\0';
				list_entry->field.type = LTTNG_UST_FIELD_OTHER;
				if (!event_desc->loglevel) {
					list_entry->field.loglevel = TRACE_DEFAULT;
				} else {
					list_entry->field.loglevel = *(*event_desc->loglevel);
				}
				list_entry->field.nowrite = 1;
			}

			for (j = 0; j < event_desc->nr_fields; j++) {
				const struct lttng_event_field *event_field =
					&event_desc->fields[j];
				struct tp_field_list_entry *list_entry;

				list_entry = zmalloc(sizeof(*list_entry));
				if (!list_entry)
					goto err_nomem;
				cds_list_add(&list_entry->head, &list->head);
				strncpy(list_entry->field.event_name,
					event_desc->name,
					LTTNG_UST_SYM_NAME_LEN);
				list_entry->field.event_name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
				strncpy(list_entry->field.field_name,
					event_field->name,
					LTTNG_UST_SYM_NAME_LEN);
				list_entry->field.field_name[LTTNG_UST_SYM_NAME_LEN - 1] = '\0';
				switch (event_field->type.atype) {
				case atype_integer:
					list_entry->field.type = LTTNG_UST_FIELD_INTEGER;
					break;
				case atype_string:
					list_entry->field.type = LTTNG_UST_FIELD_STRING;
					break;
				case atype_array:
					if (event_field->type.u.array.elem_type.atype != atype_integer
						|| event_field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none)
						list_entry->field.type = LTTNG_UST_FIELD_OTHER;
					else
						list_entry->field.type = LTTNG_UST_FIELD_STRING;
					break;
				case atype_sequence:
					if (event_field->type.u.sequence.elem_type.atype != atype_integer
						|| event_field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none)
						list_entry->field.type = LTTNG_UST_FIELD_OTHER;
					else
						list_entry->field.type = LTTNG_UST_FIELD_STRING;
					break;
				case atype_float:
					list_entry->field.type = LTTNG_UST_FIELD_FLOAT;
					break;
				case atype_enum:
					list_entry->field.type = LTTNG_UST_FIELD_ENUM;
					break;
				default:
					list_entry->field.type = LTTNG_UST_FIELD_OTHER;
				}
				if (!event_desc->loglevel) {
					list_entry->field.loglevel = TRACE_DEFAULT;
				} else {
					list_entry->field.loglevel = *(*event_desc->loglevel);
				}
				list_entry->field.nowrite = event_field->nowrite;
			}
		}
	}
	if (cds_list_empty(&list->head))
		list->iter = NULL;
	else
		list->iter =
			cds_list_first_entry(&list->head,
				struct tp_field_list_entry, head);
	return 0;

err_nomem:
	lttng_probes_prune_field_list(list);
	return -ENOMEM;
}

/*
 * Return current iteration position, advance internal iterator to next.
 * Return NULL if end of list.
 */
struct lttng_ust_field_iter *
lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
{
	struct tp_field_list_entry *entry;

	if (!list->iter)
		return NULL;
	entry = list->iter;
	if (entry->head.next == &list->head)
		list->iter = NULL;
	else
		list->iter = cds_list_entry(entry->head.next,
				struct tp_field_list_entry, head);
	return &entry->field;
}