Cleanup: apply `include-what-you-use` guideline for `uint*_t`
[lttng-ust.git] / liblttng-ust / lttng-context-perf-counters.c
/*
 * lttng-context-perf-counters.c
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-config.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"
#include "lttng-tracer-core.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fixup the TLS before nesting into this lock.
 * Nests inside RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implying a TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

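/*
 * Acquire the perf lock. Cancellation is disabled and all signals are
 * blocked around the mutex acquisition, and a per-thread nesting count
 * is used so that tracing from a signal handler nested over the lock
 * owner does not deadlock on the mutex.
 */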
void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler does not move the store after the
		 * close() call, in case close() is marked as a leaf
		 * function.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler does not move the store before the close()
	 * call, in case close() is marked as a leaf function.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}

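/*
 * Space reserved in the ring buffer for this context field: alignment
 * padding plus the 64-bit counter value.
 */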
static
size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

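/*
 * Slow path: read the 64-bit counter value through read(2) on the perf
 * event file descriptor. Returns 0 if the fd is unavailable or the read
 * fails.
 */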
static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				< sizeof(count)))
		return 0;

	return count;
}

#if defined(__x86_64__) || defined(__i386__)

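/*
 * Read the performance monitoring counter selected by the index passed
 * in ECX with the x86 rdpmc instruction; the 64-bit result is returned
 * in EDX:EAX.
 */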
static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

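/*
 * User-space rdpmc can only be used when the kernel advertises
 * cap_user_rdpmc; cap_bit0_is_deprecated distinguishes this capability
 * layout from the legacy bit0 semantics.
 */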
static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

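/*
 * Fast path: read the counter from user space with rdpmc, using the
 * sequence count published by the kernel in pc->lock to retry if the
 * event is modified (e.g. rescheduled) concurrently with the read.
 * Falls back on the read(2) syscall when rdpmc cannot be used.
 */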
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

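/*
 * Tell setup_perf() whether the event fd must be kept open: it is only
 * needed when the mmap'd page exists but rdpmc is unavailable, so that
 * the read(2) fallback still works.
 */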
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif

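/* glibc provides no wrapper for perf_event_open(2): go through syscall(2). */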
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

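/*
 * Map the first page of the perf event read-only to obtain the
 * perf_event_mmap_page used by the user-space rdpmc fast path, and
 * close the event fd when the architecture code says it is no longer
 * needed.
 */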
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

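/*
 * Lazily allocate the per-thread field list on first use. All signals
 * are blocked so that a signal handler tracing on this thread cannot
 * race with the allocation and pthread_setspecific().
 */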
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

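/*
 * First access of a context field by the current thread: open the
 * per-thread perf counter, map its page, and link the new thread field
 * into both the per-thread and the per-field lists under the perf lock.
 */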
static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = field->u.perf_counter;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

static
void perf_counter_get_value(struct lttng_ctx_field *field,
		struct lttng_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

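/*
 * pthread key destructor, invoked at thread exit: tear down every
 * per-thread counter field owned by the exiting thread.
 */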
static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field.name);
	perf_field = field->u.perf_counter;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field);
}

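/*
 * ARMv7 is special-cased to not request kernel exclusion. The reason is
 * not stated in this file; presumably privilege-level filtering is not
 * usable with these PMUs, so exclude_kernel is left cleared there.
 */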
#ifdef __ARM_ARCH_7A__

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* __ARM_ARCH_7A__ */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* __ARM_ARCH_7A__ */

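/*
 * Hypothetical usage sketch (not part of this file): adding a CPU-cycles
 * counter context field could look like
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_thread_cpu_cycles", &ctx);
 *
 * where "perf_thread_cpu_cycles" is an arbitrary field name chosen by
 * the caller.
 */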
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size =
			sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment =
			lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness =
			lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->u.perf_counter = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}

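/*
 * Create the pthread key whose destructor (lttng_destroy_perf_thread_key)
 * cleans up per-thread perf counter state at thread exit.
 */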
int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}