Cleanup: apply `include-what-you-use` guideline for `size_t`
[lttng-ust.git] / liblttng-ust / lttng-context-perf-counters.c

/*
 * lttng-context-perf-counters.c
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-config.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"
#include "lttng-tracer-core.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fixup the TLS before nesting into this lock.
 * Nests inside RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implying a TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the
		 * close() call, in case close() would be marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the close()
	 * call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}

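/*
 * Size/alignment of the perf counter context field: a single 8-byte
 * integer, aligned on uint64_t.
 */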
static
size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

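/*
 * Read the counter value through the read(2) system call on the event
 * FD. Returns 0 if the FD is invalid or the read fails.
 */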
static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				< sizeof(count)))
		return 0;

	return count;
}

#if defined(__x86_64__) || defined(__i386__)

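/*
 * Read performance monitoring counter number "counter" with the x86
 * rdpmc instruction: the counter index is passed in ECX, and the
 * 64-bit result comes back in EDX:EAX.
 */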
static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

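/*
 * Kernels >= 3.12 set cap_bit0_is_deprecated and expose the
 * cap_user_rdpmc flag; on older kernels the capability bits cannot be
 * relied upon, so report rdpmc as unavailable.
 */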
static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

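/*
 * Fast path: read the counter from user space with rdpmc, using the
 * seqlock (pc->lock) to retry if the kernel updates the mmap page
 * concurrently. Falls back on the read(2) system call when rdpmc
 * cannot be used.
 */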
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

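/*
 * Keep the event FD open only when the rdpmc fast path is unavailable,
 * since the read(2) fall-back needs it; otherwise the mmap page alone
 * is sufficient.
 */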
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif

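/*
 * glibc does not provide a wrapper for perf_event_open(2), so invoke
 * the raw system call directly.
 */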
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

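/*
 * Open a counter for the calling thread (pid == 0) on any CPU
 * (cpu == -1), with no group leader. Returns the event FD, or -1 on
 * error.
 */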
static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

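/*
 * Map the first page of the event FD: the perf_event_mmap_page header,
 * which exposes the fields needed for user-space (rdpmc) reads. Close
 * the FD right away when the architecture does not need it afterwards.
 */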
static
void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

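/*
 * Allocate the calling thread's perf counter state. All signals are
 * blocked around the allocation so a tracing signal handler nested on
 * this thread cannot race with it; the key is re-checked once signals
 * are blocked.
 */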
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = field->u.perf_counter;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

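/*
 * Record callback: read the current counter value and write it into
 * the ring buffer as an 8-byte aligned uint64_t.
 */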
static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

static
void perf_counter_get_value(struct lttng_ctx_field *field,
		struct lttng_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field.name);
	perf_field = field->u.perf_counter;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field);
}

#ifdef __ARM_ARCH_7A__

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* __ARM_ARCH_7A__ */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* __ARM_ARCH_7A__ */

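/*
 * Example (hypothetical caller, for illustration only): adding a
 * CPU-cycles counter context named "perf_cpu_cycles" to a context:
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES, "perf_cpu_cycles", &ctx);
 */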
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size =
			sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment =
			lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness =
			lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->u.perf_counter = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}

int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}