/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-config.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"
#include "lttng-tracer-core.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */

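/*
 * Summary of the data-structure layout (descriptive note): each
 * lttng_perf_counter_field describes one registered counter context
 * (its perf_event_attr plus the list of per-thread instances), while
 * each thread keeps its own RCU list of lttng_perf_counter_thread_field
 * so the record fast path only walks per-thread data.
 */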
struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fixup the TLS before nesting into this lock.
 * Nests inside RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implies TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

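/*
 * Descriptive note: the perf mutex is taken with all signals blocked and
 * cancellation disabled. The TLS nesting counter makes the lock reentrant
 * for a signal handler tracing on a thread that already holds it; only
 * the outermost call actually locks the mutex and saves the cancel state.
 */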
void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the
		 * close() call, in case close() would be marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the close()
	 * call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}

static
size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				< sizeof(count)))
		return 0;

	return count;
}

#if defined(__x86_64__) || defined(__i386__)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

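/*
 * Descriptive note: read the counter from user space with rdpmc when the
 * kernel allows it, using the seqlock (pc->lock) protocol on the perf
 * mmap page to retry if the event is concurrently rescheduled. Falls
 * back on the read(2) based path when rdpmc cannot be used.
 */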
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif

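/*
 * perf_event_open(2) has no glibc wrapper, so it is invoked through
 * syscall(2).
 */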
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

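/*
 * Descriptive note: map the read-only perf mmap page for this counter.
 * When the architecture-specific code can read the counter entirely from
 * user space (rdpmc), the file descriptor is no longer needed and is
 * closed; otherwise it is kept for the read(2) fallback.
 */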
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

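/*
 * Look up (or lazily create, on first use by this thread) the per-thread
 * state backing a given perf counter field.
 */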
static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = field->u.perf_counter;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

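/* Context callback: sample the counter and write it as a 64-bit field. */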
static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

static
void perf_counter_get_value(struct lttng_ctx_field *field,
		struct lttng_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field.name);
	perf_field = field->u.perf_counter;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field);
}

#ifdef __ARM_ARCH_7A__

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* __ARM_ARCH_7A__ */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* __ARM_ARCH_7A__ */

/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.integer.size =
			sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.integer.alignment =
			lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.integer.signedness =
			lttng_is_signed_type(uint64_t);
	field->event_field.type.u.integer.reverse_byte_order = 0;
	field->event_field.type.u.integer.base = 10;
	field->event_field.type.u.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->u.perf_counter = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}
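
/*
 * Example (illustrative sketch only, with made-up names): adding a
 * CPU-cycles counter context would look roughly like:
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_cpu_cycles", &session_ctx);
 *
 * where "perf_cpu_cycles" and session_ctx are hypothetical, and the
 * type/config constants come from the perf_event ABI.
 */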

int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}