API refactoring: introduce probe context
[lttng-ust.git] / src / lib / lttng-ust / lttng-context-perf-counters.c
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ust-ringbuffer-context.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include "common/macros.h"
#include <urcu/ref.h>
#include "common/logging.h"
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "lib/lttng-ust/events.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not, strictly
 * speaking, what would provide the best fast-path complexity, to
 * ensure that teardown of sessions vs thread exit is handled
 * racelessly.
 *
 * Updates and traversals of thread_field_list are protected by the UST
 * lock. Updates to rcu_field_list are protected by the UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
	char *name;
	struct lttng_ust_event_field *event_field;
};

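/*
 * Thread-specific data key mapping each thread to its
 * struct lttng_perf_counter_thread. Its destructor,
 * lttng_destroy_perf_thread_key, tears down the per-thread counter
 * fields on thread exit (see lttng_perf_counter_init below).
 */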
static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fix up the TLS before nesting into this lock.
 * Nests inside the RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implies TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler does not move the store after the
		 * close() call, in case close() is marked as a leaf
		 * function.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler does not move the store before the
	 * close() call, in case close() is marked as a leaf function.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}
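
/*
 * Illustration (not part of this file): lttng_perf_lock() is
 * reentrant per thread through the ust_perf_mutex_nest counter, so a
 * tracing signal handler running over a thread that already holds the
 * lock does not deadlock; signals are blocked only around the nest
 * count and mutex manipulation. A hypothetical caller brackets list
 * updates the same way add_thread_field() does below:
 *
 *	lttng_perf_lock();
 *	cds_list_add(&thread_field->thread_field_node,
 *			&perf_field->thread_field_list);
 *	lttng_perf_unlock();
 */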

static
size_t perf_counter_get_size(void *priv __attribute__((unused)),
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		size_t offset)
{
	size_t size = 0;

	size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;
	ssize_t len;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	/*
	 * read(2) returns ssize_t; compare as signed so a -1 error
	 * return is not silently converted to a large unsigned value.
	 */
	len = read(thread_field->fd, &count, sizeof(count));
	if (caa_unlikely(len != (ssize_t) sizeof(count)))
		return 0;

	return count;
}

#if defined(LTTNG_UST_ARCH_X86)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

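/*
 * Read the counter from user space with rdpmc when the kernel allows
 * it. The pc->lock sequence counter implements a seqlock: the value
 * is re-read until a stable snapshot (unchanged sequence number) is
 * observed, since the kernel may update the mmap page concurrently.
 */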
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall back on the system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field __attribute__((unused)))
{
	return 1;
}

#endif

static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

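/*
 * Illustration (not part of this file): a minimal sketch of calling
 * the wrapper above, e.g. to count CPU cycles for the calling thread
 * on any CPU (the same pid/cpu/group_fd/flags combination
 * open_perf_fd() passes below):
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HARDWARE,
 *		.size = sizeof(attr),
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 */
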
static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

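/*
 * Map the kernel's perf_event_mmap_page read-only. On architectures
 * where that page allows user-space rdpmc reads, the file descriptor
 * is no longer needed afterwards and is closed; otherwise it is kept
 * open for the read(2)-based fallback.
 */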
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

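/*
 * Look up, and lazily create, the per-thread state for a given perf
 * counter field: the first event a thread records for a given counter
 * allocates its TSD entry and opens its perf fd. Signals are blocked
 * in the allocation paths so a nested signal handler cannot race the
 * "check again" lookups above.
 */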
static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

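/*
 * Callbacks wired into the context field (see
 * lttng_add_perf_counter_to_ctx below): record() serializes the
 * counter value into the ring buffer at uint64_t alignment, while
 * get_value() exposes it to the filter/capture machinery.
 */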
static
void perf_counter_record(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(priv);
	chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}

static
void perf_counter_get_value(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ctx_value *value)
{
	value->u.u64 = wrapper_perf_counter_read(priv);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	perf_field = (struct lttng_perf_counter_field *) priv;
	free(perf_field->name);
	/*
	 * This teardown is performed when no thread can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field->event_field);
	free(perf_field);
}

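/*
 * Assumption (not documented upstream): the ARMv7 PMU cannot filter
 * counts by privilege level, so requesting exclude_kernel there would
 * presumably make perf_event_open() fail; kernel events are therefore
 * not excluded on that architecture.
 */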
#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */

static const struct lttng_ust_type_common *ust_type =
	lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
		lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
		lttng_ust_is_signed_type(uint64_t),
		LTTNG_UST_BYTE_ORDER, 10);

/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field ctx_field;
	struct lttng_ust_event_field *event_field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	if (lttng_find_context(*ctx, name)) {
		ret = -EEXIST;
		goto find_error;
	}
	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	event_field = zmalloc(sizeof(*event_field));
	if (!event_field) {
		ret = -ENOMEM;
		goto event_field_alloc_error;
	}
	event_field->name = name_alloc;
	event_field->type = ust_type;

	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	perf_field->name = name_alloc;
	perf_field->event_field = event_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	ctx_field.event_field = event_field;
	ctx_field.get_size = perf_counter_get_size;
	ctx_field.record = perf_counter_record;
	ctx_field.get_value = perf_counter_get_value;
	ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
	ctx_field.priv = perf_field;

	ret = lttng_ust_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	return 0;

append_context_error:
setup_error:
	free(perf_field);
perf_field_alloc_error:
	free(event_field);
event_field_alloc_error:
	free(name_alloc);
name_alloc_error:
find_error:
	return ret;
}

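/*
 * Illustration (not part of this file): typical use of
 * lttng_add_perf_counter_to_ctx() above, as triggered when a user
 * runs "lttng add-context -u -t perf:thread:cpu-cycles". The field
 * name string shown here is an assumption for the example:
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_thread_cpu_cycles", &ctx);
 */
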
int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}