Fix: nestable pthread cancelstate
src/lib/lttng-ust/lttng-context-perf-counters.c
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ust-ringbuffer-context.h>
#include <lttng/ust-cancelstate.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include "common/macros.h"
#include <urcu/ref.h>
#include "common/logging.h"
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "lib/lttng-ust/events.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */
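
/*
 * Layout sketch (derived from the struct definitions below):
 *
 *   perf_counter_key (pthread key, one value per thread)
 *     -> struct lttng_perf_counter_thread
 *          rcu_field_list -> struct lttng_perf_counter_thread_field
 *               field -> struct lttng_perf_counter_field (back reference)
 *               thread_field_node: linked into the field's per-field
 *                                  thread_field_list
 */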

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
	char *name;
	struct lttng_ust_event_field *event_field;
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock;
 * the TLS must therefore be allocated before nesting into this lock.
 * Nests inside the RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implying TLS allocation for dlopen) of TLS variables.
 */
void lttng_ust_perf_counter_alloc_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret;

	if (lttng_ust_cancelstate_disable_push()) {
		ERR("lttng_ust_cancelstate_disable_push");
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the
		 * close() call, in case close() would be marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}
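
/*
 * The nesting counter lets a signal handler that traces into a perf
 * context run on a thread already holding ust_perf_mutex: the nested
 * lttng_perf_lock() sees ust_perf_mutex_nest > 0 and skips the mutex,
 * which would otherwise self-deadlock. Signals are blocked while the
 * counter and mutex are manipulated so a nested handler always
 * observes the two in a consistent state.
 */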

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the close()
	 * call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (lttng_ust_cancelstate_disable_pop()) {
		ERR("lttng_ust_cancelstate_disable_pop");
	}
}

static
size_t perf_counter_get_size(void *priv __attribute__((unused)),
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		size_t offset)
{
	size_t size = 0;

	size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}
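
/*
 * The perf_event_attr is zmalloc'd, so attr.read_format stays 0 and a
 * read(2) on the event fd returns a single u64 counter value.
 */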
static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				< sizeof(count)))
		return 0;

	return count;
}

#if defined(LTTNG_UST_ARCH_X86)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}
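
/*
 * rdpmc loads the counter selected by ECX into EDX:EAX; the asm above
 * stitches the halves back into one 64-bit value. User-space rdpmc is
 * only usable when the kernel advertises it, which has_rdpmc() below
 * checks on the mmap'd metadata page.
 */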

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall back on the system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}
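
/*
 * pc->lock is the kernel's sequence counter for this page: the reader
 * snapshots it, reads index/offset, then retries when the value has
 * changed, so a concurrent update by the kernel is never half-read.
 */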

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field __attribute__((unused)))
{
	return 1;
}

#endif
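
/*
 * glibc provides no perf_event_open() wrapper, so the call is made
 * through syscall(2) directly.
 */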
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}
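
/*
 * Only the metadata page is mapped; that is all the rdpmc fast path
 * needs. When rdpmc cannot be used, the fd stays open to serve the
 * read(2) fallback; otherwise it is closed, presumably to avoid
 * holding one fd per counter per thread.
 */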

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}
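
/*
 * Blocking all signals before the re-check closes the race where a
 * signal handler hitting a perf-context tracepoint would allocate the
 * per-thread structure between the caller's pthread_getspecific()
 * returning NULL and the pthread_setspecific() above.
 */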

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(priv);
	chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}

static
void perf_counter_get_value(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ctx_value *value)
{
	value->u.u64 = wrapper_perf_counter_read(priv);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	perf_field = (struct lttng_perf_counter_field *) priv;
	free(perf_field->name);
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field->event_field);
	free(perf_field);
}
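
/*
 * ARMv7 counters are opened without exclude_kernel. The rationale is
 * inferred from this architecture split alone: the ARMv7 PMU cannot
 * filter by privilege level, so requesting exclude_kernel would make
 * perf_event_open() fail there.
 */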
#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */

static const struct lttng_ust_type_common *ust_type =
	lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
		lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
		lttng_ust_is_signed_type(uint64_t),
		LTTNG_UST_BYTE_ORDER, 10);
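
/*
 * Usage sketch for the function below, assuming the standard
 * linux/perf_event.h constants (the field name is chosen by the
 * caller):
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES, "perf_cpu_cycles", &ctx);
 */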
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field ctx_field;
	struct lttng_ust_event_field *event_field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	if (lttng_find_context(*ctx, name)) {
		ret = -EEXIST;
		goto find_error;
	}
	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	event_field = zmalloc(sizeof(*event_field));
	if (!event_field) {
		ret = -ENOMEM;
		goto event_field_alloc_error;
	}
	event_field->name = name_alloc;
	event_field->type = ust_type;

	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	perf_field->name = name_alloc;
	perf_field->event_field = event_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	ctx_field.event_field = event_field;
	ctx_field.get_size = perf_counter_get_size;
	ctx_field.record = perf_counter_record;
	ctx_field.get_value = perf_counter_get_value;
	ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
	ctx_field.priv = perf_field;

	ret = lttng_ust_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	return 0;

append_context_error:
setup_error:
	free(perf_field);
perf_field_alloc_error:
	free(event_field);
event_field_alloc_error:
	free(name_alloc);
name_alloc_error:
find_error:
	return ret;
}

int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}
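
/*
 * Registering lttng_destroy_perf_thread_key() as the key destructor
 * makes each thread that touched a perf context tear down its thread
 * fields automatically at thread exit.
 */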

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}