/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ust-ringbuffer-context.h>
#include <lttng/ust-cancelstate.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include "common/macros.h"
#include <urcu/ref.h>
#include "common/logging.h"
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "lib/lttng-ust/events.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not, strictly
 * speaking, the layout with the best fast-path complexity, to ensure
 * that teardown of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_field_list are protected by the UST
 * lock. Updates to rcu_field_list are protected by the UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
	char *name;
	struct lttng_ust_event_field *event_field;
};

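/*
 * Per-thread counter state hangs off this pthread key; its destructor
 * tears down the thread's counter fields at thread exit.
 */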
static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * The TLS therefore needs to be allocated before nesting into this
 * lock. Nests inside the RCU bp read-side lock. Protects against
 * concurrent fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implying TLS allocation for dlopen) of TLS variables.
 */
void lttng_ust_perf_counter_init_thread(int flags)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
	(void)flags;
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret;

	if (lttng_ust_cancelstate_disable_push()) {
		ERR("lttng_ust_cancelstate_disable_push");
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the
		 * close() call, in case close() would be marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the close()
	 * call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (lttng_ust_cancelstate_disable_pop()) {
		ERR("lttng_ust_cancelstate_disable_pop");
	}
}

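/*
 * Sketch of the nesting discipline (hypothetical scenario): if a
 * signal handler traces while this thread already holds the perf
 * lock, the TLS nest count avoids a self-deadlock on the
 * non-recursive mutex:
 *
 *	lttng_perf_lock();	nest 0 -> 1, takes ust_perf_mutex
 *	  lttng_perf_lock();	nest 1 -> 2, mutex already held
 *	  lttng_perf_unlock();	nest 2 -> 1
 *	lttng_perf_unlock();	nest 1 -> 0, releases ust_perf_mutex
 */
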
static
size_t perf_counter_get_size(void *priv __attribute__((unused)),
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		size_t offset)
{
	size_t size = 0;

	size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
			< sizeof(count)))
		return 0;

	return count;
}

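/*
 * Two arch_read_perf_counter() flavours follow: on x86, the rdpmc
 * instruction can read the counter directly from user space when the
 * kernel exposes it; everything else (including x86 without rdpmc
 * access) falls back on the read(2)-based path above.
 */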
#if defined(LTTNG_UST_ARCH_X86)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

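/*
 * The mmap'd metadata page is read under its seqcount-style "lock"
 * protocol: the kernel bumps pc->lock around updates to index, offset
 * and pmc_width, so the loop below retries until it observes a stable
 * snapshot.
 */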
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field __attribute__((unused)))
{
	return 1;
}

#endif

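/*
 * perf_event_open(2) has no glibc wrapper; call it through syscall(2).
 */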
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

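/*
 * pid = 0, cpu = -1: count for the calling thread, on any CPU.
 */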
static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

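/*
 * Map the counter metadata page (struct perf_event_mmap_page)
 * read-only. When the architecture can read the counter without the
 * file descriptor (x86 with rdpmc), the fd is closed to save one fd
 * per counter per thread; otherwise it is kept for the read(2)
 * fall-back.
 */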
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

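/*
 * Lazily allocate the per-thread state on first use. Signals are
 * blocked while the pthread key is checked and set, so a signal
 * handler tracing on this thread cannot observe a half-initialised
 * perf_thread (hence the "check again with signals disabled" pattern
 * below).
 */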
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

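/*
 * Fast path: find (or lazily create) the calling thread's state for
 * this counter field, then read the current counter value.
 */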
static
uint64_t wrapper_perf_counter_read(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(priv);
	chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}

static
void perf_counter_get_value(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ctx_value *value)
{
	value->u.u64 = wrapper_perf_counter_read(priv);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	perf_field = (struct lttng_perf_counter_field *) priv;
	free(perf_field->name);
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field->event_field);
	free(perf_field);
}

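/*
 * Whether attr.exclude_kernel should be set: excluding kernel events
 * lets unprivileged processes use the counters under common
 * perf_event_paranoid settings; ARMv7 is handled specially and counts
 * kernel events as well.
 */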
#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */

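/*
 * Counter values are exposed to the trace as 64-bit unsigned integers
 * in native byte order, printed in base 10.
 */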
static const struct lttng_ust_type_common *ust_type =
	lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
		lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
		lttng_ust_is_signed_type(uint64_t),
		LTTNG_UST_BYTE_ORDER, 10);

/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field ctx_field;
	struct lttng_ust_event_field *event_field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	if (lttng_find_context(*ctx, name)) {
		ret = -EEXIST;
		goto find_error;
	}
	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	event_field = zmalloc(sizeof(*event_field));
	if (!event_field) {
		ret = -ENOMEM;
		goto event_field_alloc_error;
	}
	event_field->name = name_alloc;
	event_field->type = ust_type;

	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	perf_field->name = name_alloc;
	perf_field->event_field = event_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	ctx_field.event_field = event_field;
	ctx_field.get_size = perf_counter_get_size;
	ctx_field.record = perf_counter_record;
	ctx_field.get_value = perf_counter_get_value;
	ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
	ctx_field.priv = perf_field;

	ret = lttng_ust_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	return 0;

append_context_error:
setup_error:
	free(perf_field);
perf_field_alloc_error:
	free(event_field);
event_field_alloc_error:
	free(name_alloc);
name_alloc_error:
find_error:
	return ret;
}

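/*
 * Hypothetical caller (sketch; the field name is for illustration
 * only): requesting the hardware CPU-cycles counter as an event
 * context boils down to:
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_thread_cpu_cycles", &ctx);
 *
 * with the type/config pairs following the perf_event_attr convention
 * from perf_event.h.
 */
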
int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}