/* lttng-ust.git: liblttng-ust/lttng-context-perf-counters.c */

/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-context.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <ust-helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "ust-events-internal.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fix up the TLS before nesting into this lock.
 * Nests inside the RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implying TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the
		 * close() call, in case close() would be marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the
	 * close() call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}

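/*
 * Illustrative nesting sketch (added note, not part of the upstream
 * file): the per-thread nest counter makes the lock safe to take again
 * from a signal handler interrupting a thread that already holds it:
 *
 *	lttng_perf_lock();		-> nest: 0 -> 1, mutex acquired
 *	    (signal handler runs)
 *	    lttng_perf_lock();		-> nest: 1 -> 2, no mutex operation
 *	    lttng_perf_unlock();	-> nest: 2 -> 1, mutex still held
 *	lttng_perf_unlock();		-> nest: 1 -> 0, mutex released
 */
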
static
size_t perf_counter_get_size(struct lttng_ust_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

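/*
 * Worked example (added note): if the current payload offset is 12,
 * lib_ring_buffer_align(12, 8) yields 4 bytes of padding, so this
 * context field consumes 4 + 8 = 12 bytes and the counter value itself
 * starts at offset 16.
 */
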
static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
			< sizeof(count)))
		return 0;

	return count;
}

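/*
 * Note (added): since the event is opened without any PERF_FORMAT_*
 * read_format flags, read(2) on the perf fd returns a single u64
 * holding the current counter value.
 */
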
#if defined(LTTNG_UST_ARCH_X86)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

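/*
 * Background (added note): on kernels before 3.12, bit 0 of the
 * capabilities field ambiguously encoded both "user time" and "user
 * rdpmc" (a broken ABI). Newer kernels always set
 * cap_bit0_is_deprecated, which makes the separate cap_user_rdpmc bit
 * trustworthy; when it is clear, rdpmc is conservatively assumed to be
 * unavailable.
 */
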
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

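/*
 * Note (added): pc->lock is a sequence counter maintained by the
 * kernel around updates to the mmap'd control page. Sampling it before
 * and after reading index/offset, and retrying on mismatch, yields a
 * consistent snapshot without taking a lock, in the style of a seqlock
 * read side. pc->index is the rdpmc counter index plus one; zero means
 * no hardware counter is currently scheduled for this event.
 */
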
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif

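/*
 * Rationale (added note): when rdpmc is usable, the perf fd can be
 * closed early because the mmap'd page keeps a reference on the event;
 * otherwise the fd must be kept open so the counter can still be read
 * through read(2).
 */
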
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

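/*
 * Note (added): glibc provides no wrapper for perf_event_open(2), so
 * the raw syscall(2) interface above is the standard way to invoke it.
 */
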
static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	/* pid == 0: this thread, cpu == -1: any CPU, no group fd, no flags. */
	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

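/*
 * Note (added): only the control page (struct perf_event_mmap_page) is
 * mapped, read-only; no ring-buffer data pages are requested, since
 * only the self-monitoring counter fields are needed.
 */
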
static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

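/*
 * Note (added): signals stay blocked across the lookup and the list
 * insertion so that a tracing signal handler nested on this thread
 * cannot run between the "check again" pass and cds_list_add_rcu(),
 * which could otherwise create a duplicate thread field.
 */
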
static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(struct lttng_ust_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) field->priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(struct lttng_ust_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

static
void perf_counter_get_value(struct lttng_ust_ctx_field *field,
		struct lttng_ust_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ust_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field->name);
	perf_field = (struct lttng_perf_counter_field *) field->priv;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field);
}

#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */

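/*
 * Note (added, presumed rationale): the ARMv7 PMU cannot filter events
 * by privilege level, so opening a counter with exclude_kernel set
 * fails there; kernel-mode counting is therefore left enabled on that
 * architecture.
 */
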
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field *field;
	struct lttng_ust_type_common *ust_type;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	ust_type = lttng_ust_create_type_integer(sizeof(uint64_t) * CHAR_BIT,
			lttng_alignof(uint64_t) * CHAR_BIT,
			lttng_is_signed_type(uint64_t),
			BYTE_ORDER, 10);
	if (!ust_type) {
		ret = -ENOMEM;
		goto type_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field->name = name_alloc;
	field->event_field->type = ust_type;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->priv = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	lttng_ust_destroy_type(ust_type);
type_alloc_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}

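/*
 * Usage sketch (added note, hypothetical call site): adding the CPU
 * cycles counter as a "perf_cpu_cycles" context field would look like:
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES, "perf_cpu_cycles", &ctx);
 *
 * where PERF_TYPE_HARDWARE and PERF_COUNT_HW_CPU_CYCLES come from the
 * perf_event ABI definitions.
 */
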
int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}