/*
 * lttng-context-perf-counters.c
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <signal.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-config.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <usterr-signal-safe.h>

#include "perf_event.h"
#include "lttng-tracer-core.h"
/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */
struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* perf_event_open() file descriptor */
};
struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};
struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};
static pthread_key_t perf_counter_key;
static
size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}
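/*
 * Note: the size and alignment reserved here must match what
 * perf_counter_record() writes below (one aligned uint64_t), otherwise
 * the ring buffer layout and the declared event layout would diverge.
 */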
static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
			< sizeof(count)))
		return 0;

	return count;
}
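/*
 * A read(2) on a perf event fd opened without read_format flags yields
 * a single u64: the current counter value. Failed reads are reported
 * as a zero count rather than an error, so tracing keeps going even if
 * a counter becomes unreadable.
 */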
#if defined(__x86_64__) || defined(__i386__)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}
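/*
 * The rdpmc instruction reads the performance-monitoring counter
 * selected by %ecx, returning the result split across %edx:%eax; the
 * "c", "=a" and "=d" constraints above map those registers, and the
 * two halves are recombined into a 64-bit value.
 */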
static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}
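/*
 * Kernels >= 3.12 always set cap_bit0_is_deprecated and report the
 * rdpmc permission in the separate cap_user_rdpmc bit. On older
 * kernels the capability bits are ambiguous, so we conservatively
 * report rdpmc as unavailable and use the syscall fallback.
 */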
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}
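/*
 * When rdpmc is usable, the file descriptor can be closed right after
 * setup: the mmap of the control page keeps the perf event alive, and
 * every subsequent read goes through rdpmc. The fd only needs to stay
 * open for the read(2) fallback path.
 */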
#else /* defined(__x86_64__) || defined(__i386__) */

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif /* defined(__x86_64__) || defined(__i386__) */
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}
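/*
 * The argument values select a per-thread counter: pid 0 counts the
 * calling thread, cpu -1 follows that thread on every CPU, group_fd -1
 * creates a standalone event, and flags 0 requests no special
 * behaviour. This is why each thread opens its own fd lazily.
 */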
static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}
static
void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}
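/*
 * Mapping the first page of the perf fd read-only exposes the
 * kernel-maintained struct perf_event_mmap_page, which carries the
 * seqcount, the rdpmc capability bits, and the counter index/offset
 * consumed by the fast path above.
 */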
static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}
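/*
 * All signals are blocked across the getspecific/setspecific window
 * because tracepoints, and therefore this allocation path, can fire
 * from signal handlers: re-checking the key under the blocked mask
 * keeps a nested handler from racing with the setup.
 */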
static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	ust_lock_nocheck();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	ust_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}
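/*
 * Per-thread counters are created lazily: the first time a thread hits
 * a tracepoint referencing this context field, it opens its own perf
 * fd and links the thread_field on both the per-thread RCU list (fast
 * path lookup) and the per-field list (session teardown).
 */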
static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}
static
uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = field->u.perf_counter;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}
static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}
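/*
 * This is the tracing fast path: one counter read plus one aligned
 * 8-byte write into the ring buffer per event, matching the size and
 * alignment reserved by perf_counter_get_size().
 */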
static
void perf_counter_get_value(struct lttng_ctx_field *field,
		struct lttng_ctx_value *value)
{
	uint64_t v;

	v = wrapper_perf_counter_read(field);
	value->u.s64 = v;
}
/* Called with UST lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}
static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	ust_lock_nocheck();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	ust_unlock();
	free(perf_thread);
}
/* Called with UST lock held */
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field.name);
	perf_field = field->u.perf_counter;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period.
	 */
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	free(perf_field);
}
#ifdef __ARM_ARCH_7A__

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* __ARM_ARCH_7A__ */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* __ARM_ARCH_7A__ */
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size =
			sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment =
			lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness =
			lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->u.perf_counter = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}
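/*
 * Illustrative usage sketch (the perf constants come from the kernel
 * perf_event ABI; the context name is a hypothetical example, not a
 * convention defined in this file): counting CPU cycles per thread
 * could be requested as
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_thread_cpu_cycles", &ctx);
 *
 * which appends a 64-bit integer field carrying the counter value to
 * every event recorded with this context.
 */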
int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}
void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}