/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <errno.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ust-ringbuffer-context.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include "common/macros.h"
#include <urcu/ref.h>
#include "common/logging.h"
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "lib/lttng-ust/events.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path. Even though this is not, strictly
 * speaking, the layout with the best fast-path complexity, it ensures
 * that teardown of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_field_list are protected by the UST
 * lock. Updates to rcu_field_list are protected by the UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
	char *name;
	struct lttng_ust_event_field *event_field;
};
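
/*
 * How the structures above are linked: each lttng_perf_counter_field
 * keeps a thread_field_list of lttng_perf_counter_thread_field objects
 * (one per thread that used this context field), and each
 * lttng_perf_counter_thread keeps an RCU rcu_field_list of the same
 * objects, linked through rcu_field_node. A thread field is therefore
 * on exactly two lists at once, which allows teardown either per field
 * (session destroy) or per thread (thread exit).
 */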

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fix up the TLS before nesting into this lock.
 * Nests inside the RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implying a TLS fixup for dlopen) of the TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the close()
		 * call, in case close() would be marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the close()
	 * call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}
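
/*
 * lttng_perf_lock()/lttng_perf_unlock() bracket every update to the
 * thread-field lists (see add_thread_field() and the teardown paths
 * below). All signals are blocked while the mutex is taken or released,
 * and the per-thread nesting counter avoids deadlocking when tracing
 * from a signal handler nested over a thread that already holds the
 * lock.
 */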

static
size_t perf_counter_get_size(void *priv __attribute__((unused)),
		size_t offset)
{
	size_t size = 0;

	size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	/* Catch both read errors (-1) and short reads. */
	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				!= sizeof(count)))
		return 0;

	return count;
}

#if defined(LTTNG_UST_ARCH_X86)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

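/*
 * x86 fast path: when the kernel exports the counter through the perf
 * mmap page (cap_user_rdpmc), read it directly with rdpmc. The pc->lock
 * sequence count is re-checked after the read, and the read is retried
 * if the kernel updated the page concurrently. The raw PMC value is
 * sign-extended to pc->pmc_width bits before being added to pc->offset.
 * If rdpmc cannot be used, fall back on the read() system call.
 */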
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field __attribute__((unused)))
{
	return 1;
}

#endif

static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

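/*
 * Open a counter for the calling thread, on any CPU (pid == 0,
 * cpu == -1 in perf_event_open(2) terms), with no group leader and no
 * flags. Returns the perf file descriptor, or -1 on error.
 */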
static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

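/*
 * Map the perf control page for this counter. When the architecture
 * fast path can read the counter without going through the file
 * descriptor (rdpmc on x86), the fd is closed right away, since it is
 * not needed afterwards.
 */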
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

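/*
 * Look up (and lazily create) the per-thread state for this context
 * field: the lttng_perf_counter_thread is stored under perf_counter_key
 * TLS on first use, and the per-field thread field is created on first
 * access from this thread, opening and mmapping the perf counter as
 * needed. Signals are blocked around the check-and-create steps so a
 * nested signal handler cannot race with the same thread.
 */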
static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(void *priv,
		struct lttng_ust_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(priv);
	chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}

static
void perf_counter_get_value(void *priv,
		struct lttng_ust_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(priv);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	perf_field = (struct lttng_perf_counter_field *) priv;
	free(perf_field->name);
	/*
	 * This put is performed when no thread can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field->event_field);
	free(perf_field);
}

#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */

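/*
 * Event field type shared by all perf counter context fields: a 64-bit
 * unsigned integer, displayed in base 10.
 */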
static const struct lttng_ust_type_common *ust_type =
	lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
		lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
		lttng_ust_is_signed_type(uint64_t),
		LTTNG_UST_BYTE_ORDER, 10);

/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field ctx_field;
	struct lttng_ust_event_field *event_field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	if (lttng_find_context(*ctx, name)) {
		ret = -EEXIST;
		goto find_error;
	}
	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	event_field = zmalloc(sizeof(*event_field));
	if (!event_field) {
		ret = -ENOMEM;
		goto event_field_alloc_error;
	}
	event_field->name = name_alloc;
	event_field->type = ust_type;

	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	perf_field->name = name_alloc;
	perf_field->event_field = event_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	ctx_field.event_field = event_field;
	ctx_field.get_size = perf_counter_get_size;
	ctx_field.record = perf_counter_record;
	ctx_field.get_value = perf_counter_get_value;
	ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
	ctx_field.priv = perf_field;

	ret = lttng_ust_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	return 0;

append_context_error:
setup_error:
	free(perf_field);
perf_field_alloc_error:
	free(event_field);
event_field_alloc_error:
	free(name_alloc);
name_alloc_error:
find_error:
	return ret;
}

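/*
 * Example (hypothetical caller, for illustration only): with the UST
 * lock held, a CPU-cycles context field could be added with
 *
 *	lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES, "perf_cpu_cycles",
 *			&session_ctx);
 *
 * where "perf_cpu_cycles" and session_ctx are placeholder names, and
 * the type/config values are the perf_event.h hardware counter
 * constants.
 */
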
int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}