/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-config.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <ust-helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"
#include "lttng-tracer-core.h"
#include "ust-events-internal.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fix up the TLS before nesting into this lock.
 * Nests inside RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implies TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the nest-count store
		 * past the mutex acquisition (e.g. if pthread_mutex_lock()
		 * were considered a leaf function by the compiler).
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the nest-count store before
	 * the end of the critical section (e.g. if a called function
	 * were considered a leaf function by the compiler).
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}

static
size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	/*
	 * read(2) returns ssize_t; compare with != so an error return of
	 * -1 is not silently converted to a large unsigned value.
	 */
	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				!= sizeof(count)))
		return 0;

	return count;
}

#if defined(__x86_64__) || defined(__i386__)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall back on the system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif

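/*
 * glibc does not provide a wrapper for perf_event_open(2); invoke it
 * through syscall(2).
 */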
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = field->u.perf_counter;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

static
void perf_counter_get_value(struct lttng_ctx_field *field,
		struct lttng_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field.name);
	perf_field = field->u.perf_counter;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field);
}

#ifdef __ARM_ARCH_7A__

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* __ARM_ARCH_7A__ */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* __ARM_ARCH_7A__ */

/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.integer.size =
			sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.integer.alignment =
			lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.integer.signedness =
			lttng_is_signed_type(uint64_t);
	field->event_field.type.u.integer.reverse_byte_order = 0;
	field->event_field.type.u.integer.base = 10;
	field->event_field.type.u.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->u.perf_counter = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}

int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}