/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <errno.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/system.h>
#include <urcu/compiler.h>
#include <stdbool.h>

#include "common/macros.h"
#include "common/align.h"
#include "common/bitmap.h"

#include "common/smp.h"
#include "common/populate.h"
#include "shm.h"

static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}

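/*
 * Initialize the stride of each dimension, iterating from the innermost
 * dimension outwards. The stride of a dimension is the number of
 * elements spanned by one increment of its index, which lets an
 * n-dimensional index be flattened into a single array offset. Fails
 * with -EINVAL on an empty dimension or if the total element count
 * would overflow size_t.
 */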
static int lttng_counter_init_stride(
		const struct lib_counter_config *config __attribute__((unused)),
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem must be at least 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		/* Check for overflow before multiplying. */
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
		stride *= nr_elem;
	}
	return 0;
}

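/*
 * Initialize the memory layout of one counter instance: the global
 * counter when cpu == -1, otherwise the per-cpu counter for the given
 * cpu. The shared memory object holds the counter array followed by an
 * overflow bitmap and an underflow bitmap of one bit per element. A
 * daemon allocates (and zeroes) the shared memory; a non-daemon
 * process maps a pre-existing object through the given file
 * descriptor.
 */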
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;
	size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
	struct lttng_counter_shm_object *shm_object;

	if (shm_fd < 0)
		return 0;	/* Skip, will be populated later. */

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = &counter->percpu_counters[cpu];
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->shm_fd = shm_fd;
	counters_offset = shm_length;
	shm_length += counter_size * nr_elem;
	overflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	underflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	layout->shm_len = shm_length;
	if (counter->is_daemon) {
		/* Allocate and clear shared memory. */
		shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
			shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu,
			lttng_ust_map_populate_cpu_is_enabled(cpu));
		if (!shm_object)
			return -ENOMEM;
	} else {
		/* Map pre-existing shared memory. */
		shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
			shm_fd, shm_length, lttng_ust_map_populate_cpu_is_enabled(cpu));
		if (!shm_object)
			return -ENOMEM;
	}
	layout->counters = shm_object->memory_map + counters_offset;
	layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
	layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
	return 0;
}

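/*
 * Attach a shared memory file descriptor to the global counter of a
 * counter created without one. Returns -EINVAL if the counter has no
 * global allocation, -EBUSY if the global layout is already backed by
 * shared memory.
 */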
int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
		return -EINVAL;
	layout = &counter->global_counters;
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, -1, fd);
}

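/*
 * Attach a shared memory file descriptor to the per-cpu counter of the
 * given cpu. Same error semantics as lttng_counter_set_global_shm().
 */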
int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (cpu < 0 || cpu >= get_possible_cpus_array_len())
		return -EINVAL;

	if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
		return -EINVAL;
	layout = &counter->percpu_counters[cpu];
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, cpu, fd);
}

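/*
 * Store the global sum step, bounds-checked against the configured
 * counter size. When nonzero, the counter update fast path (implemented
 * elsewhere) uses it as the threshold at which per-cpu values are
 * folded into the global counter, which is why it requires both
 * per-cpu and global allocation.
 */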
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > INT8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > INT16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > INT32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

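/*
 * Validate the arguments of lttng_counter_create(): reject 64-bit
 * counters on 32-bit architectures, and reject combinations of
 * allocation flags, file descriptors, and sum step that are mutually
 * inconsistent.
 */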
static
int validate_args(const struct lib_counter_config *config,
		size_t nr_dimensions __attribute__((unused)),
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds)
{
	int nr_cpus = get_possible_cpus_array_len();

	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * The global sum step is only useful when allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
		return -1;
	if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
		return -1;
	return 0;
}

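/*
 * Create a counter with nr_dimensions dimensions of max_nr_elem[i]
 * elements each. Shared memory file descriptors may be passed now
 * (global_counter_fd, counter_cpu_fds) or attached later through
 * lttng_counter_set_global_shm() / lttng_counter_set_cpu_shm().
 * Returns NULL on error.
 *
 * Illustrative sketch of a caller, assuming a single-dimension 32-bit
 * counter with both per-cpu and global allocation and shared memory
 * attached later; only the .alloc and .counter_size fields of
 * struct lib_counter_config are shown (see the counter headers for the
 * full definitions):
 *
 *	struct lib_counter_config config = {
 *		.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
 *		.counter_size = COUNTER_SIZE_32_BIT,
 *	};
 *	size_t max_nr_elem[] = { 128 };
 *	struct lib_counter *counter;
 *
 *	counter = lttng_counter_create(&config, 1, max_nr_elem, 0,
 *			-1, -1, NULL, true);
 *	if (!counter)
 *		return -ENOMEM;
 */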
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds,
		bool is_daemon)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;
	int nr_handles = 0;
	int nr_cpus = get_possible_cpus_array_len();
	bool populate = lttng_ust_map_populate_is_enabled();

	if (validate_args(config, nr_dimensions, max_nr_elem,
			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
			counter_cpu_fds))
		return NULL;
	counter = zmalloc_populate(sizeof(struct lib_counter), populate);
	if (!counter)
		return NULL;
	counter->global_counters.shm_fd = -1;
	counter->config = *config;
	counter->is_daemon = is_daemon;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = zmalloc_populate(nr_dimensions * sizeof(*counter->dimensions), populate);
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = zmalloc_populate(sizeof(struct lib_counter_layout) * nr_cpus, populate);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
		for_each_possible_cpu(cpu)
			counter->percpu_counters[cpu].shm_fd = -1;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	/* TODO: saturation values. */
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;

	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		nr_handles++;
	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		nr_handles += nr_cpus;
	/* Allocate table for global and per-cpu counters. */
	counter->object_table = lttng_counter_shm_object_table_create(nr_handles, populate);
	if (!counter->object_table)
		goto error_alloc_object_table;

	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1, global_counter_fd);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
error_alloc_object_table:
error_init_stride:
	free(counter->percpu_counters);
error_alloc_percpu:
	free(counter->dimensions);
error_dimensions:
error_sum_step:
	free(counter);
	return NULL;
}

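/*
 * Tear down a counter: destroy its shared memory object table and free
 * all memory allocated by lttng_counter_create().
 */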
void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;

	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		free(counter->percpu_counters);
	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
	free(counter->dimensions);
	free(counter);
}

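/*
 * Retrieve the file descriptor and mapping length of the global
 * counter's shared memory. Returns -1 if no shared memory is attached.
 */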
int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
	int shm_fd;

	shm_fd = counter->global_counters.shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = counter->global_counters.shm_len;
	return 0;
}

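/*
 * Retrieve the file descriptor and mapping length of the given cpu's
 * shared memory. Returns -1 on an invalid cpu or if no shared memory
 * is attached.
 */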
int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
	struct lib_counter_layout *layout;
	int shm_fd;

	if (cpu < 0 || cpu >= get_possible_cpus_array_len())
		return -1;
	layout = &counter->percpu_counters[cpu];
	shm_fd = layout->shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = layout->shm_len;
	return 0;
}

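/*
 * Read one counter element, selected by dimension_indexes, from the
 * per-cpu layout of the given cpu (a negative cpu selects the global
 * layout when the configuration allows it), along with its overflow
 * and underflow flags. The load uses CMM_LOAD_SHARED() to read a value
 * that may be concurrently updated; the value and the two flags are
 * sampled by separate loads, not atomically with respect to each
 * other.
 */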
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= get_possible_cpus_array_len())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = CMM_LOAD_SHARED(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
	return 0;
}

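/*
 * Aggregate one counter element across the global counter (if any) and
 * all possible cpus. The summation is carried out on unsigned 64-bit
 * values, where wrap-around is well defined; wrap-around of the 64-bit
 * sum itself is reported through *overflow / *underflow, in addition
 * to any per-layout overflow or underflow bits already set.
 */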
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		for_each_possible_cpu(cpu) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}

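/*
 * Reset one counter element of the layout selected by cpu (a negative
 * cpu selects the global layout when the configuration allows it), and
 * clear its overflow and underflow bits.
 */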
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= get_possible_cpus_array_len())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
	return 0;
}

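/*
 * Reset one counter element across all layouts: the global counter (if
 * any) followed by every possible cpu's counter.
 */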
int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		break;
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}