Clarify terminology around cpu ids and array length
src/common/counter/counter.c

/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <errno.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/system.h>
#include <urcu/compiler.h>
#include <stdbool.h>

#include "common/macros.h"
#include "common/align.h"
#include "common/bitmap.h"

#include "common/smp.h"
#include "shm.h"

static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}

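/*
 * Compute the stride of each dimension, from innermost to outermost:
 * the stride of dimension i is the product of the number of elements
 * of all dimensions following it (row-major layout). Fails with
 * -EINVAL on an empty dimension or on size_t overflow of the total
 * element count.
 */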
static int lttng_counter_init_stride(
		const struct lib_counter_config *config __attribute__((unused)),
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		stride *= nr_elem;
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
	}
	return 0;
}

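/*
 * Initialize one counter layout (global for cpu == -1, otherwise the
 * per-cpu layout of "cpu") backed by the shared memory fd. The mapping
 * is laid out as: the counter array itself, followed by the overflow
 * bitmap, followed by the underflow bitmap (one bit per element,
 * rounded up to a byte). The daemon allocates and clears the shared
 * memory object; other processes map the pre-existing object.
 */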
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;
	size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
	struct lttng_counter_shm_object *shm_object;

	if (shm_fd < 0)
		return 0;	/* Skip, will be populated later. */

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = &counter->percpu_counters[cpu];
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->shm_fd = shm_fd;
	counters_offset = shm_length;
	shm_length += counter_size * nr_elem;
	overflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	underflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	layout->shm_len = shm_length;
	if (counter->is_daemon) {
		/* Allocate and clear shared memory. */
		shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
			shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
		if (!shm_object)
			return -ENOMEM;
	} else {
		/* Map pre-existing shared memory. */
		shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
			shm_fd, shm_length);
		if (!shm_object)
			return -ENOMEM;
	}
	layout->counters = shm_object->memory_map + counters_offset;
	layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
	layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
	return 0;
}

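/*
 * Install the shared memory fd backing the global counters. Fails with
 * -EINVAL if the counter configuration has no global allocation, and
 * with -EBUSY if a fd was already installed.
 */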
int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
		return -EINVAL;
	layout = &counter->global_counters;
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, -1, fd);
}

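/*
 * Install the shared memory fd backing the counters of "cpu". The cpu
 * id must fall within [0, get_possible_cpus_array_len()).
 */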
int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (cpu < 0 || cpu >= get_possible_cpus_array_len())
		return -EINVAL;

	if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
		return -EINVAL;
	layout = &counter->percpu_counters[cpu];
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, cpu, fd);
}

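/*
 * Validate and record the global sum step in the union field matching
 * the configured counter size, rejecting values that would not fit.
 */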
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > INT8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > INT16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > INT32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

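/*
 * Sanity-check the creation arguments against the configuration:
 * 64-bit counters require a 64-bit architecture, per-cpu fds may only
 * be passed for per-cpu allocations (and their count must match the
 * possible-cpus array length), and a global sum step only makes sense
 * when both global and per-cpu counters are allocated.
 */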
static
int validate_args(const struct lib_counter_config *config,
		size_t nr_dimensions __attribute__((unused)),
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds)
{
	int nr_cpus = get_possible_cpus_array_len();

	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * A global sum step is only useful when allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
		return -1;
	if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
		return -1;
	return 0;
}

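/*
 * Create a counter object from its configuration, dimension sizes and
 * backing fds, then initialize the requested layouts. On any failure,
 * partially-initialized state is torn down and NULL is returned.
 *
 * Usage sketch (illustrative only, not part of this file): create a
 * one-dimensional per-cpu counter. "my_config", "cpu_fds" and
 * "nr_cpus" are hypothetical caller-provided values; nr_cpus is
 * expected to equal get_possible_cpus_array_len():
 *
 *	const size_t max_nr_elem[] = { 128 };
 *	struct lib_counter *c;
 *
 *	c = lttng_counter_create(&my_config, 1, max_nr_elem, 0, -1,
 *			nr_cpus, cpu_fds, false);
 *	if (!c)
 *		abort();
 */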
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step,
		int global_counter_fd,
		int nr_counter_cpu_fds,
		const int *counter_cpu_fds,
		bool is_daemon)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;
	int nr_handles = 0;
	int nr_cpus = get_possible_cpus_array_len();

	if (validate_args(config, nr_dimensions, max_nr_elem,
			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
			counter_cpu_fds))
		return NULL;
	counter = zmalloc(sizeof(struct lib_counter));
	if (!counter)
		return NULL;
	counter->global_counters.shm_fd = -1;
	counter->config = *config;
	counter->is_daemon = is_daemon;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
		for_each_possible_cpu(cpu)
			counter->percpu_counters[cpu].shm_fd = -1;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	/* TODO: saturation values. */
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;

	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		nr_handles++;
	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		nr_handles += nr_cpus;
	/* Allocate table for global and per-cpu counters. */
	counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
	if (!counter->object_table)
		goto error_alloc_object_table;

	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1, global_counter_fd);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
error_alloc_object_table:
error_init_stride:
	free(counter->percpu_counters);
error_alloc_percpu:
	free(counter->dimensions);
error_dimensions:
error_sum_step:
	free(counter);
	return NULL;
}

void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;

	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		free(counter->percpu_counters);
	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
	free(counter->dimensions);
	free(counter);
}

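/*
 * Retrieve the fd and mapping length of the global counter layout.
 * Returns -1 when no fd has been installed yet.
 */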
int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
	int shm_fd;

	shm_fd = counter->global_counters.shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = counter->global_counters.shm_len;
	return 0;
}

int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
	struct lib_counter_layout *layout;
	int shm_fd;

	if (cpu < 0 || cpu >= get_possible_cpus_array_len())
		return -1;
	layout = &counter->percpu_counters[cpu];
	shm_fd = layout->shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = layout->shm_len;
	return 0;
}

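/*
 * Read the value and overflow/underflow flags of one counter slot.
 * The cpu argument selects the layout: a valid cpu id for per-cpu
 * counters, or -1 for the global counter when the configuration
 * allocates one.
 */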
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= get_possible_cpus_array_len())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = CMM_LOAD_SHARED(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
	return 0;
}

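/*
 * Aggregate one counter slot across the global layout (if any) and
 * every possible cpu, summing the values in 64-bit arithmetic and
 * OR-ing the per-layout overflow/underflow flags into the outputs.
 */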
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		for_each_possible_cpu(cpu) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
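			/*
			 * If adding a positive v decreases the sum (or a
			 * negative v increases it), the 64-bit accumulation
			 * itself wrapped; report it as overflow/underflow.
			 */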
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}

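/*
 * Reset one counter slot to zero and clear its overflow/underflow
 * bits. As with lttng_counter_read(), cpu == -1 targets the global
 * layout when it exists.
 */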
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= get_possible_cpus_array_len())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= get_possible_cpus_array_len())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
	return 0;
}

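/*
 * Clear one counter slot across all allocated layouts: the global
 * layout first (when present), then each possible cpu.
 */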
int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		break;
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		for_each_possible_cpu(cpu) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}