Fix: libcounter: use LTTNG_UST_ALIGN
[lttng-ust.git] / libcounter / counter.c
/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#define _GNU_SOURCE
#include <errno.h>
#include <stdint.h>	/* SIZE_MAX, INT8_MAX, INT16_MAX, INT32_MAX */
#include <stdlib.h>	/* free() */
#include "counter.h"
#include "counter-internal.h"
#include <lttng/bitmap.h>
#include <urcu/system.h>
#include <urcu/compiler.h>
#include <stdbool.h>
#include <helper.h>
#include <lttng/align.h>
#include "smp.h"
#include "shm.h"

static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}

static int lttng_counter_init_stride(const struct lib_counter_config *config,
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		/* Reject the multiplication before it can overflow. */
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
		stride *= nr_elem;
	}
	return 0;
}

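/*
 * Example: for a three-dimension counter created with
 * max_nr_elem = { 4, 3, 2 }, the loop above assigns strides from the
 * last dimension back to the first:
 *
 *	dimensions[2].stride = 1
 *	dimensions[1].stride = 2	(1 * 2)
 *	dimensions[0].stride = 6	(2 * 3)
 *
 * Element (i, j, k) then maps to linear index 6*i + 2*j + k, for a
 * total of 4 * 3 * 2 = 24 allocated elements.
 */
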
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;
	size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
	struct lttng_counter_shm_object *shm_object;

	if (shm_fd < 0)
		return 0;	/* Skip, will be populated later. */

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = &counter->percpu_counters[cpu];
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->shm_fd = shm_fd;
	counters_offset = shm_length;
	shm_length += counter_size * nr_elem;
	overflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	underflow_offset = shm_length;
	shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
	layout->shm_len = shm_length;
	if (counter->is_daemon) {
		/* Allocate and clear shared memory. */
		shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
				shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
		if (!shm_object)
			return -ENOMEM;
	} else {
		/* Map pre-existing shared memory. */
		shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
				shm_fd, shm_length);
		if (!shm_object)
			return -ENOMEM;
	}
	layout->counters = shm_object->memory_map + counters_offset;
	layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
	layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
	return 0;
}

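/*
 * Resulting layout, assuming the counter_size enum values encode the
 * element size in bytes (which the cast above relies on): with 1000
 * allocated elements of COUNTER_SIZE_32_BIT, the shared memory object
 * is laid out as:
 *
 *	counters_offset  = 0	(4 * 1000 = 4000 bytes of counters)
 *	overflow_offset  = 4000	(LTTNG_UST_ALIGN(1000, 8) / 8 = 125 bytes)
 *	underflow_offset = 4125	(another 125 bytes)
 *	shm_len          = 4250
 *
 * i.e. one counter slot per element, followed by one overflow bit and
 * one underflow bit per element.
 */
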
int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
		return -EINVAL;
	layout = &counter->global_counters;
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, -1, fd);
}

int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
{
	struct lib_counter_config *config = &counter->config;
	struct lib_counter_layout *layout;

	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
		return -EINVAL;

	if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
		return -EINVAL;
	layout = &counter->percpu_counters[cpu];
	if (layout->shm_fd >= 0)
		return -EBUSY;
	return lttng_counter_layout_init(counter, cpu, fd);
}

static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > INT8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > INT16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > INT32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

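/*
 * For example, with COUNTER_SIZE_8_BIT, a global_sum_step of 127
 * (INT8_MAX) is accepted and stored in global_sum_step.s8, while 128
 * or any negative value is rejected with -EINVAL.
 */
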
static
int validate_args(const struct lib_counter_config *config,
	size_t nr_dimensions,
	const size_t *max_nr_elem,
	int64_t global_sum_step,
	int global_counter_fd,
	int nr_counter_cpu_fds,
	const int *counter_cpu_fds)
{
	int nr_cpus = lttng_counter_num_possible_cpus();

	if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * A global sum step is only useful when allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
		return -1;
	if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
		return -1;
	if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
		return -1;
	return 0;
}

struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
	size_t nr_dimensions,
	const size_t *max_nr_elem,
	int64_t global_sum_step,
	int global_counter_fd,
	int nr_counter_cpu_fds,
	const int *counter_cpu_fds,
	bool is_daemon)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;
	int nr_handles = 0;
	int nr_cpus = lttng_counter_num_possible_cpus();

	if (validate_args(config, nr_dimensions, max_nr_elem,
			global_sum_step, global_counter_fd, nr_counter_cpu_fds,
			counter_cpu_fds))
		return NULL;
	counter = zmalloc(sizeof(struct lib_counter));
	if (!counter)
		return NULL;
	counter->global_counters.shm_fd = -1;
	counter->config = *config;
	counter->is_daemon = is_daemon;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
		lttng_counter_for_each_possible_cpu(cpu)
			counter->percpu_counters[cpu].shm_fd = -1;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	/* TODO: saturation values. */
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;

	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		nr_handles++;
	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		nr_handles += nr_cpus;
	/* Allocate table for global and per-cpu counters. */
	counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
	if (!counter->object_table)
		goto error_alloc_object_table;

	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1, global_counter_fd);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
		lttng_counter_for_each_possible_cpu(cpu) {
			ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
error_alloc_object_table:
error_init_stride:
	free(counter->percpu_counters);
error_alloc_percpu:
	free(counter->dimensions);
error_dimensions:
error_sum_step:
	free(counter);
	return NULL;
}

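/*
 * Usage sketch (hypothetical caller; the config initializer and error
 * handling are assumptions for illustration, not taken from this file):
 *
 *	struct lib_counter_config config = {
 *		.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
 *		.counter_size = COUNTER_SIZE_32_BIT,
 *	};
 *	size_t max_nr_elem[] = { 128 };	(one dimension, 128 elements)
 *	struct lib_counter *counter;
 *
 *	counter = lttng_counter_create(&config, 1, max_nr_elem, 0,
 *			-1, 0, NULL, true);
 *	if (!counter)
 *		return -1;
 *	...
 *	lttng_counter_destroy(counter);
 *
 * Passing -1/NULL file descriptors defers shared memory setup to later
 * lttng_counter_set_global_shm()/lttng_counter_set_cpu_shm() calls.
 */
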
void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;

	if (config->alloc & COUNTER_ALLOC_PER_CPU)
		free(counter->percpu_counters);
	lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
	free(counter->dimensions);
	free(counter);
}

int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
{
	int shm_fd;

	shm_fd = counter->global_counters.shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = counter->global_counters.shm_len;
	return 0;
}

int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
{
	struct lib_counter_layout *layout;
	int shm_fd;

	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
		return -1;
	layout = &counter->percpu_counters[cpu];
	shm_fd = layout->shm_fd;
	if (shm_fd < 0)
		return -1;
	*fd = shm_fd;
	*len = layout->shm_len;
	return 0;
}

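/*
 * Sketch (hypothetical caller): forwarding a per-cpu shared memory fd
 * to another process could look like:
 *
 *	int fd;
 *	size_t len;
 *
 *	if (!lttng_counter_get_cpu_shm(counter, cpu, &fd, &len))
 *		send_shm_to_consumer(sock, fd, len);
 *
 * where send_shm_to_consumer() is a hypothetical helper. Both getters
 * return the stored fd as-is, without duplicating it.
 */
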
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= lttng_counter_num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) CMM_LOAD_SHARED(*int_p);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = CMM_LOAD_SHARED(*int_p);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	*overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
	*underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
	return 0;
}

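/*
 * Example read (illustrative): fetching the global view of element 3
 * of a one-dimension counter:
 *
 *	size_t idx[] = { 3 };
 *	int64_t value;
 *	bool overflow, underflow;
 *	int ret;
 *
 *	ret = lttng_counter_read(&counter->config, counter, idx, -1,
 *			&value, &overflow, &underflow);
 *
 * cpu == -1 selects the global counters and is only valid when
 * COUNTER_ALLOC_GLOBAL is set; cpu >= 0 selects that CPU's counters.
 */
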
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		lttng_counter_for_each_possible_cpu(cpu) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}

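/*
 * The wraparound check above relies on unsigned overflow being
 * well-defined. For instance, old = INT64_MAX and v = 1 wrap to
 * sum = INT64_MIN, so sum < old while v > 0 and *overflow is set;
 * symmetrically, old = INT64_MIN and v = -1 wrap to sum = INT64_MAX,
 * so sum > old while v < 0 and *underflow is set.
 */
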
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	size_t index;
	struct lib_counter_layout *layout;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
			return -EINVAL;
		layout = &counter->percpu_counters[cpu];
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= lttng_counter_num_possible_cpus())
				return -EINVAL;
			layout = &counter->percpu_counters[cpu];
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		CMM_STORE_SHARED(*int_p, 0);
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
	lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
	return 0;
}

int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		break;
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		lttng_counter_for_each_possible_cpu(cpu) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}