fix: Revert "Makefile: Enable -Wimplicit-fallthrough for Clang" (v5.15)
lttng-modules.git: src/lib/counter/counter.c
/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <lttng/tracer.h>
#include <linux/cpumask.h>
#include <counter/counter.h>
#include <counter/counter-internal.h>
#include <wrapper/compiler_attributes.h>
#include <wrapper/vmalloc.h>
#include <wrapper/limits.h>

static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}

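/*
 * Compute the stride of each dimension in row-major order: the last
 * dimension has stride 1, and each preceding dimension's stride is the
 * product of the element counts of the dimensions after it. For example,
 * dimensions { 2, 3 } yield strides { 3, 1 }, so the linear index of
 * (i0, i1) is i0 * 3 + i1. Returns -EINVAL on an empty dimension or on
 * arithmetic overflow.
 */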
static int lttng_counter_init_stride(const struct lib_counter_config *config,
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* Each dimension must have at least one element. */
		if (!nr_elem)
			return -EINVAL;
		/*
		 * Check for multiplication overflow before updating the
		 * stride, otherwise a wrapped-around product could pass
		 * the check.
		 */
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
		stride *= nr_elem;
	}
	return 0;
}

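/*
 * Allocate the backing storage of one counter layout: the flat counters
 * array plus one overflow and one underflow bitmap (one bit per element,
 * rounded up to a multiple of 8). The COUNTER_SIZE_* enum value is used
 * directly as the per-element size in bytes. cpu == -1 selects the
 * global layout; otherwise the per-cpu layout is used and memory is
 * allocated on that CPU's NUMA node (the global layout uses CPU 0's
 * node). Allocations are rounded up to the internode cache line size.
 * Partial allocations are released by lttng_counter_layout_fini() in the
 * caller's error path.
 */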
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->counters = lttng_kvzalloc_node(ALIGN(counter_size * nr_elem,
					1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(cpu, 0)));
	if (!layout->counters)
		return -ENOMEM;
	layout->overflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
					1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(cpu, 0)));
	if (!layout->overflow_bitmap)
		return -ENOMEM;
	layout->underflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
					1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(cpu, 0)));
	if (!layout->underflow_bitmap)
		return -ENOMEM;
	return 0;
}

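/*
 * Release the memory of one layout (global for cpu == -1, per-cpu
 * otherwise). Safe on partially initialized layouts, since
 * lttng_kvfree() accepts NULL pointers.
 */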
static void lttng_counter_layout_fini(struct lib_counter *counter, int cpu)
{
	struct lib_counter_layout *layout;

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = per_cpu_ptr(counter->percpu_counters, cpu);

	lttng_kvfree(layout->counters);
	lttng_kvfree(layout->overflow_bitmap);
	lttng_kvfree(layout->underflow_bitmap);
}

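/*
 * Record the global sum step, rejecting negative values and values that
 * do not fit in the configured counter size. The step is stored in the
 * union member matching that size.
 */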
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > S8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > S16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > S32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

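/*
 * Sanity-check creation arguments: 64-bit counters require a 64-bit
 * kernel, max_nr_elem must be provided, and a non-zero global sum step
 * only makes sense when both per-cpu and global counters are allocated.
 * Note that nr_dimensions itself is not bounds-checked here.
 */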
static
int validate_args(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step)
{
	if (BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * A global sum step is only useful when allocating both per-cpu
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	return 0;
}

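/*
 * Create a counter: validate the arguments, copy the configuration,
 * record the per-dimension element counts, compute strides, then
 * allocate the global and/or per-cpu layouts as requested by
 * config->alloc. Returns NULL on any failure; all intermediate
 * allocations are released on the error paths.
 *
 * Minimal usage sketch (the configuration and values are hypothetical,
 * not taken from this file):
 *
 *	size_t max_nr_elem[] = { 128 };
 *	struct lib_counter *c;
 *
 *	c = lttng_counter_create(&config, 1, max_nr_elem, 0);
 *	if (!c)
 *		return -ENOMEM;
 */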
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;

	if (validate_args(config, nr_dimensions, max_nr_elem, global_sum_step))
		return NULL;
	counter = kzalloc(sizeof(struct lib_counter), GFP_KERNEL);
	if (!counter)
		return NULL;
	counter->config = *config;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = kzalloc(nr_dimensions * sizeof(*counter->dimensions), GFP_KERNEL);
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = alloc_percpu(struct lib_counter_layout);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	//TODO saturation values.
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;
	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_layout_init(counter, cpu);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
			lttng_counter_layout_fini(counter, cpu);
	}
	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		lttng_counter_layout_fini(counter, -1);
error_init_stride:
	free_percpu(counter->percpu_counters);
error_alloc_percpu:
	kfree(counter->dimensions);
error_dimensions:
error_sum_step:
	kfree(counter);
	return NULL;
}
EXPORT_SYMBOL_GPL(lttng_counter_create);

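/*
 * Tear down a counter created by lttng_counter_create(): free every
 * per-cpu and/or global layout according to config->alloc, then the
 * dimension array and the counter itself.
 */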
void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;
	int cpu;

	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
			lttng_counter_layout_fini(counter, cpu);
		free_percpu(counter->percpu_counters);
	}
	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		lttng_counter_layout_fini(counter, -1);
	kfree(counter->dimensions);
	kfree(counter);
}
EXPORT_SYMBOL_GPL(lttng_counter_destroy);

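/*
 * Read one counter element. cpu selects the per-cpu layout, or the
 * global layout when cpu == -1 (only valid for configurations that
 * allocate it). The value is read with READ_ONCE() and sign-extended to
 * int64_t; *overflow and *underflow report the element's overflow and
 * underflow bits. Returns -EOVERFLOW on out-of-range indexes and
 * -EINVAL on a cpu argument inconsistent with config->alloc.
 */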
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	struct lib_counter_layout *layout;
	size_t index;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = per_cpu_ptr(counter->percpu_counters, cpu);
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
#if BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = READ_ONCE(*int_p);
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
	*overflow = test_bit(index, layout->overflow_bitmap);
	*underflow = test_bit(index, layout->underflow_bitmap);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_read);

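/*
 * Sum one element across the global layout (if allocated) and every
 * possible CPU (if per-cpu layouts are allocated). The per-element
 * overflow/underflow bits are OR-ed together, and overflow/underflow of
 * the 64-bit accumulation itself is also detected. The addition is
 * performed on uint64_t because signed overflow is undefined in C.
 */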
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		lttng_fallthrough;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		lttng_fallthrough;
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_aggregate);

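/*
 * Reset one counter element of the selected layout (global for
 * cpu == -1) to zero with WRITE_ONCE(), and clear its overflow and
 * underflow bits. The cpu/layout validation mirrors
 * lttng_counter_read().
 */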
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	struct lib_counter_layout *layout;
	size_t index;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = per_cpu_ptr(counter->percpu_counters, cpu);
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
#if BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
	clear_bit(index, layout->overflow_bitmap);
	clear_bit(index, layout->underflow_bitmap);
	return 0;
}

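/*
 * Clear one element everywhere it is materialized: in the global layout
 * (if allocated) and on every possible CPU (if per-cpu layouts are
 * allocated). Returns the first error encountered, if any.
 */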
int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		lttng_fallthrough;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		lttng_fallthrough;
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_clear);

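/* Report the number of dimensions this counter was created with. */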
int lttng_counter_get_nr_dimensions(const struct lib_counter_config *config,
		struct lib_counter *counter,
		size_t *nr_dimensions)
{
	*nr_dimensions = counter->nr_dimensions;
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_get_nr_dimensions);

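/*
 * Copy the per-dimension maximum element counts into max_nr_elem, which
 * the caller must size to hold nr_dimensions entries.
 */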
int lttng_counter_get_max_nr_elem(const struct lib_counter_config *config,
		struct lib_counter *counter,
		size_t *max_nr_elem)	/* array of size nr_dimensions */
{
	size_t dimension;

	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		max_nr_elem[dimension] = lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_get_max_nr_elem);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng counter library");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);