/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * counter.c
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <lttng/tracer.h>
#include <linux/cpumask.h>
#include <counter/counter.h>
#include <counter/counter-internal.h>
#include <wrapper/vmalloc.h>
#include <wrapper/limits.h>

static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
{
	return dimension->max_nr_elem;
}

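/*
 * Compute the stride of each dimension, walking from the innermost (last)
 * dimension, which has stride 1, towards the outermost: the stride of a
 * dimension is the product of the element counts of all inner dimensions.
 * Fails on empty dimensions and on products that would overflow SIZE_MAX.
 */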
static int lttng_counter_init_stride(const struct lib_counter_config *config,
		struct lib_counter *counter)
{
	size_t nr_dimensions = counter->nr_dimensions;
	size_t stride = 1;
	ssize_t i;

	for (i = nr_dimensions - 1; i >= 0; i--) {
		struct lib_counter_dimension *dimension = &counter->dimensions[i];
		size_t nr_elem;

		nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
		dimension->stride = stride;
		/* nr_elem should be minimum 1 for each dimension. */
		if (!nr_elem)
			return -EINVAL;
		/* Check for multiplication overflow before it happens. */
		if (stride > SIZE_MAX / nr_elem)
			return -EINVAL;
		stride *= nr_elem;
	}
	return 0;
}

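/*
 * Allocate one counter layout: the flat array of counters plus one overflow
 * and one underflow bit per element. cpu == -1 selects the global layout,
 * otherwise the per-CPU layout of the given CPU. Allocations are rounded up
 * to the internode cacheline size and placed on the CPU's NUMA node. The
 * counter_size enum value is the element size in bytes.
 */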
static int lttng_counter_layout_init(struct lib_counter *counter, int cpu)
{
	struct lib_counter_layout *layout;
	size_t counter_size;
	size_t nr_elem = counter->allocated_elem;

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
	case COUNTER_SIZE_16_BIT:
	case COUNTER_SIZE_32_BIT:
	case COUNTER_SIZE_64_BIT:
		counter_size = (size_t) counter->config.counter_size;
		break;
	default:
		return -EINVAL;
	}
	layout->counters = lttng_kvzalloc_node(ALIGN(counter_size * nr_elem,
						     1 << INTERNODE_CACHE_SHIFT),
					       GFP_KERNEL | __GFP_NOWARN,
					       cpu_to_node(max(cpu, 0)));
	if (!layout->counters)
		return -ENOMEM;
	layout->overflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
							    1 << INTERNODE_CACHE_SHIFT),
						      GFP_KERNEL | __GFP_NOWARN,
						      cpu_to_node(max(cpu, 0)));
	if (!layout->overflow_bitmap)
		return -ENOMEM;
	layout->underflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
							     1 << INTERNODE_CACHE_SHIFT),
						       GFP_KERNEL | __GFP_NOWARN,
						       cpu_to_node(max(cpu, 0)));
	if (!layout->underflow_bitmap)
		return -ENOMEM;
	return 0;
}

static void lttng_counter_layout_fini(struct lib_counter *counter, int cpu)
{
	struct lib_counter_layout *layout;

	if (cpu == -1)
		layout = &counter->global_counters;
	else
		layout = per_cpu_ptr(counter->percpu_counters, cpu);

	lttng_kvfree(layout->counters);
	lttng_kvfree(layout->overflow_bitmap);
	lttng_kvfree(layout->underflow_bitmap);
}

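/*
 * Validate and record the global sum step, which must be non-negative and
 * fit within the configured counter size. It is stored in the union member
 * matching that size.
 */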
static
int lttng_counter_set_global_sum_step(struct lib_counter *counter,
		int64_t global_sum_step)
{
	if (global_sum_step < 0)
		return -EINVAL;

	switch (counter->config.counter_size) {
	case COUNTER_SIZE_8_BIT:
		if (global_sum_step > S8_MAX)
			return -EINVAL;
		counter->global_sum_step.s8 = (int8_t) global_sum_step;
		break;
	case COUNTER_SIZE_16_BIT:
		if (global_sum_step > S16_MAX)
			return -EINVAL;
		counter->global_sum_step.s16 = (int16_t) global_sum_step;
		break;
	case COUNTER_SIZE_32_BIT:
		if (global_sum_step > S32_MAX)
			return -EINVAL;
		counter->global_sum_step.s32 = (int32_t) global_sum_step;
		break;
	case COUNTER_SIZE_64_BIT:
		counter->global_sum_step.s64 = global_sum_step;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

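/*
 * Reject configurations that cannot work: 64-bit counters on 32-bit kernels,
 * a missing max_nr_elem array, or a global sum step requested without
 * allocating both per-CPU and global counters.
 */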
static
int validate_args(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step)
{
	if (BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
		WARN_ON_ONCE(1);
		return -1;
	}
	if (!max_nr_elem)
		return -1;
	/*
	 * The global sum step is only useful when allocating both per-CPU
	 * and global counters.
	 */
	if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
			!(config->alloc & COUNTER_ALLOC_PER_CPU)))
		return -1;
	return 0;
}

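/*
 * Create a counter: copy the configuration, record the global sum step and
 * the per-dimension element counts, compute the dimension strides, then
 * allocate the global and/or per-CPU layouts requested by config->alloc.
 * Returns NULL on any error, after tearing down partial allocations.
 */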
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
		size_t nr_dimensions,
		const size_t *max_nr_elem,
		int64_t global_sum_step)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;

	if (validate_args(config, nr_dimensions, max_nr_elem, global_sum_step))
		return NULL;
	counter = kzalloc(sizeof(struct lib_counter), GFP_KERNEL);
	if (!counter)
		return NULL;
	counter->config = *config;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	/* kcalloc checks the nr_dimensions * size multiplication for overflow. */
	counter->dimensions = kcalloc(nr_dimensions, sizeof(*counter->dimensions), GFP_KERNEL);
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = alloc_percpu(struct lib_counter_layout);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
	}

	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	//TODO saturation values.
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;
	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1);	/* global */
		if (ret)
			goto layout_init_error;
	}
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_layout_init(counter, cpu);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

layout_init_error:
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
			lttng_counter_layout_fini(counter, cpu);
	}
	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		lttng_counter_layout_fini(counter, -1);
error_init_stride:
	free_percpu(counter->percpu_counters);
error_alloc_percpu:
	kfree(counter->dimensions);
error_dimensions:
error_sum_step:
	kfree(counter);
	return NULL;
}
EXPORT_SYMBOL_GPL(lttng_counter_create);
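/*
 * Example usage (illustrative sketch only; the two-dimensional sizes and the
 * caller-provided "config" are hypothetical, not part of this file):
 *
 *	const size_t max_nr_elem[] = { 16, 256 };
 *	struct lib_counter *counter;
 *
 *	counter = lttng_counter_create(&config, 2, max_nr_elem, 0);
 *	if (!counter)
 *		return -ENOMEM;
 *	...
 *	lttng_counter_destroy(counter);
 */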

void lttng_counter_destroy(struct lib_counter *counter)
{
	struct lib_counter_config *config = &counter->config;
	int cpu;

	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
			lttng_counter_layout_fini(counter, cpu);
		free_percpu(counter->percpu_counters);
	}
	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		lttng_counter_layout_fini(counter, -1);
	kfree(counter->dimensions);
	kfree(counter);
}
EXPORT_SYMBOL_GPL(lttng_counter_destroy);

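/*
 * Read one counter slot. cpu == -1 reads the global layout (only valid when
 * a global layout is allocated); cpu >= 0 reads that CPU's per-CPU layout.
 * The value is sign-extended to 64 bits, and the slot's sticky overflow and
 * underflow flags are reported alongside it. Returns -EOVERFLOW when
 * dimension_indexes is out of bounds.
 */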
int lttng_counter_read(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu, int64_t *value, bool *overflow,
		bool *underflow)
{
	struct lib_counter_layout *layout;
	size_t index;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = per_cpu_ptr(counter->percpu_counters, cpu);
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
#if BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = READ_ONCE(*int_p);
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
		/* Do not return success with *value left uninitialized. */
		return -EINVAL;
	}
	*overflow = test_bit(index, layout->overflow_bitmap);
	*underflow = test_bit(index, layout->underflow_bitmap);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_read);

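/*
 * Aggregate one slot across all allocated layouts: the global layout first
 * (when present), then every possible CPU. The 64-bit sum itself is checked
 * for wraparound, and the per-layout overflow/underflow flags are OR-ed into
 * the reported flags.
 */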
int lttng_counter_aggregate(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int64_t *value, bool *overflow,
		bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
				-1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
					cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/* Overflow is defined on unsigned types. */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_aggregate);
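/*
 * Example usage (illustrative sketch only; the indexes are hypothetical and
 * "counter" is assumed to come from lttng_counter_create() above):
 *
 *	const size_t indexes[] = { 3, 7 };
 *	int64_t value;
 *	bool overflow, underflow;
 *	int ret;
 *
 *	ret = lttng_counter_aggregate(&counter->config, counter, indexes,
 *			&value, &overflow, &underflow);
 *	if (ret)
 *		return ret;
 */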
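/*
 * Reset one counter slot to zero and clear its overflow/underflow bits, for
 * the layout selected by cpu (-1 selects the global layout).
 */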
static
int lttng_counter_clear_cpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes,
		int cpu)
{
	struct lib_counter_layout *layout;
	size_t index;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = per_cpu_ptr(counter->percpu_counters, cpu);
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
#if BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		WRITE_ONCE(*int_p, 0);
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
		/* Do not clear the bitmaps for an unknown counter size. */
		return -EINVAL;
	}
	clear_bit(index, layout->overflow_bitmap);
	clear_bit(index, layout->underflow_bitmap);
	return 0;
}

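/*
 * Clear one slot across all allocated layouts: the global layout first
 * (when present), then every possible CPU.
 */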
int lttng_counter_clear(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_clear);

int lttng_counter_get_nr_dimensions(const struct lib_counter_config *config,
		struct lib_counter *counter,
		size_t *nr_dimensions)
{
	*nr_dimensions = counter->nr_dimensions;
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_get_nr_dimensions);

int lttng_counter_get_max_nr_elem(const struct lib_counter_config *config,
		struct lib_counter *counter,
		size_t *max_nr_elem)	/* array of size nr_dimensions */
{
	size_t dimension;

	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		max_nr_elem[dimension] = lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_get_max_nr_elem);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng counter library");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);