bd5c459f7b26f315e4f00eaae1f34c4a4757b3eb
[lttng-modules.git] / src / lib / counter / counter.c
1 /* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
2 *
3 * counter.c
4 *
5 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 */
7
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <lttng/tracer.h>
11 #include <linux/cpumask.h>
12 #include <counter/counter.h>
13 #include <counter/counter-internal.h>
14 #include <wrapper/vmalloc.h>
15
16 static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
17 {
18 return dimension->max_nr_elem;
19 }
20
21 static int lttng_counter_init_stride(const struct lib_counter_config *config,
22 struct lib_counter *counter)
23 {
24 size_t nr_dimensions = counter->nr_dimensions;
25 size_t stride = 1;
26 ssize_t i;
27
28 for (i = nr_dimensions - 1; i >= 0; i--) {
29 struct lib_counter_dimension *dimension = &counter->dimensions[i];
30 size_t nr_elem;
31
32 nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
33 dimension->stride = stride;
34 /* nr_elem should be minimum 1 for each dimension. */
35 if (!nr_elem)
36 return -EINVAL;
37 stride *= nr_elem;
38 if (stride > SIZE_MAX / nr_elem)
39 return -EINVAL;
40 }
41 return 0;
42 }
43
44 static int lttng_counter_layout_init(struct lib_counter *counter, int cpu)
45 {
46 struct lib_counter_layout *layout;
47 size_t counter_size;
48 size_t nr_elem = counter->allocated_elem;
49
50 if (cpu == -1)
51 layout = &counter->global_counters;
52 else
53 layout = per_cpu_ptr(counter->percpu_counters, cpu);
54 switch (counter->config.counter_size) {
55 case COUNTER_SIZE_8_BIT:
56 case COUNTER_SIZE_16_BIT:
57 case COUNTER_SIZE_32_BIT:
58 case COUNTER_SIZE_64_BIT:
59 counter_size = (size_t) counter->config.counter_size;
60 break;
61 default:
62 return -EINVAL;
63 }
64 layout->counters = lttng_kvzalloc_node(ALIGN(counter_size * nr_elem,
65 1 << INTERNODE_CACHE_SHIFT),
66 GFP_KERNEL | __GFP_NOWARN,
67 cpu_to_node(max(cpu, 0)));
68 if (!layout->counters)
69 return -ENOMEM;
70 layout->overflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
71 1 << INTERNODE_CACHE_SHIFT),
72 GFP_KERNEL | __GFP_NOWARN,
73 cpu_to_node(max(cpu, 0)));
74 if (!layout->overflow_bitmap)
75 return -ENOMEM;
76 layout->underflow_bitmap = lttng_kvzalloc_node(ALIGN(ALIGN(nr_elem, 8) / 8,
77 1 << INTERNODE_CACHE_SHIFT),
78 GFP_KERNEL | __GFP_NOWARN,
79 cpu_to_node(max(cpu, 0)));
80 if (!layout->underflow_bitmap)
81 return -ENOMEM;
82 return 0;
83 }
84
85 static void lttng_counter_layout_fini(struct lib_counter *counter, int cpu)
86 {
87 struct lib_counter_layout *layout;
88
89 if (cpu == -1)
90 layout = &counter->global_counters;
91 else
92 layout = per_cpu_ptr(counter->percpu_counters, cpu);
93
94 lttng_kvfree(layout->counters);
95 lttng_kvfree(layout->overflow_bitmap);
96 lttng_kvfree(layout->underflow_bitmap);
97 }
98
99 static
100 int lttng_counter_set_global_sum_step(struct lib_counter *counter,
101 int64_t global_sum_step)
102 {
103 if (global_sum_step < 0)
104 return -EINVAL;
105
106 switch (counter->config.counter_size) {
107 case COUNTER_SIZE_8_BIT:
108 if (global_sum_step > S8_MAX)
109 return -EINVAL;
110 counter->global_sum_step.s8 = (int8_t) global_sum_step;
111 break;
112 case COUNTER_SIZE_16_BIT:
113 if (global_sum_step > S16_MAX)
114 return -EINVAL;
115 counter->global_sum_step.s16 = (int16_t) global_sum_step;
116 break;
117 case COUNTER_SIZE_32_BIT:
118 if (global_sum_step > S32_MAX)
119 return -EINVAL;
120 counter->global_sum_step.s32 = (int32_t) global_sum_step;
121 break;
122 case COUNTER_SIZE_64_BIT:
123 counter->global_sum_step.s64 = global_sum_step;
124 break;
125 default:
126 return -EINVAL;
127 }
128
129 return 0;
130 }
131
132 static
133 int validate_args(const struct lib_counter_config *config,
134 size_t nr_dimensions,
135 const size_t *max_nr_elem,
136 int64_t global_sum_step)
137 {
138 if (BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
139 WARN_ON_ONCE(1);
140 return -1;
141 }
142 if (!max_nr_elem)
143 return -1;
144 /*
145 * global sum step is only useful with allocating both per-cpu
146 * and global counters.
147 */
148 if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
149 !(config->alloc & COUNTER_ALLOC_PER_CPU)))
150 return -1;
151 return 0;
152 }
153
/*
 * lttng_counter_create - allocate and initialize a multi-dimensional counter.
 *
 * @config:          counter configuration (allocation flags, counter size).
 * @nr_dimensions:   number of dimensions.
 * @max_nr_elem:     array of @nr_dimensions element counts, one per dimension.
 * @global_sum_step: threshold for flushing per-cpu deltas into the global
 *                   counter; only valid when both per-cpu and global
 *                   allocation are requested (see validate_args()).
 *
 * Returns the new counter on success, NULL on invalid arguments or
 * allocation failure. The returned counter is owned by the caller and
 * must be released with lttng_counter_destroy().
 */
struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
					 size_t nr_dimensions,
					 const size_t *max_nr_elem,
					 int64_t global_sum_step)
{
	struct lib_counter *counter;
	size_t dimension, nr_elem = 1;
	int cpu, ret;

	if (validate_args(config, nr_dimensions, max_nr_elem, global_sum_step))
		return NULL;
	counter = kzalloc(sizeof(struct lib_counter), GFP_KERNEL);
	if (!counter)
		return NULL;
	/* Copy the configuration so the caller's copy may go away. */
	counter->config = *config;
	if (lttng_counter_set_global_sum_step(counter, global_sum_step))
		goto error_sum_step;
	counter->nr_dimensions = nr_dimensions;
	counter->dimensions = kzalloc(nr_dimensions * sizeof(*counter->dimensions), GFP_KERNEL);
	if (!counter->dimensions)
		goto error_dimensions;
	for (dimension = 0; dimension < nr_dimensions; dimension++)
		counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		counter->percpu_counters = alloc_percpu(struct lib_counter_layout);
		if (!counter->percpu_counters)
			goto error_alloc_percpu;
	}

	/* Also validates that the total element count fits size_t. */
	if (lttng_counter_init_stride(config, counter))
		goto error_init_stride;
	//TODO saturation values.
	/* Total number of elements across all dimensions (flattened). */
	for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
		nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
	counter->allocated_elem = nr_elem;
	if (config->alloc & COUNTER_ALLOC_GLOBAL) {
		ret = lttng_counter_layout_init(counter, -1); /* global */
		if (ret)
			goto layout_init_error;
	}
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_layout_init(counter, cpu);
			if (ret)
				goto layout_init_error;
		}
	}
	return counter;

	/*
	 * Error unwinding below runs layout_fini() on every layout,
	 * including ones that were never initialized. This is safe
	 * because alloc_percpu()/kzalloc() zero the layouts.
	 * NOTE(review): relies on lttng_kvfree(NULL) being a no-op —
	 * confirm in wrapper/vmalloc.h.
	 */
layout_init_error:
	if (config->alloc & COUNTER_ALLOC_PER_CPU) {
		for (cpu = 0; cpu < num_possible_cpus(); cpu++)
			lttng_counter_layout_fini(counter, cpu);
	}
	if (config->alloc & COUNTER_ALLOC_GLOBAL)
		lttng_counter_layout_fini(counter, -1);
error_init_stride:
	/* free_percpu(NULL) is a no-op when per-cpu was not allocated. */
	free_percpu(counter->percpu_counters);
error_alloc_percpu:
	kfree(counter->dimensions);
error_dimensions:
error_sum_step:
	kfree(counter);
	return NULL;
}
EXPORT_SYMBOL_GPL(lttng_counter_create);
221
222 void lttng_counter_destroy(struct lib_counter *counter)
223 {
224 struct lib_counter_config *config = &counter->config;
225 int cpu;
226
227 if (config->alloc & COUNTER_ALLOC_PER_CPU) {
228 for (cpu = 0; cpu < num_possible_cpus(); cpu++)
229 lttng_counter_layout_fini(counter, cpu);
230 free_percpu(counter->percpu_counters);
231 }
232 if (config->alloc & COUNTER_ALLOC_GLOBAL)
233 lttng_counter_layout_fini(counter, -1);
234 kfree(counter->dimensions);
235 kfree(counter);
236 }
237 EXPORT_SYMBOL_GPL(lttng_counter_destroy);
238
/*
 * lttng_counter_read - read one counter value and its state bits.
 *
 * @dimension_indexes: one index per dimension, validated against the
 *                     dimension bounds before use.
 * @cpu:               CPU whose per-cpu counter to read, or -1 for the
 *                     global counter. Which values are accepted depends
 *                     on config->alloc (see switch below).
 * @value:             out: sign-extended counter value.
 * @overflow/@underflow: out: per-index saturation state bits.
 *
 * Returns 0 on success, -EOVERFLOW on out-of-bounds indexes, -EINVAL
 * on a cpu argument inconsistent with the allocation mode.
 */
int lttng_counter_read(const struct lib_counter_config *config,
		       struct lib_counter *counter,
		       const size_t *dimension_indexes,
		       int cpu, int64_t *value, bool *overflow,
		       bool *underflow)
{
	struct lib_counter_layout *layout;
	size_t index;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	/* Select the layout (global vs per-cpu) matching @cpu. */
	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		/* Per-cpu only: a valid cpu number is mandatory. */
		if (cpu < 0 || cpu >= num_possible_cpus())
			return -EINVAL;
		layout = per_cpu_ptr(counter->percpu_counters, cpu);
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Both: cpu >= 0 selects per-cpu, -1 selects global. */
		if (cpu >= 0) {
			if (cpu >= num_possible_cpus())
				return -EINVAL;
			layout = per_cpu_ptr(counter->percpu_counters, cpu);
		} else {
			layout = &counter->global_counters;
		}
		break;
	case COUNTER_ALLOC_GLOBAL:
		/* Global only: cpu must be -1. */
		if (cpu >= 0)
			return -EINVAL;
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * READ_ONCE provides a single racy snapshot of the counter
	 * word; concurrent updaters may still be modifying it.
	 */
	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		*value = (int64_t) READ_ONCE(*int_p);
		break;
	}
#if BITS_PER_LONG == 64
	/* 64-bit counters are rejected at creation on 32-bit kernels. */
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		*value = READ_ONCE(*int_p);
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
	*overflow = test_bit(index, layout->overflow_bitmap);
	*underflow = test_bit(index, layout->underflow_bitmap);
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_read);
311
/*
 * lttng_counter_aggregate - sum one counter index across the global
 * counter and all possible CPUs (as configured).
 *
 * @value: out: aggregated sum.
 * @overflow/@underflow: out: true if any contributing layout had its
 * state bit set, or if the 64-bit summation itself wrapped.
 *
 * Returns 0 on success, negative error from lttng_counter_read()
 * otherwise.
 */
int lttng_counter_aggregate(const struct lib_counter_config *config,
			    struct lib_counter *counter,
			    const size_t *dimension_indexes,
			    int64_t *value, bool *overflow,
			    bool *underflow)
{
	int cpu, ret;
	int64_t v, sum = 0;
	bool of, uf;

	*overflow = false;
	*underflow = false;

	/* First pass: fold in the global counter, when it exists. */
	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Read global counter. */
		ret = lttng_counter_read(config, counter, dimension_indexes,
					 -1, &v, &of, &uf);
		if (ret < 0)
			return ret;
		sum += v;
		*overflow |= of;
		*underflow |= uf;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	}

	/* Second pass: fold in each per-cpu counter, when they exist. */
	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: /* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			int64_t old = sum;

			ret = lttng_counter_read(config, counter, dimension_indexes,
						 cpu, &v, &of, &uf);
			if (ret < 0)
				return ret;
			*overflow |= of;
			*underflow |= uf;
			/*
			 * Overflow is defined on unsigned types: do the
			 * add in uint64_t to avoid signed-overflow UB,
			 * then detect wraparound by sign of the change.
			 */
			sum = (int64_t) ((uint64_t) old + (uint64_t) v);
			if (v > 0 && sum < old)
				*overflow = true;
			else if (v < 0 && sum > old)
				*underflow = true;
		}
		break;
	default:
		return -EINVAL;
	}
	*value = sum;
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_aggregate);
371
372 static
373 int lttng_counter_clear_cpu(const struct lib_counter_config *config,
374 struct lib_counter *counter,
375 const size_t *dimension_indexes,
376 int cpu)
377 {
378 struct lib_counter_layout *layout;
379 size_t index;
380
381 if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
382 return -EOVERFLOW;
383 index = lttng_counter_get_index(config, counter, dimension_indexes);
384
385 switch (config->alloc) {
386 case COUNTER_ALLOC_PER_CPU:
387 if (cpu < 0 || cpu >= num_possible_cpus())
388 return -EINVAL;
389 layout = per_cpu_ptr(counter->percpu_counters, cpu);
390 break;
391 case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
392 if (cpu >= 0) {
393 if (cpu >= num_possible_cpus())
394 return -EINVAL;
395 layout = per_cpu_ptr(counter->percpu_counters, cpu);
396 } else {
397 layout = &counter->global_counters;
398 }
399 break;
400 case COUNTER_ALLOC_GLOBAL:
401 if (cpu >= 0)
402 return -EINVAL;
403 layout = &counter->global_counters;
404 break;
405 default:
406 return -EINVAL;
407 }
408 switch (config->counter_size) {
409 case COUNTER_SIZE_8_BIT:
410 {
411 int8_t *int_p = (int8_t *) layout->counters + index;
412 WRITE_ONCE(*int_p, 0);
413 break;
414 }
415 case COUNTER_SIZE_16_BIT:
416 {
417 int16_t *int_p = (int16_t *) layout->counters + index;
418 WRITE_ONCE(*int_p, 0);
419 break;
420 }
421 case COUNTER_SIZE_32_BIT:
422 {
423 int32_t *int_p = (int32_t *) layout->counters + index;
424 WRITE_ONCE(*int_p, 0);
425 break;
426 }
427 #if BITS_PER_LONG == 64
428 case COUNTER_SIZE_64_BIT:
429 {
430 int64_t *int_p = (int64_t *) layout->counters + index;
431 WRITE_ONCE(*int_p, 0);
432 break;
433 }
434 #endif
435 default:
436 WARN_ON_ONCE(1);
437 }
438 clear_bit(index, layout->overflow_bitmap);
439 clear_bit(index, layout->underflow_bitmap);
440 return 0;
441 }
442
/*
 * lttng_counter_clear - clear one counter index on every layout the
 * configuration allocated (global and/or all possible CPUs).
 *
 * Structure mirrors lttng_counter_aggregate(): first the global
 * layout, then each per-cpu layout.
 *
 * Returns 0 on success, negative error from lttng_counter_clear_cpu()
 * otherwise.
 */
int lttng_counter_clear(const struct lib_counter_config *config,
			struct lib_counter *counter,
			const size_t *dimension_indexes)
{
	int cpu, ret;

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		/* Clear global counter. */
		ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
		if (ret < 0)
			return ret;
		break;
	case COUNTER_ALLOC_PER_CPU:
		break;
	}

	switch (config->alloc) {
	case COUNTER_ALLOC_GLOBAL:
		break;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: /* Fallthrough */
	case COUNTER_ALLOC_PER_CPU:
		//TODO: integrate with CPU hotplug and online cpus
		for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
			ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
			if (ret < 0)
				return ret;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_counter_clear);
479
480 int lttng_counter_get_nr_dimensions(const struct lib_counter_config *config,
481 struct lib_counter *counter,
482 size_t *nr_dimensions)
483 {
484 *nr_dimensions = counter->nr_dimensions;
485 return 0;
486 }
487 EXPORT_SYMBOL_GPL(lttng_counter_get_nr_dimensions);
488
489 int lttng_counter_get_max_nr_elem(const struct lib_counter_config *config,
490 struct lib_counter *counter,
491 size_t *max_nr_elem) /* array of size nr_dimensions */
492 {
493 size_t dimension;
494
495 for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
496 max_nr_elem[dimension] = lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
497 return 0;
498 }
499 EXPORT_SYMBOL_GPL(lttng_counter_get_max_nr_elem);
500
501 MODULE_LICENSE("GPL and additional rights");
502 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
503 MODULE_DESCRIPTION("LTTng counter library");
504 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
505 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
506 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
507 LTTNG_MODULES_EXTRAVERSION);
This page took 0.038446 seconds and 3 git commands to generate.