Introduce limits wrapper
[lttng-modules.git] / include / counter / counter-api.h
CommitLineData
a101fa10
MD
1/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * counter/counter-api.h
4 *
5 * LTTng Counters API, requiring counter/config.h
6 *
7 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10#ifndef _LTTNG_COUNTER_API_H
11#define _LTTNG_COUNTER_API_H
12
13#include <linux/types.h>
14#include <linux/percpu.h>
15#include <linux/bitops.h>
16#include <counter/counter.h>
17#include <counter/counter-internal.h>
236233f7 18#include <wrapper/limits.h>
a101fa10
MD
19
20/*
21 * Using unsigned arithmetic because overflow is defined.
22 */
/*
 * Add @v to the counter element selected by @dimension_indexes.
 *
 * @alloc selects which backing storage is updated (this CPU's per-cpu
 * slot or the global slot), and @sync selects the atomicity of the
 * update: COUNTER_SYNC_PER_CPU uses cmpxchg_local (atomic w.r.t. the
 * local CPU only), COUNTER_SYNC_GLOBAL uses cmpxchg (fully atomic).
 *
 * For the per-cpu sync scheme, whenever the per-cpu value crosses
 * +/- global_sum_step, half of the step is subtracted from the per-cpu
 * counter and returned through @remainder; the caller is expected to
 * flush that amount into the global counter (see
 * __lttng_counter_add_percpu below).
 *
 * Returns 0 on success, -EOVERFLOW on out-of-bound indexes, -EINVAL on
 * an unknown @alloc or counter size. Wrap-around of the stored value is
 * not an error: it is recorded in the overflow/underflow bitmaps.
 *
 * Uses unsigned arithmetic for the additions because unsigned overflow
 * is well-defined in C, unlike signed overflow.
 */
static inline int __lttng_counter_add(const struct lib_counter_config *config,
				      enum lib_counter_config_alloc alloc,
				      enum lib_counter_config_sync sync,
				      struct lib_counter *counter,
				      const size_t *dimension_indexes, int64_t v,
				      int64_t *remainder)
{
	size_t index;
	bool overflow = false, underflow = false;
	struct lib_counter_layout *layout;
	int64_t move_sum = 0;

	/* Validate the multi-dimensional indexes before flattening them. */
	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	/* Select the backing storage: this CPU's slot or the global slot. */
	switch (alloc) {
	case COUNTER_ALLOC_PER_CPU:
		layout = per_cpu_ptr(counter->percpu_counters, smp_processor_id());
		break;
	case COUNTER_ALLOC_GLOBAL:
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * The four width cases below follow an identical pattern; only the
	 * integer width differs. See the 8-bit case for detailed comments.
	 */
	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		int8_t old, n, res;
		int8_t global_sum_step = counter->global_sum_step.s8;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			/*
			 * cmpxchg_local retry loop: recompute the new value
			 * from the freshly observed old value until the
			 * compare-and-exchange succeeds (old == res).
			 */
			do {
				move_sum = 0;
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				/*
				 * Keep the per-cpu value within
				 * +/- global_sum_step: carve off half a step
				 * into move_sum for the caller to push to the
				 * global counter.
				 */
				if (unlikely(n > (int8_t) global_sum_step))
					move_sum = (int8_t) global_sum_step / 2;
				else if (unlikely(n < -(int8_t) global_sum_step))
					move_sum = -((int8_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			/* Fully atomic update; no step bounding applies. */
			do {
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		/*
		 * Flag wrap-around: either |v| alone exceeds the type's
		 * range, or the stored value moved opposite to v's sign.
		 */
		if (v > 0 && (v >= U8_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -U8_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		int16_t old, n, res;
		int16_t global_sum_step = counter->global_sum_step.s16;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				if (unlikely(n > (int16_t) global_sum_step))
					move_sum = (int16_t) global_sum_step / 2;
				else if (unlikely(n < -(int16_t) global_sum_step))
					move_sum = -((int16_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && (v >= U16_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -U16_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		int32_t old, n, res;
		int32_t global_sum_step = counter->global_sum_step.s32;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				if (unlikely(n > (int32_t) global_sum_step))
					move_sum = (int32_t) global_sum_step / 2;
				else if (unlikely(n < -(int32_t) global_sum_step))
					move_sum = -((int32_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && (v >= U32_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -U32_MAX || n > old))
			underflow = true;
		break;
	}
#if BITS_PER_LONG == 64
	/* 64-bit counters need a 64-bit cmpxchg; only on 64-bit kernels. */
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		int64_t old, n, res;
		int64_t global_sum_step = counter->global_sum_step.s64;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				if (unlikely(n > (int64_t) global_sum_step))
					move_sum = (int64_t) global_sum_step / 2;
				else if (unlikely(n < -(int64_t) global_sum_step))
					move_sum = -((int64_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		/* v is int64_t itself, so only sign-reversal can signal wrap. */
		if (v > 0 && n < old)
			overflow = true;
		else if (v < 0 && n > old)
			underflow = true;
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	/* Only issue the atomic set_bit when the bit is not already set. */
	if (unlikely(overflow && !test_bit(index, layout->overflow_bitmap)))
		set_bit(index, layout->overflow_bitmap);
	else if (unlikely(underflow && !test_bit(index, layout->underflow_bitmap)))
		set_bit(index, layout->underflow_bitmap);
	if (remainder)
		*remainder = move_sum;
	return 0;
}
220
221static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
222 struct lib_counter *counter,
223 const size_t *dimension_indexes, int64_t v)
224{
225 int64_t move_sum;
226 int ret;
227
228 ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
229 counter, dimension_indexes, v, &move_sum);
230 if (unlikely(ret))
231 return ret;
232 if (unlikely(move_sum))
233 return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
234 counter, dimension_indexes, move_sum, NULL);
235 return 0;
236}
237
238static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
239 struct lib_counter *counter,
240 const size_t *dimension_indexes, int64_t v)
241{
242 return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
243 dimension_indexes, v, NULL);
244}
245
246static inline int lttng_counter_add(const struct lib_counter_config *config,
247 struct lib_counter *counter,
248 const size_t *dimension_indexes, int64_t v)
249{
250 switch (config->alloc) {
251 case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
252 case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
253 return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
254 case COUNTER_ALLOC_GLOBAL:
255 return __lttng_counter_add_global(config, counter, dimension_indexes, v);
256 default:
257 return -EINVAL;
258 }
259}
260
261static inline int lttng_counter_inc(const struct lib_counter_config *config,
262 struct lib_counter *counter,
263 const size_t *dimension_indexes)
264{
265 return lttng_counter_add(config, counter, dimension_indexes, 1);
266}
267
268static inline int lttng_counter_dec(const struct lib_counter_config *config,
269 struct lib_counter *counter,
270 const size_t *dimension_indexes)
271{
272 return lttng_counter_add(config, counter, dimension_indexes, -1);
273}
274
275#endif /* _LTTNG_COUNTER_API_H */
This page took 0.03213 seconds and 4 git commands to generate.