/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * counter/counter-api.h
 *
 * LTTng Counters API, requiring counter/config.h
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_COUNTER_API_H
#define _LTTNG_COUNTER_API_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <counter/counter.h>
#include <counter/counter-internal.h>
#include <wrapper/compiler_attributes.h>
#include <wrapper/limits.h>

/*
 * Using unsigned arithmetic for the additions below because unsigned
 * overflow has defined (wrapping) semantics, unlike signed overflow.
 */
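/*
 * Add v to the counter element selected by dimension_indexes, using the
 * layout chosen by alloc (per-CPU or global) and the concurrency scheme
 * chosen by sync (cmpxchg_local vs cmpxchg).
 *
 * With per-CPU sync, whenever the updated per-CPU element would exceed
 * (or fall below) global_sum_step, half of that step is moved out of the
 * per-CPU element and reported through *remainder so the caller can add
 * it to the global counter. Wraparound of the stored value is recorded in
 * the per-index overflow/underflow bitmaps.
 *
 * Returns 0 on success, -EOVERFLOW on invalid dimension indexes, -EINVAL
 * on an unsupported alloc/sync/counter_size configuration.
 */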
static __always_inline int __lttng_counter_add(const struct lib_counter_config *config,
		enum lib_counter_config_alloc alloc,
		enum lib_counter_config_sync sync,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v,
		int64_t *remainder)
{
	size_t index;
	bool overflow = false, underflow = false;
	struct lib_counter_layout *layout;
	int64_t move_sum = 0;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (alloc) {
	case COUNTER_ALLOC_PER_CPU:
		layout = per_cpu_ptr(counter->percpu_counters, smp_processor_id());
		break;
	case COUNTER_ALLOC_GLOBAL:
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		int8_t old, n, res;
		int8_t global_sum_step = counter->global_sum_step.s8;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				if (unlikely(n > (int8_t) global_sum_step))
					move_sum = (int8_t) global_sum_step / 2;
				else if (unlikely(n < -(int8_t) global_sum_step))
					move_sum = -((int8_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= U8_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(s64) U8_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		int16_t old, n, res;
		int16_t global_sum_step = counter->global_sum_step.s16;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				if (unlikely(n > (int16_t) global_sum_step))
					move_sum = (int16_t) global_sum_step / 2;
				else if (unlikely(n < -(int16_t) global_sum_step))
					move_sum = -((int16_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= U16_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(s64) U16_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		int32_t old, n, res;
		int32_t global_sum_step = counter->global_sum_step.s32;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				if (unlikely(n > (int32_t) global_sum_step))
					move_sum = (int32_t) global_sum_step / 2;
				else if (unlikely(n < -(int32_t) global_sum_step))
					move_sum = -((int32_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= U32_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(s64) U32_MAX || n > old))
			underflow = true;
		break;
	}
#if BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		int64_t old, n, res;
		int64_t global_sum_step = counter->global_sum_step.s64;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				if (unlikely(n > (int64_t) global_sum_step))
					move_sum = (int64_t) global_sum_step / 2;
				else if (unlikely(n < -(int64_t) global_sum_step))
					move_sum = -((int64_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && n < old)
			overflow = true;
		else if (v < 0 && n > old)
			underflow = true;
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	if (unlikely(overflow && !test_bit(index, layout->overflow_bitmap)))
		set_bit(index, layout->overflow_bitmap);
	else if (unlikely(underflow && !test_bit(index, layout->underflow_bitmap)))
		set_bit(index, layout->underflow_bitmap);
	if (remainder)
		*remainder = move_sum;
	return 0;
}

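/*
 * Add to the per-CPU counter of the current CPU, then flush any amount
 * moved out of the per-CPU element (move_sum) to the global counter.
 */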
static __always_inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v)
{
	int64_t move_sum;
	int ret;

	ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
			counter, dimension_indexes, v, &move_sum);
	if (unlikely(ret))
		return ret;
	if (unlikely(move_sum))
		return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
				counter, dimension_indexes, move_sum, NULL);
	return 0;
}

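/*
 * Add directly to the global counter, using the sync scheme selected in
 * the counter configuration.
 */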
static __always_inline int __lttng_counter_add_global(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v)
{
	return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
			dimension_indexes, v, NULL);
}

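/*
 * Dispatch on the configured allocation scheme: per-CPU counters (with or
 * without a backing global counter) or a global counter only.
 */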
static __always_inline int lttng_counter_add(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v)
{
	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:
		lttng_fallthrough;
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
	case COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_global(config, counter, dimension_indexes, v);
	default:
		return -EINVAL;
	}
}

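/*
 * Increment the selected counter element by one.
 */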
static __always_inline int lttng_counter_inc(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, 1);
}

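/*
 * Decrement the selected counter element by one.
 */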
static __always_inline int lttng_counter_dec(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, -1);
}

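/*
 * Usage sketch (illustrative only): the counter configuration, counter
 * instance and dimension index below are assumed to be created and looked
 * up elsewhere through the counter API.
 *
 *	size_t dimension_indexes[1] = { bucket };
 *	int ret;
 *
 *	ret = lttng_counter_inc(config, counter, dimension_indexes);
 *	if (ret)
 *		return ret;	(invalid indexes or configuration)
 */
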
#endif /* _LTTNG_COUNTER_API_H */