Fix: counter: cast UINT*_MAX to 64-bit signed type before negation
[lttng-ust.git] / libcounter/counter-api.h
/*
 * counter/counter-api.h
 *
 * LTTng Counters API, requiring counter/config.h
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _LTTNG_COUNTER_API_H
#define _LTTNG_COUNTER_API_H

#include <stdint.h>
#include <limits.h>
#include <errno.h>	/* EINVAL, ENODEV, EOVERFLOW */
#include "counter.h"
#include "counter-internal.h"
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <lttng/bitmap.h>
#include "../libringbuffer/getcpu.h"

/*
 * The additions below are performed in unsigned arithmetic: unsigned
 * integer overflow has well-defined wrap-around semantics in C, whereas
 * signed integer overflow is undefined behavior.
 */
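/*
 * Add v to the counter slot identified by dimension_indexes, in either the
 * per-CPU or the global layout. When the stored value wraps around the
 * counter width, the slot's bit is set in the layout's overflow or
 * underflow bitmap. With per-CPU sync, the amount that should be flushed
 * to the global counter is returned through *remainder.
 */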
static inline int __lttng_counter_add(const struct lib_counter_config *config,
		enum lib_counter_config_alloc alloc,
		enum lib_counter_config_sync sync,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v,
		int64_t *remainder)
{
	size_t index;
	bool overflow = false, underflow = false;
	struct lib_counter_layout *layout;
	int64_t move_sum = 0;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (alloc) {
	case COUNTER_ALLOC_PER_CPU:
		layout = &counter->percpu_counters[lttng_ust_get_cpu()];
		break;
	case COUNTER_ALLOC_GLOBAL:
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		int8_t old, n, res;
		int8_t global_sum_step = counter->global_sum_step.s8;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
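			/*
			 * Compute the new value with wrap-around semantics.
			 * When the result drifts past global_sum_step, move
			 * half a step out of the per-CPU counter into
			 * move_sum, to be flushed to the global counter by
			 * the caller. Retry until no concurrent update raced
			 * with the cmpxchg.
			 */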
			do {
				move_sum = 0;
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				if (caa_unlikely(n > (int8_t) global_sum_step))
					move_sum = (int8_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int8_t) global_sum_step))
					move_sum = -((int8_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
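		/*
		 * Detect overflow/underflow: either |v| covers at least the
		 * entire counter range (so the wrap test on n could miss),
		 * or the stored value wrapped around. UINT*_MAX is cast to a
		 * 64-bit signed type before negation so the comparison is
		 * performed in 64-bit signed arithmetic.
		 */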
		if (v > 0 && (v >= UINT8_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT8_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		int16_t old, n, res;
		int16_t global_sum_step = counter->global_sum_step.s16;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				if (caa_unlikely(n > (int16_t) global_sum_step))
					move_sum = (int16_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int16_t) global_sum_step))
					move_sum = -((int16_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && (v >= UINT16_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT16_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		int32_t old, n, res;
		int32_t global_sum_step = counter->global_sum_step.s32;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				if (caa_unlikely(n > (int32_t) global_sum_step))
					move_sum = (int32_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int32_t) global_sum_step))
					move_sum = -((int32_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
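		/*
		 * The (int64_t) cast matters most at this width: UINT32_MAX
		 * does not fit in a 32-bit int, so without the cast
		 * -UINT32_MAX would be computed in unsigned arithmetic and
		 * yield 1 instead of -4294967295. For the 8-bit and 16-bit
		 * cases above, integer promotion to int already makes the
		 * negation safe; the cast is applied uniformly for
		 * consistency.
		 */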
		if (v > 0 && (v >= UINT32_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT32_MAX || n > old))
			underflow = true;
		break;
	}
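	/*
	 * 64-bit counters are only compiled in on 64-bit architectures,
	 * where uatomic_cmpxchg can operate atomically on an int64_t.
	 */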
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		int64_t old, n, res;
		int64_t global_sum_step = counter->global_sum_step.s64;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				if (caa_unlikely(n > (int64_t) global_sum_step))
					move_sum = (int64_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int64_t) global_sum_step))
					move_sum = -((int64_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && n < old)
			overflow = true;
		else if (v < 0 && n > old)
			underflow = true;
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
		lttng_bitmap_set_bit(index, layout->overflow_bitmap);
	else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
		lttng_bitmap_set_bit(index, layout->underflow_bitmap);
	if (remainder)
		*remainder = move_sum;
	return 0;
}

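/*
 * Per-CPU add: any amount moved out of the per-CPU counter (move_sum) is
 * immediately flushed into the global counter, using global sync.
 */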
static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v)
{
	int64_t move_sum;
	int ret;

	ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
			counter, dimension_indexes, v, &move_sum);
	if (caa_unlikely(ret))
		return ret;
	if (caa_unlikely(move_sum))
		return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
				counter, dimension_indexes, move_sum, NULL);
	return 0;
}

static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v)
{
	return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
			dimension_indexes, v, NULL);
}

static inline int lttng_counter_add(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v)
{
	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
	case COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_global(config, counter, dimension_indexes, v);
	default:
		return -EINVAL;
	}
}
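
/*
 * Example usage (a sketch only: `config`, `counter`, `event_id`, and
 * `handle_error` are hypothetical, and assumed to have been set up through
 * the counter allocation API):
 *
 *	size_t dimension_indexes[1] = { event_id };
 *	int ret;
 *
 *	ret = lttng_counter_add(config, counter, dimension_indexes, 1);
 *	if (ret)
 *		handle_error(ret);
 */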

static inline int lttng_counter_inc(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, 1);
}

static inline int lttng_counter_dec(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, -1);
}

#endif /* _LTTNG_COUNTER_API_H */