/*
 * counter/counter-api.h
 *
 * LTTng Counters API, requiring counter/config.h
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _LTTNG_COUNTER_API_H
#define _LTTNG_COUNTER_API_H

#include <stdint.h>
#include <limits.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <lttng/bitmap.h>
#include "../libringbuffer/getcpu.h"

/*
 * Use unsigned arithmetic for the additions: unsigned overflow wraps
 * around and is well-defined in C, whereas signed overflow is undefined
 * behavior.
 */
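/*
 * __lttng_counter_add: add v to the counter slot selected by
 * dimension_indexes in the layout chosen by alloc, using the
 * synchronization mode sync. For per-CPU-synchronized adds, the amount
 * moved towards the global counter is returned through *remainder.
 *
 * Worked example of the wrap-around detection below (illustrative
 * values, not part of the API): with 8-bit counters, adding v = 1 to
 * old = 127 (INT8_MAX) gives n = (int8_t) ((uint8_t) 127 + (uint8_t) 1)
 * = -128. Since v > 0 and n < old, the add is flagged as an overflow
 * and the overflow bitmap bit for this index is set.
 */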
static inline int __lttng_counter_add(const struct lib_counter_config *config,
                                      enum lib_counter_config_alloc alloc,
                                      enum lib_counter_config_sync sync,
                                      struct lib_counter *counter,
                                      const size_t *dimension_indexes, int64_t v,
                                      int64_t *remainder)
{
	size_t index;
	bool overflow = false, underflow = false;
	struct lib_counter_layout *layout;
	int64_t move_sum = 0;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	/* Select the layout backing this add: per-CPU slot or shared global. */
	switch (alloc) {
	case COUNTER_ALLOC_PER_CPU:
		layout = &counter->percpu_counters[lttng_ust_get_cpu()];
		break;
	case COUNTER_ALLOC_GLOBAL:
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		int8_t old, n, res;
		int8_t global_sum_step = counter->global_sum_step.s8;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				/*
				 * When the per-CPU value crosses the
				 * global_sum_step threshold, move half a
				 * step into move_sum; the caller flushes
				 * it to the global counter.
				 */
				if (caa_unlikely(n > (int8_t) global_sum_step))
					move_sum = (int8_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int8_t) global_sum_step))
					move_sum = -((int8_t) global_sum_step / 2);
				n -= move_sum;
				/* Retry if a concurrent update raced us. */
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= UINT8_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT8_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		int16_t old, n, res;
		int16_t global_sum_step = counter->global_sum_step.s16;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				if (caa_unlikely(n > (int16_t) global_sum_step))
					move_sum = (int16_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int16_t) global_sum_step))
					move_sum = -((int16_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= UINT16_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT16_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		int32_t old, n, res;
		int32_t global_sum_step = counter->global_sum_step.s32;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				if (caa_unlikely(n > (int32_t) global_sum_step))
					move_sum = (int32_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int32_t) global_sum_step))
					move_sum = -((int32_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= UINT32_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT32_MAX || n > old))
			underflow = true;
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		int64_t old, n, res;
		int64_t global_sum_step = counter->global_sum_step.s64;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				if (caa_unlikely(n > (int64_t) global_sum_step))
					move_sum = (int64_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int64_t) global_sum_step))
					move_sum = -((int64_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && n < old)
			overflow = true;
		else if (v < 0 && n > old)
			underflow = true;
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	/*
	 * Latch overflow/underflow into the bitmaps; test before set to
	 * avoid redundant stores to an already-set bit.
	 */
	if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
		lttng_bitmap_set_bit(index, layout->overflow_bitmap);
	else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
		lttng_bitmap_set_bit(index, layout->underflow_bitmap);
	if (remainder)
		*remainder = move_sum;
	return 0;
}

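/*
 * Per-CPU fast path: add to the current CPU's slot first. If that add
 * crossed the global_sum_step threshold, flush the resulting move_sum
 * into the global counter with a second, globally-synchronized add.
 */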
static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
                                             struct lib_counter *counter,
                                             const size_t *dimension_indexes, int64_t v)
{
	int64_t move_sum;
	int ret;

	ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
	                          counter, dimension_indexes, v, &move_sum);
	if (caa_unlikely(ret))
		return ret;
	if (caa_unlikely(move_sum))
		return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
		                           counter, dimension_indexes, move_sum, NULL);
	return 0;
}

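/*
 * Global path: add directly to the shared global counter, using the
 * synchronization mode requested by the configuration.
 */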
static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
                                             struct lib_counter *counter,
                                             const size_t *dimension_indexes, int64_t v)
{
	return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
	                           dimension_indexes, v, NULL);
}

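/*
 * Dispatch on the configured allocation mode: any allocation that
 * includes per-CPU counters goes through the per-CPU fast path, while
 * a purely global allocation updates the global counter directly.
 */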
static inline int lttng_counter_add(const struct lib_counter_config *config,
                                    struct lib_counter *counter,
                                    const size_t *dimension_indexes, int64_t v)
{
	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
	case COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_global(config, counter, dimension_indexes, v);
	default:
		return -EINVAL;
	}
}

static inline int lttng_counter_inc(const struct lib_counter_config *config,
                                    struct lib_counter *counter,
                                    const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, 1);
}

static inline int lttng_counter_dec(const struct lib_counter_config *config,
                                    struct lib_counter *counter,
                                    const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, -1);
}

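/*
 * Usage sketch (illustrative: the my_config, my_counter and
 * handle_error names and the way the counter is allocated are
 * assumptions, since counter creation belongs to the allocation API in
 * counter.h and is not shown here):
 *
 *	size_t dimension_indexes[1] = { 42 };
 *	int ret;
 *
 *	ret = lttng_counter_add(&my_config, my_counter, dimension_indexes, 1);
 *	if (ret)
 *		handle_error(ret);
 *
 * A non-zero return is -EINVAL, -ENODEV or -EOVERFLOW.
 * lttng_counter_inc() and lttng_counter_dec() are shorthands adding
 * +1 and -1 to the same slot.
 */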
#endif /* _LTTNG_COUNTER_API_H */