/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng Counters API, requiring counter/config.h
 */

#ifndef _LTTNG_COUNTER_API_H
#define _LTTNG_COUNTER_API_H

#include <stdint.h>
#include <limits.h>
#include <errno.h>
#include "counter.h"
#include "counter-internal.h"
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include "common/bitmap.h"
#include "common/getcpu.h"

/*
 * The additions below use unsigned arithmetic: unsigned integer overflow
 * has well-defined wrap-around semantics in C, whereas signed overflow is
 * undefined behavior.
 */
static inline int __lttng_counter_add(const struct lib_counter_config *config,
		enum lib_counter_config_alloc alloc,
		enum lib_counter_config_sync sync,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v,
		int64_t *remainder)
{
	size_t index;
	bool overflow = false, underflow = false;
	struct lib_counter_layout *layout;
	int64_t move_sum = 0;

	if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (alloc) {
	case COUNTER_ALLOC_PER_CPU:
		layout = &counter->percpu_counters[lttng_ust_get_cpu()];
		break;
	case COUNTER_ALLOC_GLOBAL:
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}
	if (caa_unlikely(!layout->counters))
		return -ENODEV;

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		int8_t old, n, res;
		int8_t global_sum_step = counter->global_sum_step.s8;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				/*
				 * If the new per-CPU value crosses the
				 * +/- global_sum_step threshold, carry half a
				 * sum step out of the per-CPU counter; the
				 * caller moves it to the global counter.
				 */
				if (caa_unlikely(n > (int8_t) global_sum_step))
					move_sum = (int8_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int8_t) global_sum_step))
					move_sum = -((int8_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= UINT8_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT8_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		int16_t old, n, res;
		int16_t global_sum_step = counter->global_sum_step.s16;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				if (caa_unlikely(n > (int16_t) global_sum_step))
					move_sum = (int16_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int16_t) global_sum_step))
					move_sum = -((int16_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= UINT16_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT16_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		int32_t old, n, res;
		int32_t global_sum_step = counter->global_sum_step.s32;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				if (caa_unlikely(n > (int32_t) global_sum_step))
					move_sum = (int32_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int32_t) global_sum_step))
					move_sum = -((int32_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= UINT32_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(int64_t) UINT32_MAX || n > old))
			underflow = true;
		break;
	}
#if CAA_BITS_PER_LONG == 64
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		int64_t old, n, res;
		int64_t global_sum_step = counter->global_sum_step.s64;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				if (caa_unlikely(n > (int64_t) global_sum_step))
					move_sum = (int64_t) global_sum_step / 2;
				else if (caa_unlikely(n < -(int64_t) global_sum_step))
					move_sum = -((int64_t) global_sum_step / 2);
				n -= move_sum;
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				res = uatomic_cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && n < old)
			overflow = true;
		else if (v < 0 && n > old)
			underflow = true;
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	/* Record overflow/underflow events for this index in the bitmaps. */
	if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
		lttng_bitmap_set_bit(index, layout->overflow_bitmap);
	else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
		lttng_bitmap_set_bit(index, layout->underflow_bitmap);
	if (remainder)
		*remainder = move_sum;
	return 0;
}
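
/*
 * Per-CPU add path: the value accumulates in the current CPU's counter.
 * In per-CPU sync mode, whenever the per-CPU value drifts beyond
 * +/- global_sum_step, __lttng_counter_add() carries half a sum step out
 * of the per-CPU counter (returned through its "remainder" argument);
 * that carry is then added to the global counter below, so the global
 * counter tracks an approximation of the total without requiring a
 * global atomic operation on every add.
 */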
static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v)
{
	int64_t move_sum;
	int ret;

	ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
			counter, dimension_indexes, v, &move_sum);
	if (caa_unlikely(ret))
		return ret;
	if (caa_unlikely(move_sum))
		return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
				counter, dimension_indexes, move_sum, NULL);
	return 0;
}

static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v)
{
	return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
			dimension_indexes, v, NULL);
}
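
/*
 * Dispatch an add on the configured allocation scheme: counters backed by
 * per-CPU buffers (optionally mirrored into a global counter through the
 * sum-step carry above) use the per-CPU path, while global-only counters
 * go directly to the global counter.
 */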
static inline int lttng_counter_add(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes, int64_t v)
{
	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
	case COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_global(config, counter, dimension_indexes, v);
	default:
		return -EINVAL;
	}
}

static inline int lttng_counter_inc(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, 1);
}

static inline int lttng_counter_dec(const struct lib_counter_config *config,
		struct lib_counter *counter,
		const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, -1);
}
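
/*
 * Illustrative usage sketch (not part of this header): it assumes a
 * struct lib_counter and its matching lib_counter_config were set up
 * elsewhere (counter creation is outside this header) and uses a
 * hypothetical two-dimensional index layout purely for the example.
 * Each entry point returns 0 on success or a negative errno value,
 * e.g. -EOVERFLOW for out-of-range indexes, -ENODEV when the selected
 * layout has no counters allocated, or -EINVAL for an unknown
 * configuration.
 *
 *	size_t dimension_indexes[2] = { event_idx, bucket_idx };
 *	int ret;
 *
 *	ret = lttng_counter_add(config, counter, dimension_indexes, 42);
 *	if (ret)
 *		return ret;
 *	ret = lttng_counter_inc(config, counter, dimension_indexes);
 *	if (ret)
 *		return ret;
 *	ret = lttng_counter_dec(config, counter, dimension_indexes);
 */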

#endif /* _LTTNG_COUNTER_API_H */