Fix: counter-api: always inline counter add function
lttng-modules.git: include/counter/counter-api.h
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * counter/counter-api.h
 *
 * LTTng Counters API, requiring counter/config.h
 *
 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_COUNTER_API_H
#define _LTTNG_COUNTER_API_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <counter/counter.h>
#include <counter/counter-internal.h>
#include <wrapper/limits.h>

/*
 * Using unsigned arithmetic because overflow is defined.
 */
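/*
 * Add v to the counter element selected by dimension_indexes, using the
 * requested allocation (per-CPU or global) and synchronization scheme.
 * For per-CPU synchronization, the amount that the caller should fold
 * into the global counter is returned through *remainder when non-NULL.
 */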
static __always_inline int __lttng_counter_add(const struct lib_counter_config *config,
					       enum lib_counter_config_alloc alloc,
					       enum lib_counter_config_sync sync,
					       struct lib_counter *counter,
					       const size_t *dimension_indexes, int64_t v,
					       int64_t *remainder)
{
	size_t index;
	bool overflow = false, underflow = false;
	struct lib_counter_layout *layout;
	int64_t move_sum = 0;

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	switch (alloc) {
	case COUNTER_ALLOC_PER_CPU:
		layout = per_cpu_ptr(counter->percpu_counters, smp_processor_id());
		break;
	case COUNTER_ALLOC_GLOBAL:
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		int8_t old, n, res;
		int8_t global_sum_step = counter->global_sum_step.s8;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
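			/*
			 * Per-CPU sync updates the per-CPU counter with
			 * cmpxchg_local(). When the new value would cross the
			 * global_sum_step threshold in either direction, half
			 * of the step is subtracted from the local counter and
			 * reported through move_sum so the caller can fold it
			 * into the global counter. The same pattern repeats
			 * for each counter size below.
			 */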
			do {
				move_sum = 0;
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				if (unlikely(n > (int8_t) global_sum_step))
					move_sum = (int8_t) global_sum_step / 2;
				else if (unlikely(n < -(int8_t) global_sum_step))
					move_sum = -((int8_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= U8_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(s64) U8_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		int16_t *int_p = (int16_t *) layout->counters + index;
		int16_t old, n, res;
		int16_t global_sum_step = counter->global_sum_step.s16;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				if (unlikely(n > (int16_t) global_sum_step))
					move_sum = (int16_t) global_sum_step / 2;
				else if (unlikely(n < -(int16_t) global_sum_step))
					move_sum = -((int16_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= U16_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(s64) U16_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		int32_t *int_p = (int32_t *) layout->counters + index;
		int32_t old, n, res;
		int32_t global_sum_step = counter->global_sum_step.s32;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				if (unlikely(n > (int32_t) global_sum_step))
					move_sum = (int32_t) global_sum_step / 2;
				else if (unlikely(n < -(int32_t) global_sum_step))
					move_sum = -((int32_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && (v >= U32_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -(s64) U32_MAX || n > old))
			underflow = true;
		break;
	}
#if BITS_PER_LONG == 64
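	/*
	 * The 64-bit case uses cmpxchg()/cmpxchg_local() on 64-bit values,
	 * which is only reliably available on 64-bit architectures, hence
	 * the BITS_PER_LONG guard.
	 */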
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		int64_t old, n, res;
		int64_t global_sum_step = counter->global_sum_step.s64;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				if (unlikely(n > (int64_t) global_sum_step))
					move_sum = (int64_t) global_sum_step / 2;
				else if (unlikely(n < -(int64_t) global_sum_step))
					move_sum = -((int64_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		default:
			return -EINVAL;
		}
		if (v > 0 && n < old)
			overflow = true;
		else if (v < 0 && n > old)
			underflow = true;
		break;
	}
#endif
	default:
		return -EINVAL;
	}
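	/*
	 * Record overflow/underflow in the per-index bitmaps. Check with
	 * test_bit() first to avoid issuing an atomic set_bit() when the
	 * bit is already set.
	 */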
	if (unlikely(overflow && !test_bit(index, layout->overflow_bitmap)))
		set_bit(index, layout->overflow_bitmap);
	else if (unlikely(underflow && !test_bit(index, layout->underflow_bitmap)))
		set_bit(index, layout->underflow_bitmap);
	if (remainder)
		*remainder = move_sum;
	return 0;
}

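/*
 * Per-CPU add: update the per-CPU counter first, then fold any amount
 * reported through move_sum into the global counter.
 */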
static __always_inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
						       struct lib_counter *counter,
						       const size_t *dimension_indexes, int64_t v)
{
	int64_t move_sum;
	int ret;

	ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
				  counter, dimension_indexes, v, &move_sum);
	if (unlikely(ret))
		return ret;
	if (unlikely(move_sum))
		return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
					   counter, dimension_indexes, move_sum, NULL);
	return 0;
}

static __always_inline int __lttng_counter_add_global(const struct lib_counter_config *config,
						       struct lib_counter *counter,
						       const size_t *dimension_indexes, int64_t v)
{
	return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
				   dimension_indexes, v, NULL);
}

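/*
 * Public entry point: dispatch to the per-CPU or global add depending on
 * the allocation configured for the counter.
 */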
static __always_inline int lttng_counter_add(const struct lib_counter_config *config,
					     struct lib_counter *counter,
					     const size_t *dimension_indexes, int64_t v)
{
	switch (config->alloc) {
	case COUNTER_ALLOC_PER_CPU:	/* Fallthrough */
	case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
	case COUNTER_ALLOC_GLOBAL:
		return __lttng_counter_add_global(config, counter, dimension_indexes, v);
	default:
		return -EINVAL;
	}
}

static __always_inline int lttng_counter_inc(const struct lib_counter_config *config,
					     struct lib_counter *counter,
					     const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, 1);
}

static __always_inline int lttng_counter_dec(const struct lib_counter_config *config,
					     struct lib_counter *counter,
					     const size_t *dimension_indexes)
{
	return lttng_counter_add(config, counter, dimension_indexes, -1);
}

#endif /* _LTTNG_COUNTER_API_H */