Implement lib counter
[lttng-modules.git] / include / counter / counter-api.h
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * counter/counter-api.h
4 *
5 * LTTng Counters API, requiring counter/config.h
6 *
7 * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #ifndef _LTTNG_COUNTER_API_H
11 #define _LTTNG_COUNTER_API_H
12
13 #include <linux/types.h>
14 #include <linux/percpu.h>
15 #include <linux/bitops.h>
16 #include <counter/counter.h>
17 #include <counter/counter-internal.h>
18
19 /*
20 * Using unsigned arithmetic because overflow is defined.
21 */
/*
 * Add @v to the counter element selected by @dimension_indexes.
 *
 * @config:            counter configuration (element size, allocation, sync).
 * @alloc:             backing storage to update: this CPU's per-CPU element
 *                     or the shared global element.
 * @sync:              synchronization level: COUNTER_SYNC_PER_CPU uses
 *                     cmpxchg_local() (atomic w.r.t. the local CPU only),
 *                     COUNTER_SYNC_GLOBAL uses fully SMP-safe cmpxchg().
 * @counter:           counter instance.
 * @dimension_indexes: one index per configured dimension.
 * @v:                 signed delta to apply.
 * @remainder:         if non-NULL, receives "move_sum": the amount the caller
 *                     should flush to the global counter. Nonzero only in the
 *                     COUNTER_SYNC_PER_CPU paths, when the updated per-CPU
 *                     value crosses the global_sum_step threshold.
 *
 * Returns 0 on success, -EOVERFLOW when the indexes are out of bounds,
 * -EINVAL on unknown @alloc or unsupported counter size.
 *
 * The additions are performed on the unsigned representation because
 * unsigned overflow is well-defined in C; results are then compared as
 * signed to detect wrap-around.
 */
static inline int __lttng_counter_add(const struct lib_counter_config *config,
				      enum lib_counter_config_alloc alloc,
				      enum lib_counter_config_sync sync,
				      struct lib_counter *counter,
				      const size_t *dimension_indexes, int64_t v,
				      int64_t *remainder)
{
	size_t index;
	bool overflow = false, underflow = false;
	struct lib_counter_layout *layout;
	int64_t move_sum = 0;	/* Stays 0 on the COUNTER_SYNC_GLOBAL paths. */

	if (unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
		return -EOVERFLOW;
	index = lttng_counter_get_index(config, counter, dimension_indexes);

	/* Select the backing layout: this CPU's slice or the global one. */
	switch (alloc) {
	case COUNTER_ALLOC_PER_CPU:
		/*
		 * NOTE(review): smp_processor_id() and the cmpxchg_local()
		 * loops below presumably rely on the caller keeping the task
		 * on this CPU (preemption/migration disabled) — confirm
		 * against callers.
		 */
		layout = per_cpu_ptr(counter->percpu_counters, smp_processor_id());
		break;
	case COUNTER_ALLOC_GLOBAL:
		layout = &counter->global_counters;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * One specialization per element size. All four follow the same
	 * pattern as the 8-bit case; only the integer width differs.
	 */
	switch (config->counter_size) {
	case COUNTER_SIZE_8_BIT:
	{
		int8_t *int_p = (int8_t *) layout->counters + index;
		int8_t old, n, res;
		int8_t global_sum_step = counter->global_sum_step.s8;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			/* Lock-free retry loop: recompute from 'old' until the
			 * cmpxchg observes the value we started from. */
			do {
				move_sum = 0;
				old = res;
				/* Unsigned add: wrap-around is defined. */
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				/*
				 * If the new per-CPU value crosses the
				 * sum-step threshold, carve off half a step
				 * into move_sum for the caller to push to the
				 * global counter, keeping the local value
				 * within bounds.
				 */
				if (unlikely(n > (int8_t) global_sum_step))
					move_sum = (int8_t) global_sum_step / 2;
				else if (unlikely(n < -(int8_t) global_sum_step))
					move_sum = -((int8_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			/* Same loop, but SMP-safe and without sum-step spill. */
			do {
				old = res;
				n = (int8_t) ((uint8_t) old + (uint8_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		/*
		 * Wrap detection: either |v| spans the whole 8-bit range, or
		 * the stored value moved against the sign of @v.
		 */
		if (v > 0 && (v >= U8_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -U8_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_16_BIT:
	{
		/* 16-bit variant of the 8-bit case above. */
		int16_t *int_p = (int16_t *) layout->counters + index;
		int16_t old, n, res;
		int16_t global_sum_step = counter->global_sum_step.s16;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				if (unlikely(n > (int16_t) global_sum_step))
					move_sum = (int16_t) global_sum_step / 2;
				else if (unlikely(n < -(int16_t) global_sum_step))
					move_sum = -((int16_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int16_t) ((uint16_t) old + (uint16_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && (v >= U16_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -U16_MAX || n > old))
			underflow = true;
		break;
	}
	case COUNTER_SIZE_32_BIT:
	{
		/* 32-bit variant of the 8-bit case above. */
		int32_t *int_p = (int32_t *) layout->counters + index;
		int32_t old, n, res;
		int32_t global_sum_step = counter->global_sum_step.s32;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				if (unlikely(n > (int32_t) global_sum_step))
					move_sum = (int32_t) global_sum_step / 2;
				else if (unlikely(n < -(int32_t) global_sum_step))
					move_sum = -((int32_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int32_t) ((uint32_t) old + (uint32_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		if (v > 0 && (v >= U32_MAX || n < old))
			overflow = true;
		else if (v < 0 && (v <= -U32_MAX || n > old))
			underflow = true;
		break;
	}
#if BITS_PER_LONG == 64
	/* 64-bit elements require a 64-bit native cmpxchg. */
	case COUNTER_SIZE_64_BIT:
	{
		int64_t *int_p = (int64_t *) layout->counters + index;
		int64_t old, n, res;
		int64_t global_sum_step = counter->global_sum_step.s64;

		res = *int_p;
		switch (sync) {
		case COUNTER_SYNC_PER_CPU:
		{
			do {
				move_sum = 0;
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				if (unlikely(n > (int64_t) global_sum_step))
					move_sum = (int64_t) global_sum_step / 2;
				else if (unlikely(n < -(int64_t) global_sum_step))
					move_sum = -((int64_t) global_sum_step / 2);
				n -= move_sum;
				res = cmpxchg_local(int_p, old, n);
			} while (old != res);
			break;
		}
		case COUNTER_SYNC_GLOBAL:
		{
			do {
				old = res;
				n = (int64_t) ((uint64_t) old + (uint64_t) v);
				res = cmpxchg(int_p, old, n);
			} while (old != res);
			break;
		}
		}
		/* @v is itself 64-bit, so wrap shows only as a sign reversal. */
		if (v > 0 && n < old)
			overflow = true;
		else if (v < 0 && n > old)
			underflow = true;
		break;
	}
#endif
	default:
		return -EINVAL;
	}
	/*
	 * Record the wrap event. Checking test_bit() first avoids dirtying
	 * the bitmap cache line when the bit is already set.
	 */
	if (unlikely(overflow && !test_bit(index, layout->overflow_bitmap)))
		set_bit(index, layout->overflow_bitmap);
	else if (unlikely(underflow && !test_bit(index, layout->underflow_bitmap)))
		set_bit(index, layout->underflow_bitmap);
	if (remainder)
		*remainder = move_sum;
	return 0;
}
219
220 static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
221 struct lib_counter *counter,
222 const size_t *dimension_indexes, int64_t v)
223 {
224 int64_t move_sum;
225 int ret;
226
227 ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
228 counter, dimension_indexes, v, &move_sum);
229 if (unlikely(ret))
230 return ret;
231 if (unlikely(move_sum))
232 return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
233 counter, dimension_indexes, move_sum, NULL);
234 return 0;
235 }
236
237 static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
238 struct lib_counter *counter,
239 const size_t *dimension_indexes, int64_t v)
240 {
241 return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
242 dimension_indexes, v, NULL);
243 }
244
245 static inline int lttng_counter_add(const struct lib_counter_config *config,
246 struct lib_counter *counter,
247 const size_t *dimension_indexes, int64_t v)
248 {
249 switch (config->alloc) {
250 case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
251 case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
252 return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
253 case COUNTER_ALLOC_GLOBAL:
254 return __lttng_counter_add_global(config, counter, dimension_indexes, v);
255 default:
256 return -EINVAL;
257 }
258 }
259
260 static inline int lttng_counter_inc(const struct lib_counter_config *config,
261 struct lib_counter *counter,
262 const size_t *dimension_indexes)
263 {
264 return lttng_counter_add(config, counter, dimension_indexes, 1);
265 }
266
267 static inline int lttng_counter_dec(const struct lib_counter_config *config,
268 struct lib_counter *counter,
269 const size_t *dimension_indexes)
270 {
271 return lttng_counter_add(config, counter, dimension_indexes, -1);
272 }
273
274 #endif /* _LTTNG_COUNTER_API_H */
This page took 0.033882 seconds and 4 git commands to generate.