Cleanup: bitfields: streamline use of underscores
[lttng-modules.git] / lib / bitfield.h
/* SPDX-License-Identifier: MIT
 *
 * Copyright 2010-2019 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _BABELTRACE_BITFIELD_H
#define _BABELTRACE_BITFIELD_H

#include <linux/types.h>
#include <lttng-endian.h>

#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

/*
 * This header strictly follows the C99 standard, except for use of the
 * compiler-specific __typeof__.
 */

/*
 * This bitfield header requires the compiler representation of signed
 * integers to be two's complement.
 */
#if (-1 != ~0)
#error "bitfield.h requires the compiler representation of signed integers to be two's complement."
#endif

/*
 * _bt_is_signed_type() intentionally generates a comparison of an
 * unsigned expression < 0, which is always false. Silence the
 * resulting compiler warnings.
 */
#ifdef __GNUC__
# define _BT_DIAG_PUSH _Pragma("GCC diagnostic push")
# define _BT_DIAG_POP _Pragma("GCC diagnostic pop")

# define _BT_DIAG_STRINGIFY_1(x) #x
# define _BT_DIAG_STRINGIFY(x) _BT_DIAG_STRINGIFY_1(x)

# define _BT_DIAG_IGNORE(option) \
        _Pragma(_BT_DIAG_STRINGIFY(GCC diagnostic ignored option))
# define _BT_DIAG_IGNORE_TYPE_LIMITS _BT_DIAG_IGNORE("-Wtype-limits")
#else
/* Fall back to no-ops when GCC-style diagnostic pragmas are unavailable. */
# define _BT_DIAG_PUSH
# define _BT_DIAG_POP
# define _BT_DIAG_IGNORE(option)
# define _BT_DIAG_IGNORE_TYPE_LIMITS
#endif

#define _bt_is_signed_type(type) ((type) -1 < (type) 0)

/*
 * Produce a build-time error if the condition `cond` is zero (the
 * bit-field below would then have a negative width).
 * Evaluates as a size_t expression.
 */
#define _BT_BUILD_ASSERT(cond) \
        sizeof(struct { int f:(2 * !!(cond) - 1); })
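
/*
 * Illustrative note: the bit-field width trick above acts like a
 * static assertion. For example:
 *
 *   _BT_BUILD_ASSERT(1);   expands to sizeof(struct { int f:1; }),
 *                          which compiles;
 *   _BT_BUILD_ASSERT(0);   expands to sizeof(struct { int f:-1; }),
 *                          a negative bit-field width, which is
 *                          rejected at compile time.
 */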

/*
 * Cast value `v` to an unsigned integer of the same size as `v`.
 */
#define _bt_cast_value_to_unsigned(v) \
        (sizeof(v) == sizeof(uint8_t) ? (uint8_t) (v) : \
        sizeof(v) == sizeof(uint16_t) ? (uint16_t) (v) : \
        sizeof(v) == sizeof(uint32_t) ? (uint32_t) (v) : \
        sizeof(v) == sizeof(uint64_t) ? (uint64_t) (v) : \
        _BT_BUILD_ASSERT(sizeof(v) <= sizeof(uint64_t)))
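
/*
 * Illustrative note: the cast goes through the same-sized unsigned
 * type, so the two's complement bit pattern is preserved rather than
 * sign-extended. For example, with int8_t v = -1,
 * _bt_cast_value_to_unsigned(v) selects the uint8_t branch and yields
 * 0xff, which can then be shifted left without undefined behavior.
 */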

/*
 * Cast value `v` to an unsigned integer type of the size of type `type`
 * *without* sign-extension.
 *
 * The unsigned cast ensures that we're not shifting a negative value,
 * which is undefined in C. However, this limits the maximum type size
 * of `type` to 64-bit. Generate a compile-time error if the size of
 * `type` is larger than 64-bit.
 */
#define _bt_cast_value_to_unsigned_type(type, v) \
        (sizeof(type) == sizeof(uint8_t) ? \
                (uint8_t) _bt_cast_value_to_unsigned(v) : \
        sizeof(type) == sizeof(uint16_t) ? \
                (uint16_t) _bt_cast_value_to_unsigned(v) : \
        sizeof(type) == sizeof(uint32_t) ? \
                (uint32_t) _bt_cast_value_to_unsigned(v) : \
        sizeof(type) == sizeof(uint64_t) ? \
                (uint64_t) _bt_cast_value_to_unsigned(v) : \
        _BT_BUILD_ASSERT(sizeof(type) <= sizeof(uint64_t)))

/*
 * _bt_fill_mask evaluates to a "type" integer with all bits set.
 */
#define _bt_fill_mask(type) ((type) ~(type) 0)

/*
 * Left shift a value `v` by `shift` bits.
 *
 * The type of `v` can be a signed or unsigned integer.
 * The value of `shift` must be less than the size of `v` (in bits),
 * otherwise the behavior is undefined.
 * Evaluates to the result of the shift operation.
 *
 * According to the C99 standard, left shift of a left-hand-side signed
 * type is undefined if it has a negative value or if the result cannot
 * be represented in the result type. This bitfield header discards the
 * bits that are left-shifted beyond the result type representation,
 * which is the behavior of an unsigned type left shift operation.
 * Therefore, always perform left shift on an unsigned type.
 *
 * This macro should not be used if `shift` can be greater than or
 * equal to the bitwidth of `v`. See `_bt_safe_lshift`.
 */
#define _bt_lshift(v, shift) \
        ((__typeof__(v)) (_bt_cast_value_to_unsigned(v) << (shift)))

/*
 * Generate a mask of type `type` with the `length` least significant bits
 * cleared, and the most significant bits set.
 */
#define _bt_make_mask_complement(type, length) \
        _bt_lshift(_bt_fill_mask(type), length)

/*
 * Generate a mask of type `type` with the `length` least significant bits
 * set, and the most significant bits cleared.
 */
#define _bt_make_mask(type, length) \
        ((type) ~_bt_make_mask_complement(type, length))
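
/*
 * Illustrative note: with an 8-bit unit type and length 3,
 *
 *   _bt_make_mask(uint8_t, 3)            == 0x07 (low 3 bits set)
 *   _bt_make_mask_complement(uint8_t, 3) == 0xf8 (low 3 bits cleared)
 */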

/*
 * Right shift a value `v` by `shift` bits.
 *
 * The type of `v` can be a signed or unsigned integer.
 * The value of `shift` must be less than the size of `v` (in bits),
 * otherwise the behavior is undefined.
 * Evaluates to the result of the shift operation.
 *
 * According to the C99 standard, right shift of a left-hand-side signed
 * type which has a negative value is implementation-defined. This
 * bitfield header relies on the right shift implementation carrying the
 * sign bit. If the compiler implementation has a different behavior,
 * emulate carrying the sign bit.
 *
 * This macro should not be used if `shift` can be greater than or
 * equal to the bitwidth of `v`. See `_bt_safe_rshift`.
 */
#if ((-1 >> 1) == -1)
#define _bt_rshift(v, shift) ((v) >> (shift))
#else
#define _bt_rshift(v, shift) \
        ((__typeof__(v)) ((_bt_cast_value_to_unsigned(v) >> (shift)) | \
                ((v) < 0 ? _bt_make_mask_complement(__typeof__(v), \
                        sizeof(v) * CHAR_BIT - (shift)) : 0)))
#endif
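
/*
 * Illustrative note: for int8_t v = -8 (bit pattern 0xf8),
 * _bt_rshift(v, 2) evaluates to -2 (bit pattern 0xfe). Either the
 * compiler's right shift already carries the sign bit, or the emulated
 * branch ORs in _bt_make_mask_complement(int8_t, 6) to set the two
 * vacated high bits.
 */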

/*
 * Right shift a signed or unsigned integer, where `shift` can be an
 * arbitrary number of bits. `v` is modified by this macro. The shift
 * is transformed into a sequence of `_nr_partial_shifts` consecutive
 * shift operations, each of a number of bits smaller than the bitwidth
 * of `v`, ending with a shift of the number of leftover bits.
 */
#define _bt_safe_rshift(v, shift) \
do { \
        unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \
        unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \
\
        for (; _nr_partial_shifts; _nr_partial_shifts--) \
                (v) = _bt_rshift(v, sizeof(v) * CHAR_BIT - 1); \
        (v) = _bt_rshift(v, _leftover_bits); \
} while (0)

/*
 * Left shift a signed or unsigned integer, where `shift` can be an
 * arbitrary number of bits. `v` is modified by this macro. The shift
 * is transformed into a sequence of `_nr_partial_shifts` consecutive
 * shift operations, each of a number of bits smaller than the bitwidth
 * of `v`, ending with a shift of the number of leftover bits.
 */
#define _bt_safe_lshift(v, shift) \
do { \
        unsigned long _nr_partial_shifts = (shift) / (sizeof(v) * CHAR_BIT - 1); \
        unsigned long _leftover_bits = (shift) % (sizeof(v) * CHAR_BIT - 1); \
\
        for (; _nr_partial_shifts; _nr_partial_shifts--) \
                (v) = _bt_lshift(v, sizeof(v) * CHAR_BIT - 1); \
        (v) = _bt_lshift(v, _leftover_bits); \
} while (0)
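
/*
 * Illustrative note: shifting a 32-bit value by 32 bits in a single
 * operation is undefined behavior in C. _bt_safe_lshift(v, 32) instead
 * performs one 31-bit shift followed by a 1-bit shift (32 = 1 * 31 + 1),
 * each of which is well defined, and leaves v == 0 as expected.
 */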

/*
 * bt_bitfield_write - write integer to a bitfield in native endianness
 *
 * Save an integer to the bitfield, which starts at the "start" bit and
 * spans "length" bits.
 * The inside of a bitfield is from high bits to low bits.
 * Uses native endianness.
 * For unsigned "v", pad MSB with 0 if bitfield is larger than v.
 * For signed "v", sign-extend v if bitfield is larger than v.
 *
 * On little endian, bytes are placed from the least significant to the
 * most significant. Also, consecutive bitfields are placed from lower
 * bits to higher bits.
 *
 * On big endian, bytes are placed from the most significant to the
 * least significant. Also, consecutive bitfields are placed from higher
 * to lower bits.
 */

#define _bt_bitfield_write_le(ptr, type, start, length, v) \
do { \
        __typeof__(v) _v = (v); \
        type *_ptr = (void *) (ptr); \
        unsigned long _start = (start), _length = (length); \
        type _mask, _cmask; \
        unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \
        unsigned long _start_unit, _end_unit, _this_unit; \
        unsigned long _end, _cshift; /* _cshift is "complement shift" */ \
\
        if (!_length) \
                break; \
\
        _end = _start + _length; \
        _start_unit = _start / _ts; \
        _end_unit = (_end + (_ts - 1)) / _ts; \
\
        /* Trim v high bits */ \
        if (_length < sizeof(_v) * CHAR_BIT) \
                _v &= _bt_make_mask(__typeof__(_v), _length); \
\
        /* We can now append v with a simple "or", shift it piece-wise */ \
        _this_unit = _start_unit; \
        if (_start_unit == _end_unit - 1) { \
                _mask = _bt_make_mask(type, _start % _ts); \
                if (_end % _ts) \
                        _mask |= _bt_make_mask_complement(type, _end % _ts); \
                _cmask = _bt_lshift((type) (_v), _start % _ts); \
                _cmask &= ~_mask; \
                _ptr[_this_unit] &= _mask; \
                _ptr[_this_unit] |= _cmask; \
                break; \
        } \
        if (_start % _ts) { \
                _cshift = _start % _ts; \
                _mask = _bt_make_mask(type, _cshift); \
                _cmask = _bt_lshift((type) (_v), _cshift); \
                _cmask &= ~_mask; \
                _ptr[_this_unit] &= _mask; \
                _ptr[_this_unit] |= _cmask; \
                _bt_safe_rshift(_v, _ts - _cshift); \
                _start += _ts - _cshift; \
                _this_unit++; \
        } \
        for (; _this_unit < _end_unit - 1; _this_unit++) { \
                _ptr[_this_unit] = (type) _v; \
                _bt_safe_rshift(_v, _ts); \
                _start += _ts; \
        } \
        if (_end % _ts) { \
                _mask = _bt_make_mask_complement(type, _end % _ts); \
                _cmask = (type) _v; \
                _cmask &= ~_mask; \
                _ptr[_this_unit] &= _mask; \
                _ptr[_this_unit] |= _cmask; \
        } else \
                _ptr[_this_unit] = (type) _v; \
} while (0)
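
/*
 * Illustrative note: with uint8_t units and a zeroed buffer, writing
 * the 3-bit value 0b101 at bit offset 6 via
 * _bt_bitfield_write_le(buf, uint8_t, 6, 3, 0x5) stores the two low
 * value bits in bits 6-7 of buf[0] and the remaining high bit in bit 0
 * of buf[1], i.e. buf[0] == 0x40 and buf[1] == 0x01: little-endian
 * packing fills from lower to higher bit positions across consecutive
 * units.
 */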

#define _bt_bitfield_write_be(ptr, type, start, length, v) \
do { \
        __typeof__(v) _v = (v); \
        type *_ptr = (void *) (ptr); \
        unsigned long _start = (start), _length = (length); \
        type _mask, _cmask; \
        unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \
        unsigned long _start_unit, _end_unit, _this_unit; \
        unsigned long _end, _cshift; /* _cshift is "complement shift" */ \
\
        if (!_length) \
                break; \
\
        _end = _start + _length; \
        _start_unit = _start / _ts; \
        _end_unit = (_end + (_ts - 1)) / _ts; \
\
        /* Trim v high bits */ \
        if (_length < sizeof(_v) * CHAR_BIT) \
                _v &= _bt_make_mask(__typeof__(_v), _length); \
\
        /* We can now append v with a simple "or", shift it piece-wise */ \
        _this_unit = _end_unit - 1; \
        if (_start_unit == _end_unit - 1) { \
                _mask = _bt_make_mask(type, (_ts - (_end % _ts)) % _ts); \
                if (_start % _ts) \
                        _mask |= _bt_make_mask_complement(type, _ts - (_start % _ts)); \
                _cmask = _bt_lshift((type) (_v), (_ts - (_end % _ts)) % _ts); \
                _cmask &= ~_mask; \
                _ptr[_this_unit] &= _mask; \
                _ptr[_this_unit] |= _cmask; \
                break; \
        } \
        if (_end % _ts) { \
                _cshift = _end % _ts; \
                _mask = _bt_make_mask(type, _ts - _cshift); \
                _cmask = _bt_lshift((type) (_v), _ts - _cshift); \
                _cmask &= ~_mask; \
                _ptr[_this_unit] &= _mask; \
                _ptr[_this_unit] |= _cmask; \
                _bt_safe_rshift(_v, _cshift); \
                _end -= _cshift; \
                _this_unit--; \
        } \
        for (; (long) _this_unit >= (long) _start_unit + 1; _this_unit--) { \
                _ptr[_this_unit] = (type) _v; \
                _bt_safe_rshift(_v, _ts); \
                _end -= _ts; \
        } \
        if (_start % _ts) { \
                _mask = _bt_make_mask_complement(type, _ts - (_start % _ts)); \
                _cmask = (type) _v; \
                _cmask &= ~_mask; \
                _ptr[_this_unit] &= _mask; \
                _ptr[_this_unit] |= _cmask; \
        } else \
                _ptr[_this_unit] = (type) _v; \
} while (0)

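/*
 * Illustrative note: the same write,
 * _bt_bitfield_write_be(buf, uint8_t, 6, 3, 0x5), on a zeroed buffer
 * stores the two high value bits in the low bits of buf[0] and the low
 * value bit in the top bit of buf[1], i.e. buf[0] == 0x02 and
 * buf[1] == 0x80: big-endian packing treats bit offset 0 as the most
 * significant bit of the first unit and fills toward lower bit
 * positions.
 */
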
/*
 * bt_bitfield_write - write integer to a bitfield in native endianness
 * bt_bitfield_write_le - write integer to a bitfield in little endian
 * bt_bitfield_write_be - write integer to a bitfield in big endian
 */

#if (__BYTE_ORDER == __LITTLE_ENDIAN)

#define bt_bitfield_write(ptr, type, start, length, v) \
        _bt_bitfield_write_le(ptr, type, start, length, v)

#define bt_bitfield_write_le(ptr, type, start, length, v) \
        _bt_bitfield_write_le(ptr, type, start, length, v)

#define bt_bitfield_write_be(ptr, type, start, length, v) \
        _bt_bitfield_write_be(ptr, unsigned char, start, length, v)

#elif (__BYTE_ORDER == __BIG_ENDIAN)

#define bt_bitfield_write(ptr, type, start, length, v) \
        _bt_bitfield_write_be(ptr, type, start, length, v)

#define bt_bitfield_write_le(ptr, type, start, length, v) \
        _bt_bitfield_write_le(ptr, unsigned char, start, length, v)

#define bt_bitfield_write_be(ptr, type, start, length, v) \
        _bt_bitfield_write_be(ptr, type, start, length, v)

#else /* (__BYTE_ORDER == __PDP_ENDIAN) */

#error "Byte order not supported"

#endif
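
/*
 * Illustrative example, using a hypothetical caller-provided buffer:
 * pack a 12-bit signed field at bit offset 4, byte by byte, in
 * little-endian bit order.
 *
 *   uint8_t buf[4] = { 0 };
 *   int16_t field = -5;
 *
 *   bt_bitfield_write_le(buf, uint8_t, 4, 12, field);
 *
 * Bits 0-3 of buf[0] are left untouched; bit offsets 4-15 receive the
 * 12 least significant bits of `field`.
 */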

#define _bt_bitfield_read_le(ptr, type, start, length, vptr) \
do { \
        __typeof__(*(vptr)) *_vptr = (vptr); \
        __typeof__(*_vptr) _v; \
        type *_ptr = (void *) (ptr); \
        unsigned long _start = (start), _length = (length); \
        type _mask, _cmask; \
        unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \
        unsigned long _start_unit, _end_unit, _this_unit; \
        unsigned long _end, _cshift; /* _cshift is "complement shift" */ \
        bool _is_signed_type; \
\
        if (!_length) { \
                *_vptr = 0; \
                break; \
        } \
\
        _end = _start + _length; \
        _start_unit = _start / _ts; \
        _end_unit = (_end + (_ts - 1)) / _ts; \
\
        _this_unit = _end_unit - 1; \
        _BT_DIAG_PUSH \
        _BT_DIAG_IGNORE_TYPE_LIMITS \
        _is_signed_type = _bt_is_signed_type(__typeof__(_v)); \
        _BT_DIAG_POP \
        if (_is_signed_type \
            && (_ptr[_this_unit] & _bt_lshift((type) 1, (_end % _ts ? _end % _ts : _ts) - 1))) \
                _v = ~(__typeof__(_v)) 0; \
        else \
                _v = 0; \
        if (_start_unit == _end_unit - 1) { \
                _cmask = _ptr[_this_unit]; \
                _cmask = _bt_rshift(_cmask, _start % _ts); \
                if ((_end - _start) % _ts) { \
                        _mask = _bt_make_mask(type, _end - _start); \
                        _cmask &= _mask; \
                } \
                _bt_safe_lshift(_v, _end - _start); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
                *_vptr = _v; \
                break; \
        } \
        if (_end % _ts) { \
                _cshift = _end % _ts; \
                _mask = _bt_make_mask(type, _cshift); \
                _cmask = _ptr[_this_unit]; \
                _cmask &= _mask; \
                _bt_safe_lshift(_v, _cshift); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
                _end -= _cshift; \
                _this_unit--; \
        } \
        for (; (long) _this_unit >= (long) _start_unit + 1; _this_unit--) { \
                _bt_safe_lshift(_v, _ts); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \
                _end -= _ts; \
        } \
        if (_start % _ts) { \
                _mask = _bt_make_mask(type, _ts - (_start % _ts)); \
                _cmask = _ptr[_this_unit]; \
                _cmask = _bt_rshift(_cmask, _start % _ts); \
                _cmask &= _mask; \
                _bt_safe_lshift(_v, _ts - (_start % _ts)); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
        } else { \
                _bt_safe_lshift(_v, _ts); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \
        } \
        *_vptr = _v; \
} while (0)

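/*
 * Illustrative note: when the destination type pointed to by `vptr` is
 * signed and the most significant bit of the stored field is set, _v is
 * seeded with all one bits. The subsequent left shifts and ORs fill the
 * low bits with the field contents while the bits above the field stay
 * set, so the value written to *_vptr is already sign-extended.
 */
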
#define _bt_bitfield_read_be(ptr, type, start, length, vptr) \
do { \
        __typeof__(*(vptr)) *_vptr = (vptr); \
        __typeof__(*_vptr) _v; \
        type *_ptr = (void *) (ptr); \
        unsigned long _start = (start), _length = (length); \
        type _mask, _cmask; \
        unsigned long _ts = sizeof(type) * CHAR_BIT; /* type size */ \
        unsigned long _start_unit, _end_unit, _this_unit; \
        unsigned long _end, _cshift; /* _cshift is "complement shift" */ \
        bool _is_signed_type; \
\
        if (!_length) { \
                *_vptr = 0; \
                break; \
        } \
\
        _end = _start + _length; \
        _start_unit = _start / _ts; \
        _end_unit = (_end + (_ts - 1)) / _ts; \
\
        _this_unit = _start_unit; \
        _BT_DIAG_PUSH \
        _BT_DIAG_IGNORE_TYPE_LIMITS \
        _is_signed_type = _bt_is_signed_type(__typeof__(_v)); \
        _BT_DIAG_POP \
        if (_is_signed_type \
            && (_ptr[_this_unit] & _bt_lshift((type) 1, _ts - (_start % _ts) - 1))) \
                _v = ~(__typeof__(_v)) 0; \
        else \
                _v = 0; \
        if (_start_unit == _end_unit - 1) { \
                _cmask = _ptr[_this_unit]; \
                _cmask = _bt_rshift(_cmask, (_ts - (_end % _ts)) % _ts); \
                if ((_end - _start) % _ts) { \
                        _mask = _bt_make_mask(type, _end - _start); \
                        _cmask &= _mask; \
                } \
                _bt_safe_lshift(_v, _end - _start); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
                *_vptr = _v; \
                break; \
        } \
        if (_start % _ts) { \
                _cshift = _start % _ts; \
                _mask = _bt_make_mask(type, _ts - _cshift); \
                _cmask = _ptr[_this_unit]; \
                _cmask &= _mask; \
                _bt_safe_lshift(_v, _ts - _cshift); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
                _start += _ts - _cshift; \
                _this_unit++; \
        } \
        for (; _this_unit < _end_unit - 1; _this_unit++) { \
                _bt_safe_lshift(_v, _ts); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \
                _start += _ts; \
        } \
        if (_end % _ts) { \
                _mask = _bt_make_mask(type, _end % _ts); \
                _cmask = _ptr[_this_unit]; \
                _cmask = _bt_rshift(_cmask, _ts - (_end % _ts)); \
                _cmask &= _mask; \
                _bt_safe_lshift(_v, _end % _ts); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _cmask); \
        } else { \
                _bt_safe_lshift(_v, _ts); \
                _v |= _bt_cast_value_to_unsigned_type(__typeof__(_v), _ptr[_this_unit]); \
        } \
        *_vptr = _v; \
} while (0)

/*
 * bt_bitfield_read - read integer from a bitfield in native endianness
 * bt_bitfield_read_le - read integer from a bitfield in little endian
 * bt_bitfield_read_be - read integer from a bitfield in big endian
 */

#if (__BYTE_ORDER == __LITTLE_ENDIAN)

#define bt_bitfield_read(ptr, type, start, length, vptr) \
        _bt_bitfield_read_le(ptr, type, start, length, vptr)

#define bt_bitfield_read_le(ptr, type, start, length, vptr) \
        _bt_bitfield_read_le(ptr, type, start, length, vptr)

#define bt_bitfield_read_be(ptr, type, start, length, vptr) \
        _bt_bitfield_read_be(ptr, unsigned char, start, length, vptr)

#elif (__BYTE_ORDER == __BIG_ENDIAN)

#define bt_bitfield_read(ptr, type, start, length, vptr) \
        _bt_bitfield_read_be(ptr, type, start, length, vptr)

#define bt_bitfield_read_le(ptr, type, start, length, vptr) \
        _bt_bitfield_read_le(ptr, unsigned char, start, length, vptr)

#define bt_bitfield_read_be(ptr, type, start, length, vptr) \
        _bt_bitfield_read_be(ptr, type, start, length, vptr)

#else /* (__BYTE_ORDER == __PDP_ENDIAN) */

#error "Byte order not supported"

#endif
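
/*
 * Illustrative example, reading back the hypothetical 12-bit field
 * written in the example above:
 *
 *   int16_t out;
 *
 *   bt_bitfield_read_le(buf, uint8_t, 4, 12, &out);
 *
 * The field's most significant bit is set and the destination type is
 * signed, so the value is sign-extended into the 16-bit destination:
 * out == -5.
 */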

#endif /* _BABELTRACE_BITFIELD_H */