include/urcu/uatomic/generic.h
// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2010 Paolo Bonzini
//
// SPDX-License-Identifier: LicenseRef-Boehm-GC

#ifndef _URCU_UATOMIC_GENERIC_H
#define _URCU_UATOMIC_GENERIC_H

/*
 * Code inspired by libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <stdint.h>
#include <stdlib.h>
#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef uatomic_set
#define uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))
#endif

#define uatomic_load_store_return_op(op, addr, v, mo)		\
	__extension__						\
	({							\
								\
		switch (mo) {					\
		case CMM_ACQUIRE:				\
		case CMM_CONSUME:				\
		case CMM_RELAXED:				\
			break;					\
		case CMM_RELEASE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
								\
		__typeof__((*addr)) _value = op(addr, v);	\
								\
		switch (mo) {					\
		case CMM_CONSUME:				\
			cmm_smp_read_barrier_depends();		\
			break;					\
		case CMM_ACQUIRE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		case CMM_RELAXED:				\
		case CMM_RELEASE:				\
			break;					\
		default:					\
			abort();				\
		}						\
		_value;						\
	})

#define uatomic_load_store_op(op, addr, v, mo)			\
	do {							\
		switch (mo) {					\
		case CMM_ACQUIRE:				\
		case CMM_CONSUME:				\
		case CMM_RELAXED:				\
			break;					\
		case CMM_RELEASE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
								\
		op(addr, v);					\
								\
		switch (mo) {					\
		case CMM_CONSUME:				\
			cmm_smp_read_barrier_depends();		\
			break;					\
		case CMM_ACQUIRE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		case CMM_RELAXED:				\
		case CMM_RELEASE:				\
			break;					\
		default:					\
			abort();				\
		}						\
	} while (0)

#define uatomic_store(addr, v, mo)				\
	do {							\
		switch (mo) {					\
		case CMM_RELAXED:				\
			break;					\
		case CMM_RELEASE:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
								\
		uatomic_set(addr, v);				\
								\
		switch (mo) {					\
		case CMM_RELAXED:				\
		case CMM_RELEASE:				\
			break;					\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
	} while (0)
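
/*
 * Illustrative sketch only (the `payload` and `ready` variables are
 * hypothetical, not part of this API): a writer can publish data and then
 * set a flag with release ordering, so readers that observe the flag also
 * observe the payload.
 *
 *	static int payload;
 *	static int ready;
 *
 *	payload = 42;
 *	uatomic_store(&ready, 1, CMM_RELEASE);
 */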

#define uatomic_and_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_and, addr, v, mo)

#define uatomic_or_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_or, addr, v, mo)

#define uatomic_add_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_add, addr, v, mo)

#define uatomic_sub_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_sub, addr, v, mo)

#define uatomic_inc_mo(addr, mo)			\
	uatomic_load_store_op(uatomic_add, addr, 1, mo)

#define uatomic_dec_mo(addr, mo)			\
	uatomic_load_store_op(uatomic_add, addr, -1, mo)

/*
 * NOTE: We cannot simply do switch (_value == (old) ? mos : mof), otherwise
 * the compiler emits a -Wduplicated-cond warning.
 */
#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)			\
	__extension__							\
	({								\
		switch (mos) {						\
		case CMM_ACQUIRE:					\
		case CMM_CONSUME:					\
		case CMM_RELAXED:					\
			break;						\
		case CMM_RELEASE:					\
		case CMM_ACQ_REL:					\
		case CMM_SEQ_CST:					\
		case CMM_SEQ_CST_FENCE:					\
			cmm_smp_mb();					\
			break;						\
		default:						\
			abort();					\
		}							\
									\
		__typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old,	\
							new);		\
									\
		if (_value == (old)) {					\
			switch (mos) {					\
			case CMM_CONSUME:				\
				cmm_smp_read_barrier_depends();		\
				break;					\
			case CMM_ACQUIRE:				\
			case CMM_ACQ_REL:				\
			case CMM_SEQ_CST:				\
			case CMM_SEQ_CST_FENCE:				\
				cmm_smp_mb();				\
				break;					\
			case CMM_RELAXED:				\
			case CMM_RELEASE:				\
				break;					\
			default:					\
				abort();				\
			}						\
		} else {						\
			switch (mof) {					\
			case CMM_CONSUME:				\
				cmm_smp_read_barrier_depends();		\
				break;					\
			case CMM_ACQUIRE:				\
			case CMM_ACQ_REL:				\
			case CMM_SEQ_CST:				\
			case CMM_SEQ_CST_FENCE:				\
				cmm_smp_mb();				\
				break;					\
			case CMM_RELAXED:				\
			case CMM_RELEASE:				\
				break;					\
			default:					\
				abort();				\
			}						\
		}							\
		_value;							\
	})
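
/*
 * Illustrative sketch only (the `lock` variable is hypothetical): a
 * test-and-set spinlock can request acquire ordering on a successful
 * compare-and-swap and relaxed ordering on failure, avoiding a full
 * barrier on every failed retry.
 *
 *	static int lock;
 *
 *	while (uatomic_cmpxchg_mo(&lock, 0, 1, CMM_ACQUIRE, CMM_RELAXED) != 0)
 *		continue;
 *	... critical section ...
 *	uatomic_store(&lock, 0, CMM_RELEASE);
 */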

#define uatomic_xchg_mo(addr, v, mo)			\
	uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)

#define uatomic_add_return_mo(addr, v, mo)		\
	uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)

#define uatomic_sub_return_mo(addr, v, mo)		\
	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)


#ifndef uatomic_read
#define uatomic_read(addr)	CMM_LOAD_SHARED(*(addr))
#endif

#define uatomic_load(addr, mo)						\
	__extension__							\
	({								\
		switch (mo) {						\
		case CMM_ACQUIRE:					\
		case CMM_CONSUME:					\
		case CMM_RELAXED:					\
			break;						\
		case CMM_SEQ_CST:					\
		case CMM_SEQ_CST_FENCE:					\
			cmm_smp_mb();					\
			break;						\
		default:						\
			abort();					\
		}							\
									\
		__typeof__(*(addr)) _rcu_value = uatomic_read(addr);	\
									\
		switch (mo) {						\
		case CMM_RELAXED:					\
			break;						\
		case CMM_CONSUME:					\
			cmm_smp_read_barrier_depends();			\
			break;						\
		case CMM_ACQUIRE:					\
		case CMM_SEQ_CST:					\
		case CMM_SEQ_CST_FENCE:					\
			cmm_smp_mb();					\
			break;						\
		default:						\
			abort();					\
		}							\
									\
		_rcu_value;						\
	})
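
/*
 * Illustrative sketch only: the reader side of the hypothetical
 * publication example above pairs a CMM_ACQUIRE load with the writer's
 * CMM_RELEASE store, so a non-zero `ready` guarantees `payload` is visible.
 *
 *	if (uatomic_load(&ready, CMM_ACQUIRE))
 *		assert(payload == 42);
 */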

#if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR
#ifdef ILLEGAL_INSTR
static inline __attribute__((always_inline))
void _uatomic_link_error(void)
{
	/*
	 * Generate an illegal instruction. We cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
}
#else
static inline __attribute__((always_inline, __noreturn__))
void _uatomic_link_error(void)
{
	__builtin_trap();
}
#endif

#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error(void);
#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
/* cmpxchg */

#ifndef uatomic_cmpxchg
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
		unsigned long _new, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_val_compare_and_swap_1((uint8_t *) addr, old,
				_new);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_val_compare_and_swap_2((uint16_t *) addr, old,
				_new);
#endif
	case 4:
		return __sync_val_compare_and_swap_4((uint32_t *) addr, old,
				_new);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_val_compare_and_swap_8((uint64_t *) addr, old,
				_new);
#endif
	}
	_uatomic_link_error();
	return 0;
}


#define uatomic_cmpxchg(addr, old, _new)				\
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr),			\
					caa_cast_long_keep_sign(old),	\
					caa_cast_long_keep_sign(_new),	\
					sizeof(*(addr))))


/* uatomic_and */

#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_and_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_and_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_and_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_and_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#endif

/* uatomic_or */

#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_or_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_or_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_or_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_or_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
	return;
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#endif


/* uatomic_add_return */

#ifndef uatomic_add_return
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_add_and_fetch_1((uint8_t *) addr, val);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_add_and_fetch_2((uint16_t *) addr, val);
#endif
	case 4:
		return __sync_add_and_fetch_4((uint32_t *) addr, val);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_add_and_fetch_8((uint64_t *) addr, val);
#endif
	}
	_uatomic_link_error();
	return 0;
}


#define uatomic_add_return(addr, v)				\
	((__typeof__(*(addr))) _uatomic_add_return((addr),	\
					caa_cast_long_keep_sign(v),	\
					sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */
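
/*
 * Illustrative sketch only (the `refcount` variable and put_object()
 * helper are hypothetical): uatomic_add_return() returns the updated
 * value, which makes it a natural fit for reference counting where the
 * last decrement triggers cleanup.
 *
 *	static long refcount = 1;
 *
 *	if (uatomic_add_return(&refcount, -1) == 0)
 *		put_object();
 */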

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old;

		do {
			old = uatomic_read((uint8_t *) addr);
		} while (!__sync_bool_compare_and_swap_1((uint8_t *) addr,
				old, val));

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old;

		do {
			old = uatomic_read((uint16_t *) addr);
		} while (!__sync_bool_compare_and_swap_2((uint16_t *) addr,
				old, val));

		return old;
	}
#endif
	case 4:
	{
		uint32_t old;

		do {
			old = uatomic_read((uint32_t *) addr);
		} while (!__sync_bool_compare_and_swap_4((uint32_t *) addr,
				old, val));

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old;

		do {
			old = uatomic_read((uint64_t *) addr);
		} while (!__sync_bool_compare_and_swap_8((uint64_t *) addr,
				old, val));

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)					\
	((__typeof__(*(addr))) _uatomic_exchange((addr),	\
					caa_cast_long_keep_sign(v),	\
					sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#else /* #ifndef uatomic_cmpxchg */

#ifndef uatomic_and
/* uatomic_and */

static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#endif /* #ifndef uatomic_and */

#ifndef uatomic_or
/* uatomic_or */

static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#endif /* #ifndef uatomic_or */

#ifndef uatomic_add_return
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)				\
	((__typeof__(*(addr))) _uatomic_add_return((addr),	\
					caa_cast_long_keep_sign(v),	\
					sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)					\
	((__typeof__(*(addr))) _uatomic_exchange((addr),	\
					caa_cast_long_keep_sign(v),	\
					sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#endif /* #else #ifndef uatomic_cmpxchg */
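
/*
 * Illustrative sketch only (the `head` list pointer and struct node are
 * hypothetical): uatomic_xchg() atomically swaps in a new value and
 * returns the previous one, so a consumer can detach an entire list of
 * pending nodes in a single operation.
 *
 *	struct node *pending = uatomic_xchg(&head, (struct node *) NULL);
 */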

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#ifndef uatomic_add
#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()
#endif

#define uatomic_sub_return(addr, v)	\
	uatomic_add_return((addr), -(caa_cast_long_keep_sign(v)))
#define uatomic_sub(addr, v)		\
	uatomic_add((addr), -(caa_cast_long_keep_sign(v)))
#define cmm_smp_mb__before_uatomic_sub()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_sub()		cmm_smp_mb__after_uatomic_add()

#ifndef uatomic_inc
#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define cmm_smp_mb__before_uatomic_inc()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_inc()		cmm_smp_mb__after_uatomic_add()
#endif

#ifndef uatomic_dec
#define uatomic_dec(addr)		uatomic_add((addr), -1)
#define cmm_smp_mb__before_uatomic_dec()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_dec()		cmm_smp_mb__after_uatomic_add()
#endif
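
/*
 * Illustrative sketch only (the `hits` counter is hypothetical):
 * architectures whose uatomic_inc() already implies a full barrier define
 * the cmm_smp_mb__before/after helpers as plain compiler barriers, so
 * callers can write portable code that only pays for fences where the
 * implementation actually needs them.
 *
 *	cmm_smp_mb__before_uatomic_inc();
 *	uatomic_inc(&hits);
 *	cmm_smp_mb__after_uatomic_inc();
 */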

#ifdef __cplusplus
}
#endif

#endif /* _URCU_UATOMIC_GENERIC_H */