* Boehm-Demers-Weiser conservative garbage collector.
*/
+#include <compiler.h>
+
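+/* Fall back on the compiler-provided __SIZEOF_LONG__ for BITS_PER_LONG. */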
#ifndef BITS_PER_LONG
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif
};
#define __hp(x) ((struct __atomic_dummy *)(x))
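+
+/*
+ * atomic_set()/atomic_read() are single-copy atomic store/load wrappers
+ * built on ACCESS_ONCE() (from the compiler.h included above; presumably
+ * a volatile access, as in the Linux kernel). They impose no ordering by
+ * themselves and are not memory barriers.
+ */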
+#define atomic_set(addr, v) \
+do { \
+ ACCESS_ONCE(*(addr)) = (v); \
+} while (0)
+
+#define atomic_read(addr) ACCESS_ONCE(*(addr))
+
/* cmpxchg */
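+/*
+ * If *addr equals old, atomically store _new into *addr.  Either way,
+ * x86 cmpxchg leaves the value found in *addr in eax/rax, so the caller
+ * detects success by comparing the return value with old.
+ */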
static inline __attribute__((always_inline))
-unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
- unsigned long _new, int len)
+unsigned long _atomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
{
switch (len) {
case 1:
{
unsigned char result = old;
+
__asm__ __volatile__(
"lock; cmpxchgb %2, %1"
: "+a"(result), "+m"(*__hp(addr))
case 2:
{
unsigned short result = old;
+
__asm__ __volatile__(
"lock; cmpxchgw %2, %1"
: "+a"(result), "+m"(*__hp(addr))
case 4:
{
unsigned int result = old;
+
__asm__ __volatile__(
"lock; cmpxchgl %2, %1"
: "+a"(result), "+m"(*__hp(addr))
#if (BITS_PER_LONG == 64)
case 8:
{
- unsigned int result = old;
+ unsigned long result = old;
+
__asm__ __volatile__(
- "lock; cmpxchgl %2, %1"
+ "lock; cmpxchgq %2, %1"
: "+a"(result), "+m"(*__hp(addr))
: "r"((unsigned long)_new)
: "memory");
/* xchg */
static inline __attribute__((always_inline))
-unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
+unsigned long _atomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: "xchg" with a memory operand is implicitly locked,
	 * so no "lock" prefix is needed. */
switch (len) {
((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
sizeof(*(addr))))
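+
+/*
+ * Usage sketch (assuming the wrapper macro above is named atomic_xchg()):
+ * the previous value of *addr is returned, e.g. to hand off a pointer:
+ *
+ *	struct node *old_head = atomic_xchg(&head, new_head);
+ */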
-/* atomic_add */
+/* atomic_add_return, atomic_sub_return */
static inline __attribute__((always_inline))
-void _atomic_add(volatile void *addr, unsigned long val, int len)
+unsigned long _atomic_add_return(void *addr, unsigned long val,
+ int len)
+{
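+	/*
+	 * xadd exchanges the register operand with *addr and stores the
+	 * sum in *addr, so "result" ends up holding the old value of
+	 * *addr; old value + val is the updated value we return.
+	 */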
+ switch (len) {
+ case 1:
+ {
+ unsigned char result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddb %1, %0"
+ : "+m"(*__hp(addr)), "+q" (result)
+ :
+ : "memory");
+ return result + (unsigned char)val;
+ }
+ case 2:
+ {
+ unsigned short result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddw %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned short)val;
+ }
+ case 4:
+ {
+ unsigned int result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddl %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned int)val;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddq %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned long)val;
+ }
+#endif
+ }
+	/* Generate an illegal instruction; an unsupported size cannot be
+	 * caught with linker tricks when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define atomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _atomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+#define atomic_sub_return(addr, v) atomic_add_return((addr), -(v))
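+
+/*
+ * Example (sketch): since xadd yields the pre-addition value, the
+ * *_return macros evaluate to the value *after* the update:
+ *
+ *	long count = 1;
+ *	long now = atomic_add_return(&count, 1);	(now == 2)
+ *	long later = atomic_sub_return(&count, 2);	(later == 0)
+ */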
+
+/* atomic_add, atomic_sub */
+
+static inline __attribute__((always_inline))
+void _atomic_add(void *addr, unsigned long val, int len)
{
switch (len) {
case 1:
#define atomic_add(addr, v) \
(_atomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
+#define atomic_sub(addr, v) atomic_add((addr), -(v))
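+/*
+ * atomic_add()/atomic_sub() discard the result; use the *_return
+ * variants above when the updated value is needed, e.g.:
+ *
+ *	atomic_add(&bytes_sent, len);	(bytes_sent is illustrative)
+ */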
+
+
+/* atomic_inc */
+
+static inline __attribute__((always_inline))
+void _atomic_inc(void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; incb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; incw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; incl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; incq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+	/* Generate an illegal instruction; an unsupported size cannot be
+	 * caught with linker tricks when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define atomic_inc(addr) (_atomic_inc((addr), sizeof(*(addr))))
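+
+/*
+ * Usage sketch (illustrative): "lock; inc" updates atomically but does
+ * not return the new value, which suits plain event counters:
+ *
+ *	static unsigned long nr_events;
+ *	atomic_inc(&nr_events);
+ */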
+
+/* atomic_dec */
+
+static inline __attribute__((always_inline))
+void _atomic_dec(void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; decb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; decw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; decl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; decq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+	/* Generate an illegal instruction; an unsupported size cannot be
+	 * caught with linker tricks when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define atomic_dec(addr) (_atomic_dec((addr), sizeof(*(addr))))
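+
+/*
+ * Usage sketch (illustrative): atomic_dec() cannot report the new value,
+ * so a refcount that must detect the drop to zero should use
+ * atomic_sub_return() instead:
+ *
+ *	if (atomic_sub_return(&obj->refs, 1) == 0)
+ *		obj_free(obj);	(obj_free() is a hypothetical helper)
+ */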
+
#endif /* #ifndef _INCLUDE_API_H */
#endif /* ARCH_ATOMIC_X86_H */