Cleanup: remove unused value warning
[urcu.git] / urcu / uatomic_arch_ppc.h
index b732b08c42389a465db3b72d8b18386357dd120f..bb74934fb19a279dfe43d7f1785ea89865756193 100644 (file)
 extern "C" {
 #endif 
 
-#ifndef __SIZEOF_LONG__
-#ifdef __powerpc64__
-#define __SIZEOF_LONG__ 8
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE  "sync\n"
 #else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG  (__SIZEOF_LONG__ * 8)
+#define LWSYNC_OPCODE  "lwsync\n"
 #endif
 
 #define ILLEGAL_INSTR  ".long  0xd00d00"
 
-#define uatomic_set(addr, v)   STORE_SHARED(*(addr), (v))
-#define uatomic_read(addr)     LOAD_SHARED(*(addr))
-
 /*
 * Using an isync as the second barrier for exchange to provide acquire semantics.
  * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
@@ -62,7 +53,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
                unsigned int result;
 
                __asm__ __volatile__(
-                       "lwsync\n"
+                       LWSYNC_OPCODE
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "stwcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
@@ -73,13 +64,13 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 
                return result;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;
 
                __asm__ __volatile__(
-                       "lwsync\n"
+                       LWSYNC_OPCODE
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "stdcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
@@ -113,9 +104,9 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                unsigned int old_val;
 
                __asm__ __volatile__(
-                       "lwsync\n"
+                       LWSYNC_OPCODE
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
-                       "cmpd %0,%3\n"          /* if load is not equal to */
+                       "cmpw %0,%3\n"          /* if load is not equal to */
                        "bne 2f\n"              /* old, fail */
                        "stwcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
@@ -128,13 +119,13 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 
                return old_val;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long old_val;
 
                __asm__ __volatile__(
-                       "lwsync\n"
+                       LWSYNC_OPCODE
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "cmpd %0,%3\n"          /* if load is not equal to */
                        "bne 2f\n"              /* old, fail */
@@ -142,7 +133,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                        "bne- 1b\n"             /* retry if lost reservation */
                        "isync\n"
                "2:\n"
-                               : "=&r"(old_val),
+                               : "=&r"(old_val)
                                : "r"(addr), "r"((unsigned long)_new),
                                  "r"((unsigned long)old)
                                : "memory", "cc");
@@ -175,7 +166,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
                unsigned int result;
 
                __asm__ __volatile__(
-                       "lwsync\n"
+                       LWSYNC_OPCODE
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stwcx. %0,0,%1\n"      /* store conditional */
@@ -187,13 +178,13 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 
                return result;
        }
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
        case 8:
        {
                unsigned long result;
 
                __asm__ __volatile__(
-                       "lwsync\n"
+                       LWSYNC_OPCODE
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stdcx. %0,0,%1\n"      /* store conditional */
@@ -219,20 +210,10 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
                                                  (unsigned long)(v),   \
                                                  sizeof(*(addr))))
 
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#define uatomic_sub_return(addr, v)    uatomic_add_return((addr), -(v))
-
-#define uatomic_add(addr, v)           (void)uatomic_add_return((addr), (v))
-#define uatomic_sub(addr, v)           (void)uatomic_sub_return((addr), (v))
-
-#define uatomic_inc(addr)              uatomic_add((addr), 1)
-#define uatomic_dec(addr)              uatomic_add((addr), -1)
-
-#define compat_uatomic_cmpxchg(ptr, old, _new) uatomic_cmpxchg(ptr, old, _new)
-
 #ifdef __cplusplus 
 }
 #endif
 
+#include <urcu/uatomic_generic.h>
+
 #endif /* _URCU_ARCH_UATOMIC_PPC_H */
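
Usage note (not part of the patch above): after this change, uatomic_set()/uatomic_read() and the add/sub/inc/dec helpers come from the newly included urcu/uatomic_generic.h, while this file keeps only the PowerPC ll/sc implementations of exchange, cmpxchg and add_return, built on LWSYNC_OPCODE ("lwsync", or a full "sync" when __NO_LWSYNC__ is defined). A minimal, hypothetical caller sketch follows; the <urcu/uatomic.h> include name is an assumption and may be urcu/uatomic_arch.h in trees of this vintage.

/* Hypothetical usage sketch of the uatomic API backed by this header. */
#include <stdio.h>
#include <urcu/uatomic.h>       /* assumed public entry point; may differ in this tree */

static unsigned long counter;

int main(void)
{
        uatomic_set(&counter, 1UL);                     /* atomic store */

        /* lwarx/stwcx. (ldarx/stdcx. on 64-bit) compare-and-swap; returns the old value */
        unsigned long old = uatomic_cmpxchg(&counter, 1UL, 2UL);

        /* ll/sc add loop; returns the new value (3 here) */
        unsigned long now = uatomic_add_return(&counter, 1UL);

        printf("old=%lu now=%lu read=%lu\n", old, now, uatomic_read(&counter));
        return 0;
}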