X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=urcu%2Farch%2Fppc.h;h=8a96dd9deb50626efe6a9a8b75e6cbab7c0dc628;hb=999991c6e4600c410181baea65bda9f406464872;hp=048b217392cc7cd471647625ca1dd128e6627e00;hpb=8c35d699cf442f91fbba3c99beaa41a083ef7bff;p=urcu.git

diff --git a/urcu/arch/ppc.h b/urcu/arch/ppc.h
index 048b217..8a96dd9 100644
--- a/urcu/arch/ppc.h
+++ b/urcu/arch/ppc.h
@@ -24,6 +24,7 @@
 
 #include <urcu/compiler.h>
 #include <urcu/config.h>
+#include <urcu/syscall-compat.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -32,6 +33,12 @@ extern "C" {
 /* Include size of POWER5+ L3 cache lines: 256 bytes */
 #define CAA_CACHE_LINE_SIZE	256
 
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE	"sync\n"
+#else
+#define LWSYNC_OPCODE	"lwsync\n"
+#endif
+
 /*
  * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
  * preserve ordering of cacheable vs. non-cacheable accesses, so it
@@ -40,7 +47,7 @@ extern "C" {
  * order cacheable and non-cacheable memory operations separately---i.e.
  * not the latter against the former.
  */
-#define cmm_mb()	asm volatile("sync":::"memory")
+#define cmm_mb()	__asm__ __volatile__ ("sync":::"memory")
 
 /*
  * lwsync orders loads in cacheable memory with respect to other loads,
@@ -48,27 +55,30 @@ extern "C" {
  * Therefore, use it for barriers ordering accesses to cacheable memory
  * only.
  */
-#define cmm_smp_rmb()	asm volatile("lwsync":::"memory")
-#define cmm_smp_wmb()	asm volatile("lwsync":::"memory")
+#define cmm_smp_rmb()	__asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb()	__asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
 
 #define mftbl()						\
+	__extension__					\
 	({						\
 		unsigned long rval;			\
-		asm volatile("mftbl %0" : "=r" (rval));	\
+		__asm__ __volatile__ ("mftbl %0" : "=r" (rval));	\
 		rval;					\
 	})
 
 #define mftbu()						\
+	__extension__					\
 	({						\
 		unsigned long rval;			\
-		asm volatile("mftbu %0" : "=r" (rval));	\
+		__asm__ __volatile__ ("mftbu %0" : "=r" (rval));	\
 		rval;					\
 	})
 
 #define mftb()						\
+	__extension__					\
 	({						\
 		unsigned long long rval;		\
-		asm volatile("mftb %0" : "=r" (rval));	\
+		__asm__ __volatile__ ("mftb %0" : "=r" (rval));	\
 		rval;					\
 	})
 
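Note on the barrier choice in the hunks above: lwsync (or plain sync when __NO_LWSYNC__ is defined) suffices to pair a write barrier with a read barrier for ordinary cacheable memory, which is the message-passing pattern cmm_smp_wmb()/cmm_smp_rmb() are intended for; only cmm_mb() needs full sync because MMIO may be involved. The sketch below illustrates that pairing. It is not part of the patch, and the producer/consumer names and the payload value are made up for the example.

/* Illustration only -- not part of the patch. */
#include <urcu/arch.h>	/* pulls in cmm_smp_wmb()/cmm_smp_rmb() for this arch */

static int payload;
static volatile int ready;

static void producer(void)
{
	payload = 42;
	cmm_smp_wmb();	/* order the payload store before the flag store */
	ready = 1;
}

static int consumer(void)
{
	while (!ready)
		;	/* spin until the producer's flag becomes visible */
	cmm_smp_rmb();	/* order the flag load before the payload load */
	return payload;
}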
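Similarly, mftbl()/mftbu() exist because 32-bit PowerPC cannot read the 64-bit timebase in a single instruction: the conventional technique reads the upper half, then the lower half, and retries if the upper half changed in between (liburcu's 32-bit cycle counter uses this same pattern further down in the header). A minimal sketch follows; the helper name is hypothetical and the code is not part of the patch.

/* Illustration only -- hypothetical helper, not part of the patch. */
#include <urcu/arch.h>	/* mftbl(), mftbu() */

static inline unsigned long long read_timebase32(void)
{
	unsigned long hi, lo;

	for (;;) {
		hi = mftbu();		/* upper 32 bits of the timebase */
		lo = mftbl();		/* lower 32 bits */
		if (mftbu() == hi)	/* upper half stable: no carry occurred */
			return ((unsigned long long) hi << 32) | lo;
		/* The low word overflowed into the high word; retry. */
	}
}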