projects
/
userspace-rcu.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
fix: properly detect 'cmpxchg' on x86-32
[userspace-rcu.git]
/
include
/
urcu
/
uatomic
/
x86.h
diff --git
a/include/urcu/uatomic/x86.h
b/include/urcu/uatomic/x86.h
index e9f2f78275f472618d2ad302205eda8bb37629c8..d416963c31aeca0cdd5003b67858c1209779627c 100644
(file)
--- a/
include/urcu/uatomic/x86.h
+++ b/
include/urcu/uatomic/x86.h
@@
-37,13
+37,20
@@
extern "C" {
*/
/*
*/
/*
- * The __hp() macro casts the void pointer
"x"
to a pointer to a structure
+ * The __hp() macro casts the void pointer
@x
to a pointer to a structure
* containing an array of char of the specified size. This allows passing the
* @addr arguments of the following inline functions as "m" and "+m" operands
* containing an array of char of the specified size. This allows passing the
* @addr arguments of the following inline functions as "m" and "+m" operands
- * to the assembly.
+ * to the assembly. The @size parameter should be a constant to support
+ * compilers such as clang which do not support VLA. Create typedefs because
+ * C++ does not allow types to be defined in casts.
*/
*/
-#define __hp(size, x) ((struct { char v[size]; } *)(x))
+typedef struct { char v[1]; } __hp_1;
+typedef struct { char v[2]; } __hp_2;
+typedef struct { char v[4]; } __hp_4;
+typedef struct { char v[8]; } __hp_8;
+
+#define __hp(size, x) ((__hp_##size *)(x))
#define _uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
#define _uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
@@
-60,7
+67,7
@@
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
__asm__ __volatile__(
"lock; cmpxchgb %2, %1"
__asm__ __volatile__(
"lock; cmpxchgb %2, %1"
- : "+a"(result), "+m"(*__hp(
len
, addr))
+ : "+a"(result), "+m"(*__hp(
1
, addr))
: "q"((unsigned char)_new)
: "memory");
return result;
: "q"((unsigned char)_new)
: "memory");
return result;
@@
-71,7
+78,7
@@
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
__asm__ __volatile__(
"lock; cmpxchgw %2, %1"
__asm__ __volatile__(
"lock; cmpxchgw %2, %1"
- : "+a"(result), "+m"(*__hp(
len
, addr))
+ : "+a"(result), "+m"(*__hp(
2
, addr))
: "r"((unsigned short)_new)
: "memory");
return result;
: "r"((unsigned short)_new)
: "memory");
return result;
@@
-82,7
+89,7
@@
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
__asm__ __volatile__(
"lock; cmpxchgl %2, %1"
__asm__ __volatile__(
"lock; cmpxchgl %2, %1"
- : "+a"(result), "+m"(*__hp(
len
, addr))
+ : "+a"(result), "+m"(*__hp(
4
, addr))
: "r"((unsigned int)_new)
: "memory");
return result;
: "r"((unsigned int)_new)
: "memory");
return result;
@@
-94,7
+101,7
@@
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
__asm__ __volatile__(
"lock; cmpxchgq %2, %1"
__asm__ __volatile__(
"lock; cmpxchgq %2, %1"
- : "+a"(result), "+m"(*__hp(
len
, addr))
+ : "+a"(result), "+m"(*__hp(
8
, addr))
: "r"((unsigned long)_new)
: "memory");
return result;
: "r"((unsigned long)_new)
: "memory");
return result;
@@
-127,7
+134,7
@@
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
unsigned char result;
__asm__ __volatile__(
"xchgb %0, %1"
unsigned char result;
__asm__ __volatile__(
"xchgb %0, %1"
- : "=q"(result), "+m"(*__hp(
len
, addr))
+ : "=q"(result), "+m"(*__hp(
1
, addr))
: "0" ((unsigned char)val)
: "memory");
return result;
: "0" ((unsigned char)val)
: "memory");
return result;
@@
-137,7
+144,7
@@
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
unsigned short result;
__asm__ __volatile__(
"xchgw %0, %1"
unsigned short result;
__asm__ __volatile__(
"xchgw %0, %1"
- : "=r"(result), "+m"(*__hp(
len
, addr))
+ : "=r"(result), "+m"(*__hp(
2
, addr))
: "0" ((unsigned short)val)
: "memory");
return result;
: "0" ((unsigned short)val)
: "memory");
return result;
@@
-147,7
+154,7
@@
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
unsigned int result;
__asm__ __volatile__(
"xchgl %0, %1"
unsigned int result;
__asm__ __volatile__(
"xchgl %0, %1"
- : "=r"(result), "+m"(*__hp(
len
, addr))
+ : "=r"(result), "+m"(*__hp(
4
, addr))
: "0" ((unsigned int)val)
: "memory");
return result;
: "0" ((unsigned int)val)
: "memory");
return result;
@@
-158,7
+165,7
@@
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
unsigned long result;
__asm__ __volatile__(
"xchgq %0, %1"
unsigned long result;
__asm__ __volatile__(
"xchgq %0, %1"
- : "=r"(result), "+m"(*__hp(
len
, addr))
+ : "=r"(result), "+m"(*__hp(
8
, addr))
: "0" ((unsigned long)val)
: "memory");
return result;
: "0" ((unsigned long)val)
: "memory");
return result;
@@
-191,7
+198,7
@@
unsigned long __uatomic_add_return(void *addr, unsigned long val,
__asm__ __volatile__(
"lock; xaddb %1, %0"
__asm__ __volatile__(
"lock; xaddb %1, %0"
- : "+m"(*__hp(
len
, addr)), "+q" (result)
+ : "+m"(*__hp(
1
, addr)), "+q" (result)
:
: "memory");
return result + (unsigned char)val;
:
: "memory");
return result + (unsigned char)val;
@@
-202,7
+209,7
@@
unsigned long __uatomic_add_return(void *addr, unsigned long val,
__asm__ __volatile__(
"lock; xaddw %1, %0"
__asm__ __volatile__(
"lock; xaddw %1, %0"
- : "+m"(*__hp(
len
, addr)), "+r" (result)
+ : "+m"(*__hp(
2
, addr)), "+r" (result)
:
: "memory");
return result + (unsigned short)val;
:
: "memory");
return result + (unsigned short)val;
@@
-213,7
+220,7
@@
unsigned long __uatomic_add_return(void *addr, unsigned long val,
__asm__ __volatile__(
"lock; xaddl %1, %0"
__asm__ __volatile__(
"lock; xaddl %1, %0"
- : "+m"(*__hp(
len
, addr)), "+r" (result)
+ : "+m"(*__hp(
4
, addr)), "+r" (result)
:
: "memory");
return result + (unsigned int)val;
:
: "memory");
return result + (unsigned int)val;
@@
-225,7
+232,7
@@
unsigned long __uatomic_add_return(void *addr, unsigned long val,
__asm__ __volatile__(
"lock; xaddq %1, %0"
__asm__ __volatile__(
"lock; xaddq %1, %0"
- : "+m"(*__hp(
len
, addr)), "+r" (result)
+ : "+m"(*__hp(
8
, addr)), "+r" (result)
:
: "memory");
return result + (unsigned long)val;
:
: "memory");
return result + (unsigned long)val;
@@
-255,7
+262,7
@@
void __uatomic_and(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; andb %1, %0"
{
__asm__ __volatile__(
"lock; andb %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
1
, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
: "iq" ((unsigned char)val)
: "memory");
return;
@@
-264,7
+271,7
@@
void __uatomic_and(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; andw %1, %0"
{
__asm__ __volatile__(
"lock; andw %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
2
, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
: "ir" ((unsigned short)val)
: "memory");
return;
@@
-273,7
+280,7
@@
void __uatomic_and(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; andl %1, %0"
{
__asm__ __volatile__(
"lock; andl %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
4
, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
: "ir" ((unsigned int)val)
: "memory");
return;
@@
-283,7
+290,7
@@
void __uatomic_and(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; andq %1, %0"
{
__asm__ __volatile__(
"lock; andq %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
8
, addr))
: "er" ((unsigned long)val)
: "memory");
return;
: "er" ((unsigned long)val)
: "memory");
return;
@@
-311,7
+318,7
@@
void __uatomic_or(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; orb %1, %0"
{
__asm__ __volatile__(
"lock; orb %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
1
, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
: "iq" ((unsigned char)val)
: "memory");
return;
@@
-320,7
+327,7
@@
void __uatomic_or(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; orw %1, %0"
{
__asm__ __volatile__(
"lock; orw %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
2
, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
: "ir" ((unsigned short)val)
: "memory");
return;
@@
-329,7
+336,7
@@
void __uatomic_or(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; orl %1, %0"
{
__asm__ __volatile__(
"lock; orl %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
4
, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
: "ir" ((unsigned int)val)
: "memory");
return;
@@
-339,7
+346,7
@@
void __uatomic_or(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; orq %1, %0"
{
__asm__ __volatile__(
"lock; orq %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
8
, addr))
: "er" ((unsigned long)val)
: "memory");
return;
: "er" ((unsigned long)val)
: "memory");
return;
@@
-367,7
+374,7
@@
void __uatomic_add(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; addb %1, %0"
{
__asm__ __volatile__(
"lock; addb %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
1
, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
: "iq" ((unsigned char)val)
: "memory");
return;
@@
-376,7
+383,7
@@
void __uatomic_add(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; addw %1, %0"
{
__asm__ __volatile__(
"lock; addw %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
2
, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
: "ir" ((unsigned short)val)
: "memory");
return;
@@
-385,7
+392,7
@@
void __uatomic_add(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; addl %1, %0"
{
__asm__ __volatile__(
"lock; addl %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
4
, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
: "ir" ((unsigned int)val)
: "memory");
return;
@@
-395,7
+402,7
@@
void __uatomic_add(void *addr, unsigned long val, int len)
{
__asm__ __volatile__(
"lock; addq %1, %0"
{
__asm__ __volatile__(
"lock; addq %1, %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
8
, addr))
: "er" ((unsigned long)val)
: "memory");
return;
: "er" ((unsigned long)val)
: "memory");
return;
@@
-424,7
+431,7
@@
void __uatomic_inc(void *addr, int len)
{
__asm__ __volatile__(
"lock; incb %0"
{
__asm__ __volatile__(
"lock; incb %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
1
, addr))
:
: "memory");
return;
:
: "memory");
return;
@@
-433,7
+440,7
@@
void __uatomic_inc(void *addr, int len)
{
__asm__ __volatile__(
"lock; incw %0"
{
__asm__ __volatile__(
"lock; incw %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
2
, addr))
:
: "memory");
return;
:
: "memory");
return;
@@
-442,7
+449,7
@@
void __uatomic_inc(void *addr, int len)
{
__asm__ __volatile__(
"lock; incl %0"
{
__asm__ __volatile__(
"lock; incl %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
4
, addr))
:
: "memory");
return;
:
: "memory");
return;
@@
-452,7
+459,7
@@
void __uatomic_inc(void *addr, int len)
{
__asm__ __volatile__(
"lock; incq %0"
{
__asm__ __volatile__(
"lock; incq %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
8
, addr))
:
: "memory");
return;
:
: "memory");
return;
@@
-477,7
+484,7
@@
void __uatomic_dec(void *addr, int len)
{
__asm__ __volatile__(
"lock; decb %0"
{
__asm__ __volatile__(
"lock; decb %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
1
, addr))
:
: "memory");
return;
:
: "memory");
return;
@@
-486,7
+493,7
@@
void __uatomic_dec(void *addr, int len)
{
__asm__ __volatile__(
"lock; decw %0"
{
__asm__ __volatile__(
"lock; decw %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
2
, addr))
:
: "memory");
return;
:
: "memory");
return;
@@
-495,7
+502,7
@@
void __uatomic_dec(void *addr, int len)
{
__asm__ __volatile__(
"lock; decl %0"
{
__asm__ __volatile__(
"lock; decl %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
4
, addr))
:
: "memory");
return;
:
: "memory");
return;
@@
-505,7
+512,7
@@
void __uatomic_dec(void *addr, int len)
{
__asm__ __volatile__(
"lock; decq %0"
{
__asm__ __volatile__(
"lock; decq %0"
- : "=m"(*__hp(
len
, addr))
+ : "=m"(*__hp(
8
, addr))
:
: "memory");
return;
:
: "memory");
return;
@@
-522,7
+529,7
@@
void __uatomic_dec(void *addr, int len)
#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-#if
((CAA_BITS_PER_LONG != 64) && defined(URCU_ARCH_I386))
+#if
def URCU_ARCH_X86_NO_CAS
/* For backwards compat */
#define CONFIG_RCU_COMPAT_ARCH 1
/* For backwards compat */
#define CONFIG_RCU_COMPAT_ARCH 1
This page took
0.02855 seconds
and
4
git commands to generate.