+// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
+// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
+// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
+// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LicenseRef-Boehm-GC
+
#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H
/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
* Code inspired from libuatomic_ops-1.2, inherited in part from the
* Boehm-Demers-Weiser conservative garbage collector.
*/
+#include <urcu/arch.h>
+#include <urcu/config.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
* Derived from AO_compare_and_swap() and AO_test_and_set_full().
*/
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
+/*
+ * The __hp() macro casts the void pointer @x to a pointer to a structure
+ * containing an array of char of the specified size. This allows passing the
+ * @addr arguments of the following inline functions as "m" and "+m" operands
+ * to the assembly. The @size parameter should be a constant to support
+ * compilers such as clang which do not support VLA. Create typedefs because
+ * C++ does not allow types to be defined in casts.
+ */
+
+typedef struct { char v[1]; } __hp_1;
+typedef struct { char v[2]; } __hp_2;
+typedef struct { char v[4]; } __hp_4;
+typedef struct { char v[8]; } __hp_8;
+
+#define __hp(size, x) ((__hp_##size *)(x))
#define _uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
__asm__ __volatile__(
"lock; cmpxchgb %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
+ : "+a"(result), "+m"(*__hp(1, addr))
: "q"((unsigned char)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgw %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
+ : "+a"(result), "+m"(*__hp(2, addr))
: "r"((unsigned short)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgl %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
+ : "+a"(result), "+m"(*__hp(4, addr))
: "r"((unsigned int)_new)
: "memory");
return result;
__asm__ __volatile__(
"lock; cmpxchgq %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
+ : "+a"(result), "+m"(*__hp(8, addr))
: "r"((unsigned long)_new)
: "memory");
return result;
unsigned char result;
__asm__ __volatile__(
"xchgb %0, %1"
- : "=q"(result), "+m"(*__hp(addr))
+ : "=q"(result), "+m"(*__hp(1, addr))
: "0" ((unsigned char)val)
: "memory");
return result;
unsigned short result;
__asm__ __volatile__(
"xchgw %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
+ : "=r"(result), "+m"(*__hp(2, addr))
: "0" ((unsigned short)val)
: "memory");
return result;
unsigned int result;
__asm__ __volatile__(
"xchgl %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
+ : "=r"(result), "+m"(*__hp(4, addr))
: "0" ((unsigned int)val)
: "memory");
return result;
unsigned long result;
__asm__ __volatile__(
"xchgq %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
+ : "=r"(result), "+m"(*__hp(8, addr))
: "0" ((unsigned long)val)
: "memory");
return result;
__asm__ __volatile__(
"lock; xaddb %1, %0"
- : "+m"(*__hp(addr)), "+q" (result)
+ : "+m"(*__hp(1, addr)), "+q" (result)
:
: "memory");
return result + (unsigned char)val;
__asm__ __volatile__(
"lock; xaddw %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
+ : "+m"(*__hp(2, addr)), "+r" (result)
:
: "memory");
return result + (unsigned short)val;
__asm__ __volatile__(
"lock; xaddl %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
+ : "+m"(*__hp(4, addr)), "+r" (result)
:
: "memory");
return result + (unsigned int)val;
__asm__ __volatile__(
"lock; xaddq %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
+ : "+m"(*__hp(8, addr)), "+r" (result)
:
: "memory");
return result + (unsigned long)val;
{
__asm__ __volatile__(
"lock; andb %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(1, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andw %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(2, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andl %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(4, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; andq %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(8, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orb %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(1, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orw %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(2, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orl %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(4, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; orq %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(8, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addb %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(1, addr))
: "iq" ((unsigned char)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addw %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(2, addr))
: "ir" ((unsigned short)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addl %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(4, addr))
: "ir" ((unsigned int)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; addq %1, %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(8, addr))
: "er" ((unsigned long)val)
: "memory");
return;
{
__asm__ __volatile__(
"lock; incb %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(1, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incw %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(2, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incl %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(4, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; incq %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(8, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decb %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(1, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decw %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(2, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decl %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(4, addr))
:
: "memory");
return;
{
__asm__ __volatile__(
"lock; decq %0"
- : "=m"(*__hp(addr))
+ : "=m"(*__hp(8, addr))
:
: "memory");
return;
#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+#ifdef URCU_ARCH_X86_NO_CAS
+
+/* For backwards compat */
+#define CONFIG_RCU_COMPAT_ARCH 1
+
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);