From: Brad Smith Date: Tue, 4 Jun 2024 03:51:06 +0000 (-0400) Subject: Adjust shell script to allow Bash in other locations X-Git-Url: http://git.lttng.org/?a=commitdiff_plain;h=refs%2Fheads%2Fmaster;hp=6b071d73cffc66df0bdb9ee3c062143f06923c78;p=userspace-rcu.git Adjust shell script to allow Bash in other locations commit da56d5cad05a ("Adjust shell scripts to allow Bash in other locations") adjusted most of the shell scripts, except one. Signed-off-by: Brad Smith Signed-off-by: Mathieu Desnoyers Change-Id: I30ee8cb36d874f5eaadf7b17c60cfd362ecfa2f0 --- diff --git a/extras/abi/dump_abi.sh b/extras/abi/dump_abi.sh index a7bd5fd..673b839 100755 --- a/extras/abi/dump_abi.sh +++ b/extras/abi/dump_abi.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # SPDX-FileCopyrightText: 2021 Michael Jeanson # diff --git a/include/urcu/futex.h b/include/urcu/futex.h index 9d0a997..f1181ee 100644 --- a/include/urcu/futex.h +++ b/include/urcu/futex.h @@ -19,17 +19,23 @@ #if (defined(__linux__) && defined(__NR_futex)) /* For backwards compat */ -#define CONFIG_RCU_HAVE_FUTEX 1 +# define CONFIG_RCU_HAVE_FUTEX 1 -#include -#include -#include -#include +# include +# include +# include +# include +# include #elif defined(__FreeBSD__) -#include -#include +# include +# include + +#elif defined(__OpenBSD__) + +# include +# include #endif @@ -37,8 +43,10 @@ extern "C" { #endif -#define FUTEX_WAIT 0 -#define FUTEX_WAKE 1 +#ifndef __OpenBSD__ +# define FUTEX_WAIT 0 +# define FUTEX_WAKE 1 +#endif /* * sys_futex compatibility header. @@ -64,8 +72,7 @@ extern int compat_futex_async(int32_t *uaddr, int op, int32_t val, static inline int futex(int32_t *uaddr, int op, int32_t val, const struct timespec *timeout, int32_t *uaddr2, int32_t val3) { - return syscall(__NR_futex, uaddr, op, val, timeout, - uaddr2, val3); + return syscall(__NR_futex, uaddr, op, val, timeout, uaddr2, val3); } static inline int futex_noasync(int32_t *uaddr, int op, int32_t val, @@ -107,9 +114,7 @@ static inline int futex_async(int32_t *uaddr, int op, int32_t val, #elif defined(__FreeBSD__) static inline int futex_async(int32_t *uaddr, int op, int32_t val, - const struct timespec *timeout, - int32_t *uaddr2 __attribute__((unused)), - int32_t val3 __attribute__((unused))) + const struct timespec *timeout, int32_t *uaddr2, int32_t val3) { int umtx_op; void *umtx_uaddr = NULL, *umtx_uaddr2 = NULL; @@ -118,6 +123,13 @@ static inline int futex_async(int32_t *uaddr, int op, int32_t val, ._clockid = CLOCK_MONOTONIC, }; + /* + * Check if NULL or zero. Don't let users expect that they are + * taken into account. + */ + urcu_posix_assert(!uaddr2); + urcu_posix_assert(!val3); + switch (op) { case FUTEX_WAIT: /* On FreeBSD, a "u_int" is a 32-bit integer. */ @@ -146,6 +158,48 @@ static inline int futex_noasync(int32_t *uaddr, int op, int32_t val, return futex_async(uaddr, op, val, timeout, uaddr2, val3); } +#elif defined(__OpenBSD__) + +static inline int futex_noasync(int32_t *uaddr, int op, int32_t val, + const struct timespec *timeout, int32_t *uaddr2, int32_t val3) +{ + int ret; + + /* + * Check that val3 is zero. Don't let users expect that it is + * taken into account. 
+ */ + urcu_posix_assert(!val3); + + ret = futex((volatile uint32_t *) uaddr, op, val, timeout, + (volatile uint32_t *) uaddr2); + if (caa_unlikely(ret < 0 && errno == ENOSYS)) { + return compat_futex_noasync(uaddr, op, val, timeout, + uaddr2, val3); + } + return ret; +} + +static inline int futex_async(int32_t *uaddr, int op, int32_t val, + const struct timespec *timeout, int32_t *uaddr2, int32_t val3) +{ + int ret; + + /* + * Check that val3 is zero. Don't let users expect that it is + * taken into account. + */ + urcu_posix_assert(!val3); + + ret = futex((volatile uint32_t *) uaddr, op, val, timeout, + (volatile uint32_t *) uaddr2); + if (caa_unlikely(ret < 0 && errno == ENOSYS)) { + return compat_futex_async(uaddr, op, val, timeout, + uaddr2, val3); + } + return ret; +} + #elif defined(__CYGWIN__) /* diff --git a/include/urcu/uatomic/generic.h b/include/urcu/uatomic/generic.h index 8f8c437..ed655bb 100644 --- a/include/urcu/uatomic/generic.h +++ b/include/urcu/uatomic/generic.h @@ -15,7 +15,6 @@ */ #include -#include #include #include @@ -27,125 +26,61 @@ extern "C" { #define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v))) #endif -#define uatomic_load_store_return_op(op, addr, v, mo) \ - __extension__ \ - ({ \ - \ - switch (mo) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ - __typeof__((*addr)) _value = op(addr, v); \ - \ - switch (mo) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ - _value; \ +/* + * Can be defined for the architecture. + * + * What needs to be emitted _before_ the `operation' with memory ordering `mo'. + */ +#ifndef _cmm_compat_c11_smp_mb__before_mo +# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb() +#endif + +/* + * Can be defined for the architecture. + * + * What needs to be emitted _after_ the `operation' with memory ordering `mo'. 
+ */ +#ifndef _cmm_compat_c11_smp_mb__after_mo +# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb() +#endif + +#define uatomic_load_store_return_op(op, addr, v, mo) \ + __extension__ \ + ({ \ + _cmm_compat_c11_smp_mb__before_mo(op, mo); \ + __typeof__((*addr)) _value = op(addr, v); \ + _cmm_compat_c11_smp_mb__after_mo(op, mo); \ + \ + _value; \ }) -#define uatomic_load_store_op(op, addr, v, mo) \ - do { \ - switch (mo) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ - op(addr, v); \ - \ - switch (mo) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ +#define uatomic_load_store_op(op, addr, v, mo) \ + do { \ + _cmm_compat_c11_smp_mb__before_mo(op, mo); \ + op(addr, v); \ + _cmm_compat_c11_smp_mb__after_mo(op, mo); \ } while (0) -#define uatomic_store(addr, v, mo) \ - do { \ - switch (mo) { \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ - uatomic_set(addr, v); \ - \ - switch (mo) { \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ +#define uatomic_store(addr, v, mo) \ + do { \ + _cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo); \ + uatomic_set(addr, v); \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo); \ } while (0) -#define uatomic_and_mo(addr, v, mo) \ +#define uatomic_and_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_and, addr, v, mo) -#define uatomic_or_mo(addr, v, mo) \ +#define uatomic_or_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_or, addr, v, mo) -#define uatomic_add_mo(addr, v, mo) \ +#define uatomic_add_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_add, addr, v, mo) -#define uatomic_sub_mo(addr, v, mo) \ +#define uatomic_sub_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_sub, addr, v, mo) -#define uatomic_inc_mo(addr, mo) \ +#define uatomic_inc_mo(addr, mo) \ uatomic_load_store_op(uatomic_add, addr, 1, mo) #define uatomic_dec_mo(addr, mo) \ @@ -157,58 +92,14 @@ extern "C" { #define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \ __extension__ \ ({ \ - switch (mos) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ + _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \ __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \ new); \ \ if (_value == (old)) { \ - switch (mos) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \ } else { \ - switch (mof) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - 
cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \ } \ _value; \ }) @@ -222,7 +113,6 @@ extern "C" { #define uatomic_sub_return_mo(addr, v, mo) \ uatomic_load_store_return_op(uatomic_sub_return, addr, v) - #ifndef uatomic_read #define uatomic_read(addr) CMM_LOAD_SHARED(*(addr)) #endif @@ -230,35 +120,9 @@ extern "C" { #define uatomic_load(addr, mo) \ __extension__ \ ({ \ - switch (mo) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ + _cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo); \ __typeof__(*(addr)) _rcu_value = uatomic_read(addr); \ - \ - switch (mo) { \ - case CMM_RELAXED: \ - break; \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_read, mo); \ \ _rcu_value; \ }) diff --git a/include/urcu/uatomic/x86.h b/include/urcu/uatomic/x86.h index b5725e0..616eee9 100644 --- a/include/urcu/uatomic/x86.h +++ b/include/urcu/uatomic/x86.h @@ -8,6 +8,8 @@ #ifndef _URCU_ARCH_UATOMIC_X86_H #define _URCU_ARCH_UATOMIC_X86_H +#include /* For abort(3). */ + /* * Code inspired from libuatomic_ops-1.2, inherited in part from the * Boehm-Demers-Weiser conservative garbage collector. @@ -630,6 +632,474 @@ extern unsigned long _compat_uatomic_add_return(void *addr, #define cmm_smp_mb__before_uatomic_dec() cmm_barrier() #define cmm_smp_mb__after_uatomic_dec() cmm_barrier() +static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is not necessary for CMM_SEQ_CST because, only a + * previous store can be reordered with the load. However, emitting the + * memory barrier after the store is sufficient to prevent reordering + * between the two. This follows toolchains decision of emitting the + * memory fence on the stores instead of the loads. + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. + */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is not necessary for CMM_SEQ_CST because following + * loads and stores cannot be reordered with the load. + * + * A SMP barrier is however necessary for CMM_SEQ_CST_FENCE to respect + * the memory model, since the underlying operation does not have a lock + * prefix. + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. 
+ */ + switch (mo) { + case CMM_SEQ_CST_FENCE: + cmm_smp_mb(); + break; + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_SEQ_CST: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is not necessary for CMM_SEQ_CST because the store can + * only be reodered with later loads + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. + */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is necessary for CMM_SEQ_CST because the store can be + * reorded with later loads. Since no memory barrier is being emitted + * before loads, one has to be emitted after the store. This follows + * toolchains decision of emitting the memory fence on the stores instead + * of the loads. + * + * A SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the + * memory model, since the underlying store does not have a lock prefix. + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. + */ + switch (mo) { + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + cmm_smp_mb(); + break; + case CMM_RELAXED: /* Fall-through */ + case CMM_RELEASE: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_xchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_xchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_cmpxchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo) +{ + /* NOP. 
uatomic_cmpxchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_and has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_and has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_or has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_or has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_add has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_add has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_sub has explicit lock prefix. 
*/ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_sub has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_inc has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_inc has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_dec has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_dec has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_add_return has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_add_return has explicit lock prefix. 
*/ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_sub_return has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_sub_return has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +#define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \ + do { \ + _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \ + } while (0) + +#define _cmm_compat_c11_smp_mb__after_mo(operation, mo) \ + do { \ + _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \ + } while (0) + + #ifdef __cplusplus } #endif diff --git a/src/compat-smp.h b/src/compat-smp.h index 31fa979..5da8d6a 100644 --- a/src/compat-smp.h +++ b/src/compat-smp.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifier: LGPL-2.1-only + * SPDX-License-Identifier: MIT * * Copyright (C) 2011-2012 Mathieu Desnoyers * Copyright (C) 2019 Michael Jeanson @@ -164,7 +164,7 @@ static inline int get_cpu_mask_from_sysfs(char *buf, size_t max_bytes, const cha total_bytes_read += bytes_read; assert(total_bytes_read <= max_bytes); - } while (max_bytes > total_bytes_read && bytes_read > 0); + } while (max_bytes > total_bytes_read && bytes_read != 0); /* * Make sure the mask read is a null terminated string.
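
Illustrative sketch (not taken from the tree) of the calling convention of the futex_noasync() wrapper that the futex.h hunks above add for OpenBSD and already provide for Linux and FreeBSD. The waiter()/waker() helpers and the `flag' variable are invented for the example; futex_noasync(), FUTEX_WAIT/FUTEX_WAKE, uatomic_load()/uatomic_store() and the CMM_* orders come from the patched headers, and urcu/futex.h is assumed to be the installed path of the header being patched (it is primarily an internal compatibility header).

#include <stdint.h>
#include <stddef.h>

#include <urcu/futex.h>
#include <urcu/uatomic.h>

static int32_t flag;

/* Block until another thread publishes a non-zero flag. */
static void waiter(void)
{
	/* Re-check after every return: FUTEX_WAIT can wake spuriously or with EINTR. */
	while (uatomic_load(&flag, CMM_ACQUIRE) == 0)
		futex_noasync(&flag, FUTEX_WAIT, 0, NULL, NULL, 0);
}

/* Publish the flag and wake up to one waiter. */
static void waker(void)
{
	uatomic_store(&flag, 1, CMM_RELEASE);
	futex_noasync(&flag, FUTEX_WAKE, 1, NULL, NULL, 0);
}

Note the difference between the two new BSD paths: on FreeBSD the wrapper asserts that both uaddr2 and val3 are NULL/zero, while on OpenBSD only val3 must be zero and uaddr2 is passed straight through to futex(2).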
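
Similarly, a rough sketch of what the new _cmm_compat_c11_smp_mb__before_mo()/_cmm_compat_c11_smp_mb__after_mo() hooks buy for a plain store. The publish() helper and the `ready' variable are invented for the example; the expansion and the barrier choices described in the comment follow the generic.h and x86.h hunks above.

#include <stdint.h>

#include <urcu/uatomic.h>

static int32_t ready;

static void publish(void)
{
	/*
	 * Per the reworked generic.h, this expands to:
	 *
	 *   _cmm_compat_c11_smp_mb__before_mo(uatomic_set, CMM_SEQ_CST);
	 *   uatomic_set(&ready, 1);
	 *   _cmm_compat_c11_smp_mb__after_mo(uatomic_set, CMM_SEQ_CST);
	 *
	 * With the generic fallback both hooks emit cmm_smp_mb().  With the
	 * x86 helpers above, the "before" hook is only a compiler barrier and
	 * the "after" hook is the full fence, matching the toolchains'
	 * convention of fencing on stores rather than on loads.
	 */
	uatomic_store(&ready, 1, CMM_SEQ_CST);
}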