Adjust shell script to allow Bash in other locations
author    Brad Smith <brad@comstyle.com>
          Tue, 4 Jun 2024 03:51:06 +0000 (23:51 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
          Tue, 4 Jun 2024 13:24:26 +0000 (09:24 -0400)
commit da56d5cad05a ("Adjust shell scripts to allow Bash in other locations")
adjusted most of the shell scripts, except one.

Signed-off-by: Brad Smith <brad@comstyle.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: I30ee8cb36d874f5eaadf7b17c60cfd362ecfa2f0

extras/abi/dump_abi.sh
include/urcu/futex.h
include/urcu/uatomic/generic.h
include/urcu/uatomic/x86.h
src/compat-smp.h

index a7bd5fda4c9ffcca25b51b7cd32298c1c7ba804a..673b8391e16442d244076c79ff2ef20b91e88eb6 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # SPDX-FileCopyrightText: 2021 Michael Jeanson <mjeanson@efficios.com>
 #
index 9d0a997473c025563bd9c557225e2029ecd6f831..f1181ee4a6011022af997b53b375addc93ba9137 100644 (file)
 #if (defined(__linux__) && defined(__NR_futex))
 
 /* For backwards compat */
-#define CONFIG_RCU_HAVE_FUTEX 1
+# define CONFIG_RCU_HAVE_FUTEX 1
 
-#include <unistd.h>
-#include <errno.h>
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
+# include <unistd.h>
+# include <errno.h>
+# include <urcu/compiler.h>
+# include <urcu/arch.h>
+# include <urcu/assert.h>
 
 #elif defined(__FreeBSD__)
 
-#include <sys/types.h>
-#include <sys/umtx.h>
+# include <sys/types.h>
+# include <sys/umtx.h>
+
+#elif defined(__OpenBSD__)
+
+# include <sys/time.h>
+# include <sys/futex.h>
 
 #endif
 
 extern "C" {
 #endif
 
-#define FUTEX_WAIT             0
-#define FUTEX_WAKE             1
+#ifndef __OpenBSD__
+# define FUTEX_WAIT            0
+# define FUTEX_WAKE            1
+#endif
 
 /*
  * sys_futex compatibility header.
@@ -64,8 +72,7 @@ extern int compat_futex_async(int32_t *uaddr, int op, int32_t val,
 static inline int futex(int32_t *uaddr, int op, int32_t val,
                const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
 {
-       return syscall(__NR_futex, uaddr, op, val, timeout,
-                       uaddr2, val3);
+       return syscall(__NR_futex, uaddr, op, val, timeout, uaddr2, val3);
 }
 
 static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
@@ -107,9 +114,7 @@ static inline int futex_async(int32_t *uaddr, int op, int32_t val,
 #elif defined(__FreeBSD__)
 
 static inline int futex_async(int32_t *uaddr, int op, int32_t val,
-               const struct timespec *timeout,
-               int32_t *uaddr2 __attribute__((unused)),
-               int32_t val3 __attribute__((unused)))
+               const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
 {
        int umtx_op;
        void *umtx_uaddr = NULL, *umtx_uaddr2 = NULL;
@@ -118,6 +123,13 @@ static inline int futex_async(int32_t *uaddr, int op, int32_t val,
                ._clockid = CLOCK_MONOTONIC,
        };
 
+       /*
+        * Check that uaddr2 is NULL and val3 is zero. Don't let users
+        * expect that they are taken into account.
+        */
+       urcu_posix_assert(!uaddr2);
+       urcu_posix_assert(!val3);
+
        switch (op) {
        case FUTEX_WAIT:
                /* On FreeBSD, a "u_int" is a 32-bit integer. */
@@ -146,6 +158,48 @@ static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
        return futex_async(uaddr, op, val, timeout, uaddr2, val3);
 }
 
+#elif defined(__OpenBSD__)
+
+static inline int futex_noasync(int32_t *uaddr, int op, int32_t val,
+               const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+       int ret;
+
+       /*
+        * Check that val3 is zero. Don't let users expect that it is
+        * taken into account.
+        */
+       urcu_posix_assert(!val3);
+
+       ret = futex((volatile uint32_t *) uaddr, op, val, timeout,
+               (volatile uint32_t *) uaddr2);
+       if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+               return compat_futex_noasync(uaddr, op, val, timeout,
+                               uaddr2, val3);
+       }
+       return ret;
+}
+
+static inline int futex_async(int32_t *uaddr, int op, int32_t val,
+               const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+       int ret;
+
+       /*
+        * Check that val3 is zero. Don't let users expect that it is
+        * taken into account.
+        */
+       urcu_posix_assert(!val3);
+
+       ret = futex((volatile uint32_t *) uaddr, op, val, timeout,
+               (volatile uint32_t *) uaddr2);
+       if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+               return compat_futex_async(uaddr, op, val, timeout,
+                               uaddr2, val3);
+       }
+       return ret;
+}
+
 #elif defined(__CYGWIN__)
 
 /*
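
For orientation, here is a minimal sketch of how these wrappers are typically driven. This is not part of the patch; the flag variable and helper names are illustrative. Passing NULL and 0 for uaddr2 and val3 keeps the calls within what the new OpenBSD wrappers assert on:

	#include <stdint.h>
	#include <urcu/futex.h>
	#include <urcu/uatomic.h>

	static int32_t flag;	/* 0 = not ready, 1 = ready */

	/* Block until another thread sets the flag. */
	static void wait_for_flag(void)
	{
		/* Sleep only while the flag still reads 0; re-check after wakeup. */
		while (uatomic_read(&flag) == 0)
			futex_noasync(&flag, FUTEX_WAIT, 0, NULL, NULL, 0);
	}

	/* Set the flag and wake at most one waiter. */
	static void set_flag(void)
	{
		uatomic_set(&flag, 1);
		futex_noasync(&flag, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
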
index 8f8c43785ba0c7609d827ab41a6d5d5882dca0ab..ed655bb8def13a5120990d049eb12199ee47376a 100644 (file)
@@ -15,7 +15,6 @@
  */
 
 #include <stdint.h>
-#include <stdlib.h>
 #include <urcu/compiler.h>
 #include <urcu/system.h>
 
@@ -27,125 +26,61 @@ extern "C" {
 #define uatomic_set(addr, v)   ((void) CMM_STORE_SHARED(*(addr), (v)))
 #endif
 
-#define uatomic_load_store_return_op(op, addr, v, mo)                  \
-       __extension__                                                   \
-       ({                                                              \
-                                                                       \
-               switch (mo) {                                           \
-               case CMM_ACQUIRE:                                       \
-               case CMM_CONSUME:                                       \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_RELEASE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-                                                                       \
-               __typeof__((*addr)) _value = op(addr, v);               \
-                                                                       \
-               switch (mo) {                                           \
-               case CMM_CONSUME:                                       \
-                       cmm_smp_read_barrier_depends();                 \
-                       break;                                          \
-               case CMM_ACQUIRE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               case CMM_RELAXED:                                       \
-               case CMM_RELEASE:                                       \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-               _value;                                                 \
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _before_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__before_mo
+# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb()
+#endif
+
+/*
+ * Can be defined for the architecture.
+ *
+ * What needs to be emitted _after_ the `operation' with memory ordering `mo'.
+ */
+#ifndef _cmm_compat_c11_smp_mb__after_mo
+# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb()
+#endif
+
+#define uatomic_load_store_return_op(op, addr, v, mo)          \
+       __extension__                                           \
+       ({                                                      \
+               _cmm_compat_c11_smp_mb__before_mo(op, mo);      \
+               __typeof__((*addr)) _value = op(addr, v);       \
+               _cmm_compat_c11_smp_mb__after_mo(op, mo);       \
+                                                               \
+               _value;                                         \
        })
 
-#define uatomic_load_store_op(op, addr, v, mo)                         \
-       do {                                                            \
-               switch (mo) {                                           \
-               case CMM_ACQUIRE:                                       \
-               case CMM_CONSUME:                                       \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_RELEASE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-                                                                       \
-               op(addr, v);                                            \
-                                                                       \
-               switch (mo) {                                           \
-               case CMM_CONSUME:                                       \
-                       cmm_smp_read_barrier_depends();                 \
-                       break;                                          \
-               case CMM_ACQUIRE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               case CMM_RELAXED:                                       \
-               case CMM_RELEASE:                                       \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
+#define uatomic_load_store_op(op, addr, v, mo)                 \
+       do {                                                    \
+               _cmm_compat_c11_smp_mb__before_mo(op, mo);      \
+               op(addr, v);                                    \
+               _cmm_compat_c11_smp_mb__after_mo(op, mo);       \
        } while (0)
 
-#define uatomic_store(addr, v, mo)                     \
-       do {                                            \
-               switch (mo) {                           \
-               case CMM_RELAXED:                       \
-                       break;                          \
-               case CMM_RELEASE:                       \
-               case CMM_SEQ_CST:                       \
-               case CMM_SEQ_CST_FENCE:                 \
-                       cmm_smp_mb();                   \
-                       break;                          \
-               default:                                \
-                       abort();                        \
-               }                                       \
-                                                       \
-               uatomic_set(addr, v);                   \
-                                                       \
-               switch (mo) {                           \
-               case CMM_RELAXED:                       \
-               case CMM_RELEASE:                       \
-                       break;                          \
-               case CMM_SEQ_CST:                       \
-               case CMM_SEQ_CST_FENCE:                 \
-                       cmm_smp_mb();                   \
-                       break;                          \
-               default:                                \
-                       abort();                        \
-               }                                       \
+#define uatomic_store(addr, v, mo)                                     \
+       do {                                                            \
+               _cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo);     \
+               uatomic_set(addr, v);                                   \
+               _cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo);      \
        } while (0)
 
-#define uatomic_and_mo(addr, v, mo)                            \
+#define uatomic_and_mo(addr, v, mo)                    \
        uatomic_load_store_op(uatomic_and, addr, v, mo)
 
-#define uatomic_or_mo(addr, v, mo)                             \
+#define uatomic_or_mo(addr, v, mo)                     \
        uatomic_load_store_op(uatomic_or, addr, v, mo)
 
-#define uatomic_add_mo(addr, v, mo)                            \
+#define uatomic_add_mo(addr, v, mo)                    \
        uatomic_load_store_op(uatomic_add, addr, v, mo)
 
-#define uatomic_sub_mo(addr, v, mo)                            \
+#define uatomic_sub_mo(addr, v, mo)                    \
        uatomic_load_store_op(uatomic_sub, addr, v, mo)
 
-#define uatomic_inc_mo(addr, mo)                               \
+#define uatomic_inc_mo(addr, mo)                       \
        uatomic_load_store_op(uatomic_add, addr, 1, mo)
 
 #define uatomic_dec_mo(addr, mo)                               \
@@ -157,58 +92,14 @@ extern "C" {
 #define uatomic_cmpxchg_mo(addr, old, new, mos, mof)                   \
        __extension__                                                   \
        ({                                                              \
-               switch (mos) {                                          \
-               case CMM_ACQUIRE:                                       \
-               case CMM_CONSUME:                                       \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_RELEASE:                                       \
-               case CMM_ACQ_REL:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-                                                                       \
+               _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \
                __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
                                                        new);           \
                                                                        \
                if (_value == (old)) {                                  \
-                       switch (mos) {                                  \
-                       case CMM_CONSUME:                               \
-                               cmm_smp_read_barrier_depends();         \
-                               break;                                  \
-                       case CMM_ACQUIRE:                               \
-                       case CMM_ACQ_REL:                               \
-                       case CMM_SEQ_CST:                               \
-                       case CMM_SEQ_CST_FENCE:                         \
-                               cmm_smp_mb();                           \
-                               break;                                  \
-                       case CMM_RELAXED:                               \
-                       case CMM_RELEASE:                               \
-                               break;                                  \
-                       default:                                        \
-                               abort();                                \
-                       }                                               \
+                       _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \
                } else {                                                \
-                       switch (mof) {                                  \
-                       case CMM_CONSUME:                               \
-                               cmm_smp_read_barrier_depends();         \
-                               break;                                  \
-                       case CMM_ACQUIRE:                               \
-                       case CMM_ACQ_REL:                               \
-                       case CMM_SEQ_CST:                               \
-                       case CMM_SEQ_CST_FENCE:                         \
-                               cmm_smp_mb();                           \
-                               break;                                  \
-                       case CMM_RELAXED:                               \
-                       case CMM_RELEASE:                               \
-                               break;                                  \
-                       default:                                        \
-                               abort();                                \
-                       }                                               \
+                       _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \
                }                                                       \
                _value;                                                 \
        })
@@ -222,7 +113,6 @@ extern "C" {
 #define uatomic_sub_return_mo(addr, v, mo)                             \
        uatomic_load_store_return_op(uatomic_sub_return, addr, v)
 
-
 #ifndef uatomic_read
 #define uatomic_read(addr)     CMM_LOAD_SHARED(*(addr))
 #endif
@@ -230,35 +120,9 @@ extern "C" {
 #define uatomic_load(addr, mo)                                         \
        __extension__                                                   \
        ({                                                              \
-               switch (mo) {                                           \
-               case CMM_ACQUIRE:                                       \
-               case CMM_CONSUME:                                       \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
-                                                                       \
+               _cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo);    \
                __typeof__(*(addr)) _rcu_value = uatomic_read(addr);    \
-                                                                       \
-               switch (mo) {                                           \
-               case CMM_RELAXED:                                       \
-                       break;                                          \
-               case CMM_CONSUME:                                       \
-                       cmm_smp_read_barrier_depends();                 \
-                       break;                                          \
-               case CMM_ACQUIRE:                                       \
-               case CMM_SEQ_CST:                                       \
-               case CMM_SEQ_CST_FENCE:                                 \
-                       cmm_smp_mb();                                   \
-                       break;                                          \
-               default:                                                \
-                       abort();                                        \
-               }                                                       \
+               _cmm_compat_c11_smp_mb__after_mo(uatomic_read, mo);     \
                                                                        \
                _rcu_value;                                             \
        })
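
The refactoring reduces every memory-ordering decision to two hook macros. As a hypothetical illustration (not from this patch), an architecture whose uatomic operations are all fully ordered in hardware could override both hooks with plain compiler barriers before including the generic header:

	/*
	 * Hypothetical arch header: every uatomic operation is already
	 * fully ordered, so only compiler reordering needs fencing.
	 * These must be defined before urcu/uatomic/generic.h is pulled in.
	 */
	#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)	cmm_barrier()
	#define _cmm_compat_c11_smp_mb__after_mo(operation, mo)		cmm_barrier()

	#include <urcu/uatomic/generic.h>

The x86 header below takes the finer-grained route, pasting the operation name into per-operation helpers.
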
index b5725e0f7fd5a5203b9a2775a99731bcbc097cc2..616eee9be3ca60b4be62c8a10cadb0aff2d1ea8d 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _URCU_ARCH_UATOMIC_X86_H
 #define _URCU_ARCH_UATOMIC_X86_H
 
+#include <stdlib.h>            /* For abort(3). */
+
 /*
  * Code inspired from libuatomic_ops-1.2, inherited in part from the
  * Boehm-Demers-Weiser conservative garbage collector.
@@ -630,6 +632,474 @@ extern unsigned long _compat_uatomic_add_return(void *addr,
 #define cmm_smp_mb__before_uatomic_dec()       cmm_barrier()
 #define cmm_smp_mb__after_uatomic_dec()                cmm_barrier()
 
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo)
+{
+       /*
+        * A SMP barrier is not necessary for CMM_SEQ_CST because only a
+        * previous store can be reordered with the load.  However, emitting the
+        * memory barrier after the store is sufficient to prevent reordering
+        * between the two.  This follows the toolchains' decision of emitting
+        * the memory fence on the stores instead of the loads.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo)
+{
+       /*
+        * A SMP barrier is not necessary for CMM_SEQ_CST because following
+        * loads and stores cannot be reordered with the load.
+        *
+        * A SMP barrier is however necessary for CMM_SEQ_CST_FENCE to respect
+        * the memory model, since the underlying operation does not have a lock
+        * prefix.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_SEQ_CST_FENCE:
+               cmm_smp_mb();
+               break;
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_SEQ_CST:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo)
+{
+       /*
+        * A SMP barrier is not necessary for CMM_SEQ_CST because the store can
+        * only be reordered with later loads.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo)
+{
+       /*
+        * A SMP barrier is necessary for CMM_SEQ_CST because the store can be
+        * reordered with later loads.  Since no memory barrier is being emitted
+        * before loads, one has to be emitted after the store.  This follows
+        * the toolchains' decision of emitting the memory fence on the stores
+        * instead of the loads.
+        *
+        * A SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the
+        * memory model, since the underlying store does not have a lock prefix.
+        *
+        * A compiler barrier is necessary because the underlying operation does
+        * not clobber the registers.
+        */
+       switch (mo) {
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               cmm_smp_mb();
+               break;
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_RELEASE:
+               cmm_barrier();
+               break;
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       default:
+               abort();
+               break;
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_xchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_xchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_cmpxchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_cmpxchg has implicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_and has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_and has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_or has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_or has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_inc has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_inc has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_dec has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_dec has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_add_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo)
+{
+       /* NOP. uatomic_sub_return has explicit lock prefix. */
+       switch (mo) {
+       case CMM_RELAXED:       /* Fall-through */
+       case CMM_ACQUIRE:       /* Fall-through */
+       case CMM_CONSUME:       /* Fall-through */
+       case CMM_RELEASE:       /* Fall-through */
+       case CMM_ACQ_REL:       /* Fall-through */
+       case CMM_SEQ_CST:       /* Fall-through */
+       case CMM_SEQ_CST_FENCE:
+               break;
+       default:
+               abort();
+       }
+}
+
+#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)                       \
+       do {                                                    \
+               _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo);       \
+       } while (0)
+
+#define _cmm_compat_c11_smp_mb__after_mo(operation, mo)                        \
+       do {                                                    \
+               _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo);        \
+       } while (0)
+
+
 #ifdef __cplusplus
 }
 #endif
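
To make the dispatch concrete, consider a sequentially consistent store on x86 with these headers: uatomic_store(&ready, 1, CMM_SEQ_CST) selects the uatomic_set helpers by token pasting. A rough expansion, with the ready variable assumed purely for illustration:

	#include <stdint.h>
	#include <urcu/uatomic.h>

	static int32_t ready;

	static void publish(void)
	{
		/* What uatomic_store(&ready, 1, CMM_SEQ_CST) boils down to: */
		_cmm_compat_c11_smp_mb__before_uatomic_set_mo(CMM_SEQ_CST);	/* cmm_barrier() */
		uatomic_set(&ready, 1);
		_cmm_compat_c11_smp_mb__after_uatomic_set_mo(CMM_SEQ_CST);	/* cmm_smp_mb() */
	}

The trailing cmm_smp_mb() is what preserves sequential consistency for a plain x86 store, since the store itself carries no lock prefix.
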
index 31fa979a7040736999823a49ca1f01b7a4d92c48..5da8d6a03d7e7fd9b4b65e9617533c9b039bb436 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * SPDX-License-Identifier: LGPL-2.1-only
+ * SPDX-License-Identifier: MIT
  *
  * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
@@ -164,7 +164,7 @@ static inline int get_cpu_mask_from_sysfs(char *buf, size_t max_bytes, const cha
 
                total_bytes_read += bytes_read;
                assert(total_bytes_read <= max_bytes);
-       } while (max_bytes > total_bytes_read && bytes_read > 0);
+       } while (max_bytes > total_bytes_read && bytes_read != 0);
 
        /*
         * Make sure the mask read is a null terminated string.
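
The new condition stops the loop only at end-of-file (bytes_read == 0); a negative return is expected to be handled inside the loop body before the condition runs. A self-contained sketch of the same read-until-EOF pattern (hypothetical helper, not the patched function itself):

	#include <errno.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Read up to max_bytes from fd, retrying on EINTR, stopping at EOF. */
	static ssize_t read_full(int fd, char *buf, size_t max_bytes)
	{
		size_t total_bytes_read = 0;
		ssize_t bytes_read;

		do {
			bytes_read = read(fd, buf + total_bytes_read,
					max_bytes - total_bytes_read);
			if (bytes_read < 0) {
				if (errno == EINTR)
					continue;	/* Interrupted: retry. */
				return -1;		/* Hard error. */
			}
			total_bytes_read += bytes_read;
		} while (max_bytes > total_bytes_read && bytes_read != 0);

		return (ssize_t) total_bytes_read;
	}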