From: Benjamin Marzinski via lttng-dev
Date: Wed, 1 May 2024 23:42:41 +0000 (-0400)
Subject: fix: handle EINTR correctly in get_cpu_mask_from_sysfs
X-Git-Url: http://git.lttng.org/?p=urcu.git;a=commitdiff_plain;h=refs%2Fheads%2Fmaster;hp=ac7352545826996e3866c599e4a8eff05fb662ca

fix: handle EINTR correctly in get_cpu_mask_from_sysfs

If the read() in get_cpu_mask_from_sysfs() fails with EINTR, the code is
supposed to retry, but the while loop condition has (bytes_read > 0),
which is false when read() fails with EINTR. The result is that the code
exits the loop, having only read part of the string.

Use (bytes_read != 0) in the while loop condition instead, since the
(bytes_read < 0) case is already handled in the loop.

Signed-off-by: Benjamin Marzinski
Signed-off-by: Mathieu Desnoyers
Change-Id: I565030d4625ae199cabc4c2ab5eb8ac49ea4dfcb
---

diff --git a/include/urcu/arch/ppc.h b/include/urcu/arch/ppc.h
index be9d158..1285b6d 100644
--- a/include/urcu/arch/ppc.h
+++ b/include/urcu/arch/ppc.h
@@ -19,7 +19,30 @@ extern "C" {
 #endif
 
-/* Include size of POWER5+ L3 cache lines: 256 bytes */
+/*
+ * Most powerpc machines have 128 bytes cache lines, but to make sure
+ * there is no false sharing on all known Power hardware, use the
+ * largest known cache line size, which is the physical size of POWER5
+ * L3 cache lines (256 bytes).
+ *
+ * "Each slice [of the L3] is 12-way set-associative, with 4,096
+ * congruence classes of 256-byte lines managed as two 128-byte sectors
+ * to match the L2 line size."
+ *
+ * From: "POWER5 system microarchitecture",
+ *       IBM Journal of Research & Development,
+ *       vol. 49, no. 4/5, July/September 2005
+ *       https://www.eecg.utoronto.ca/~moshovos/ACA08/readings/power5.pdf
+ *
+ * This value is a compile-time constant, which prevents us from
+ * querying the processor for the cache line size at runtime. We
+ * therefore need to be pessimistic and assume the largest known cache
+ * line size.
+ *
+ * This value is exposed through public headers, so tuning it for
+ * specific environments is a concern for ABI compatibility between
+ * applications and liburcu.
+ */ #define CAA_CACHE_LINE_SIZE 256 #ifdef __NO_LWSYNC__ diff --git a/include/urcu/rculfhash.h b/include/urcu/rculfhash.h index 8b57cd8..e0f4b35 100644 --- a/include/urcu/rculfhash.h +++ b/include/urcu/rculfhash.h @@ -159,8 +159,8 @@ struct cds_lfht *_cds_lfht_new_with_alloc(unsigned long init_size, unsigned long max_nr_buckets, int flags, const struct cds_lfht_mm_type *mm, - const struct cds_lfht_alloc *alloc, const struct rcu_flavor_struct *flavor, + const struct cds_lfht_alloc *alloc, pthread_attr_t *attr); /* @@ -248,7 +248,7 @@ struct cds_lfht *cds_lfht_new_with_flavor_alloc(unsigned long init_size, pthread_attr_t *attr) { return _cds_lfht_new_with_alloc(init_size, min_nr_alloc_buckets, max_nr_buckets, - flags, NULL, alloc, flavor, attr); + flags, NULL, flavor, alloc, attr); } diff --git a/include/urcu/uatomic/generic.h b/include/urcu/uatomic/generic.h index 8f8c437..ed655bb 100644 --- a/include/urcu/uatomic/generic.h +++ b/include/urcu/uatomic/generic.h @@ -15,7 +15,6 @@ */ #include -#include #include #include @@ -27,125 +26,61 @@ extern "C" { #define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v))) #endif -#define uatomic_load_store_return_op(op, addr, v, mo) \ - __extension__ \ - ({ \ - \ - switch (mo) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ - __typeof__((*addr)) _value = op(addr, v); \ - \ - switch (mo) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ - _value; \ +/* + * Can be defined for the architecture. + * + * What needs to be emitted _before_ the `operation' with memory ordering `mo'. + */ +#ifndef _cmm_compat_c11_smp_mb__before_mo +# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb() +#endif + +/* + * Can be defined for the architecture. + * + * What needs to be emitted _after_ the `operation' with memory ordering `mo'. 
+ */ +#ifndef _cmm_compat_c11_smp_mb__after_mo +# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb() +#endif + +#define uatomic_load_store_return_op(op, addr, v, mo) \ + __extension__ \ + ({ \ + _cmm_compat_c11_smp_mb__before_mo(op, mo); \ + __typeof__((*addr)) _value = op(addr, v); \ + _cmm_compat_c11_smp_mb__after_mo(op, mo); \ + \ + _value; \ }) -#define uatomic_load_store_op(op, addr, v, mo) \ - do { \ - switch (mo) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ - op(addr, v); \ - \ - switch (mo) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ +#define uatomic_load_store_op(op, addr, v, mo) \ + do { \ + _cmm_compat_c11_smp_mb__before_mo(op, mo); \ + op(addr, v); \ + _cmm_compat_c11_smp_mb__after_mo(op, mo); \ } while (0) -#define uatomic_store(addr, v, mo) \ - do { \ - switch (mo) { \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ - uatomic_set(addr, v); \ - \ - switch (mo) { \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ +#define uatomic_store(addr, v, mo) \ + do { \ + _cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo); \ + uatomic_set(addr, v); \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo); \ } while (0) -#define uatomic_and_mo(addr, v, mo) \ +#define uatomic_and_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_and, addr, v, mo) -#define uatomic_or_mo(addr, v, mo) \ +#define uatomic_or_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_or, addr, v, mo) -#define uatomic_add_mo(addr, v, mo) \ +#define uatomic_add_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_add, addr, v, mo) -#define uatomic_sub_mo(addr, v, mo) \ +#define uatomic_sub_mo(addr, v, mo) \ uatomic_load_store_op(uatomic_sub, addr, v, mo) -#define uatomic_inc_mo(addr, mo) \ +#define uatomic_inc_mo(addr, mo) \ uatomic_load_store_op(uatomic_add, addr, 1, mo) #define uatomic_dec_mo(addr, mo) \ @@ -157,58 +92,14 @@ extern "C" { #define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \ __extension__ \ ({ \ - switch (mos) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_RELEASE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ + _cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \ __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \ new); \ \ if (_value == (old)) { \ - switch (mos) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \ } else { \ - switch (mof) { \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_ACQ_REL: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - 
cmm_smp_mb(); \ - break; \ - case CMM_RELAXED: \ - case CMM_RELEASE: \ - break; \ - default: \ - abort(); \ - } \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \ } \ _value; \ }) @@ -222,7 +113,6 @@ extern "C" { #define uatomic_sub_return_mo(addr, v, mo) \ uatomic_load_store_return_op(uatomic_sub_return, addr, v) - #ifndef uatomic_read #define uatomic_read(addr) CMM_LOAD_SHARED(*(addr)) #endif @@ -230,35 +120,9 @@ extern "C" { #define uatomic_load(addr, mo) \ __extension__ \ ({ \ - switch (mo) { \ - case CMM_ACQUIRE: \ - case CMM_CONSUME: \ - case CMM_RELAXED: \ - break; \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ - \ + _cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo); \ __typeof__(*(addr)) _rcu_value = uatomic_read(addr); \ - \ - switch (mo) { \ - case CMM_RELAXED: \ - break; \ - case CMM_CONSUME: \ - cmm_smp_read_barrier_depends(); \ - break; \ - case CMM_ACQUIRE: \ - case CMM_SEQ_CST: \ - case CMM_SEQ_CST_FENCE: \ - cmm_smp_mb(); \ - break; \ - default: \ - abort(); \ - } \ + _cmm_compat_c11_smp_mb__after_mo(uatomic_read, mo); \ \ _rcu_value; \ }) diff --git a/include/urcu/uatomic/x86.h b/include/urcu/uatomic/x86.h index b5725e0..616eee9 100644 --- a/include/urcu/uatomic/x86.h +++ b/include/urcu/uatomic/x86.h @@ -8,6 +8,8 @@ #ifndef _URCU_ARCH_UATOMIC_X86_H #define _URCU_ARCH_UATOMIC_X86_H +#include /* For abort(3). */ + /* * Code inspired from libuatomic_ops-1.2, inherited in part from the * Boehm-Demers-Weiser conservative garbage collector. @@ -630,6 +632,474 @@ extern unsigned long _compat_uatomic_add_return(void *addr, #define cmm_smp_mb__before_uatomic_dec() cmm_barrier() #define cmm_smp_mb__after_uatomic_dec() cmm_barrier() +static inline void _cmm_compat_c11_smp_mb__before_uatomic_read_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is not necessary for CMM_SEQ_CST because, only a + * previous store can be reordered with the load. However, emitting the + * memory barrier after the store is sufficient to prevent reordering + * between the two. This follows toolchains decision of emitting the + * memory fence on the stores instead of the loads. + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. + */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_read_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is not necessary for CMM_SEQ_CST because following + * loads and stores cannot be reordered with the load. + * + * A SMP barrier is however necessary for CMM_SEQ_CST_FENCE to respect + * the memory model, since the underlying operation does not have a lock + * prefix. + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. 
+ */ + switch (mo) { + case CMM_SEQ_CST_FENCE: + cmm_smp_mb(); + break; + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_SEQ_CST: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_set_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is not necessary for CMM_SEQ_CST because the store can + * only be reodered with later loads + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. + */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_set_mo(enum cmm_memorder mo) +{ + /* + * A SMP barrier is necessary for CMM_SEQ_CST because the store can be + * reorded with later loads. Since no memory barrier is being emitted + * before loads, one has to be emitted after the store. This follows + * toolchains decision of emitting the memory fence on the stores instead + * of the loads. + * + * A SMP barrier is necessary for CMM_SEQ_CST_FENCE to respect the + * memory model, since the underlying store does not have a lock prefix. + * + * A compiler barrier is necessary because the underlying operation does + * not clobber the registers. + */ + switch (mo) { + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + cmm_smp_mb(); + break; + case CMM_RELAXED: /* Fall-through */ + case CMM_RELEASE: + cmm_barrier(); + break; + case CMM_ACQ_REL: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + default: + abort(); + break; + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_xchg_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_xchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_xchg_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_xchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_cmpxchg_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_cmpxchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_cmpxchg_mo(enum cmm_memorder mo) +{ + /* NOP. 
uatomic_cmpxchg has implicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_and_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_and has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_and_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_and has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_or_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_or has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_or_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_or has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_add has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_add has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_sub has explicit lock prefix. 
*/ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_sub has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_inc_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_inc has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_inc_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_inc has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_dec_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_dec has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_dec_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_dec has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_add_return_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_add_return has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_add_return_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_add_return has explicit lock prefix. 
*/ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__before_uatomic_sub_return_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_sub_return has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +static inline void _cmm_compat_c11_smp_mb__after_uatomic_sub_return_mo(enum cmm_memorder mo) +{ + /* NOP. uatomic_sub_return has explicit lock prefix. */ + switch (mo) { + case CMM_RELAXED: /* Fall-through */ + case CMM_ACQUIRE: /* Fall-through */ + case CMM_CONSUME: /* Fall-through */ + case CMM_RELEASE: /* Fall-through */ + case CMM_ACQ_REL: /* Fall-through */ + case CMM_SEQ_CST: /* Fall-through */ + case CMM_SEQ_CST_FENCE: + break; + default: + abort(); + } +} + +#define _cmm_compat_c11_smp_mb__before_mo(operation, mo) \ + do { \ + _cmm_compat_c11_smp_mb__before_ ## operation ## _mo (mo); \ + } while (0) + +#define _cmm_compat_c11_smp_mb__after_mo(operation, mo) \ + do { \ + _cmm_compat_c11_smp_mb__after_ ## operation ## _mo (mo); \ + } while (0) + + #ifdef __cplusplus } #endif diff --git a/src/Makefile.am b/src/Makefile.am index ede2669..b555c81 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -9,7 +9,7 @@ AM_CPPFLAGS += -I$(top_srcdir)/src AM_LDFLAGS=-version-info $(URCU_LIBRARY_VERSION) $(LT_NO_UNDEFINED) dist_noinst_HEADERS = urcu-die.h urcu-wait.h compat-getcpu.h \ - compat-rand.h urcu-utils.h compat-smp.h + urcu-utils.h compat-smp.h COMPAT = compat_arch.c compat_futex.c diff --git a/src/compat-rand.h b/src/compat-rand.h deleted file mode 100644 index 42fbb30..0000000 --- a/src/compat-rand.h +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-FileCopyrightText: 1996 Ulrich Drepper -// SPDX-FileCopyrightText: 2013 Pierre-Luc St-Charles -// -// SPDX-License-Identifier: LGPL-2.1-or-later - -#ifndef _COMPAT_RAND_H -#define _COMPAT_RAND_H - -/* - * Userspace RCU library - rand/rand_r Compatibility Header - * - * Note: this file is only used to simplify the code required to - * use the 'rand_r(...)' system function across multiple platforms, - * which might not always be referenced the same way. - */ - -#ifndef HAVE_RAND_R -/* - * Reentrant random function from POSIX.1c. - * Copyright (C) 1996, 1999 Free Software Foundation, Inc. - * This file is part of the GNU C Library. - * Contributed by Ulrich Drepper >, 1996. 
- */
-static inline int rand_r(unsigned int *seed)
-{
-	unsigned int next = *seed;
-	int result;
-
-	next *= 1103515245;
-	next += 12345;
-	result = (unsigned int) (next / 65536) % 2048;
-
-	next *= 1103515245;
-	next += 12345;
-	result <<= 10;
-	result ^= (unsigned int) (next / 65536) % 1024;
-
-	next *= 1103515245;
-	next += 12345;
-	result <<= 10;
-	result ^= (unsigned int) (next / 65536) % 1024;
-
-	*seed = next;
-
-	return result;
-}
-#endif /* HAVE_RAND_R */
-
-#endif /* _COMPAT_RAND_H */
diff --git a/src/compat-smp.h b/src/compat-smp.h
index 31fa979..5da8d6a 100644
--- a/src/compat-smp.h
+++ b/src/compat-smp.h
@@ -1,5 +1,5 @@
 /*
- * SPDX-License-Identifier: LGPL-2.1-only
+ * SPDX-License-Identifier: MIT
  *
  * Copyright (C) 2011-2012 Mathieu Desnoyers
  * Copyright (C) 2019 Michael Jeanson
@@ -164,7 +164,7 @@ static inline int get_cpu_mask_from_sysfs(char *buf, size_t max_bytes, const cha
 		total_bytes_read += bytes_read;
 		assert(total_bytes_read <= max_bytes);
-	} while (max_bytes > total_bytes_read && bytes_read > 0);
+	} while (max_bytes > total_bytes_read && bytes_read != 0);
 
 	/*
 	 * Make sure the mask read is a null terminated string.
diff --git a/src/rculfhash.c b/src/rculfhash.c
index 8d7c1e6..10f5b8e 100644
--- a/src/rculfhash.c
+++ b/src/rculfhash.c
@@ -1646,8 +1646,8 @@ struct cds_lfht *_cds_lfht_new_with_alloc(unsigned long init_size,
 			unsigned long max_nr_buckets,
 			int flags,
 			const struct cds_lfht_mm_type *mm,
-			const struct cds_lfht_alloc *alloc,
 			const struct rcu_flavor_struct *flavor,
+			const struct cds_lfht_alloc *alloc,
 			pthread_attr_t *attr)
 {
 	struct cds_lfht *ht;
@@ -1714,7 +1714,7 @@ struct cds_lfht *_cds_lfht_new(unsigned long init_size,
 {
 	return _cds_lfht_new_with_alloc(init_size, min_nr_alloc_buckets, max_nr_buckets,
-			flags, mm, NULL, flavor, attr);
+			flags, mm, flavor, NULL, attr);
 }
 
 void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
diff --git a/tests/common/Makefile.am b/tests/common/Makefile.am
index 2cdc273..af6d89f 100644
--- a/tests/common/Makefile.am
+++ b/tests/common/Makefile.am
@@ -2,12 +2,13 @@
 #
 # SPDX-License-Identifier: MIT
 
-AM_CPPFLAGS += -I$(top_srcdir)/src
+AM_CPPFLAGS += -I$(top_srcdir)/src -I$(top_srcdir)/tests/common
 
-noinst_HEADERS = thread-id.h
+noinst_HEADERS = \
+	api.h \
+	compat-rand.h \
+	thread-id.h
 
 noinst_LTLIBRARIES = libdebug-yield.la
 libdebug_yield_la_SOURCES = debug-yield.c debug-yield.h
-
-EXTRA_DIST = api.h
diff --git a/tests/common/compat-rand.h b/tests/common/compat-rand.h
new file mode 100644
index 0000000..42fbb30
--- /dev/null
+++ b/tests/common/compat-rand.h
@@ -0,0 +1,49 @@
+// SPDX-FileCopyrightText: 1996 Ulrich Drepper
+// SPDX-FileCopyrightText: 2013 Pierre-Luc St-Charles
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
+#ifndef _COMPAT_RAND_H
+#define _COMPAT_RAND_H
+
+/*
+ * Userspace RCU library - rand/rand_r Compatibility Header
+ *
+ * Note: this file is only used to simplify the code required to
+ * use the 'rand_r(...)' system function across multiple platforms,
+ * which might not always be referenced the same way.
+ */
+
+#ifndef HAVE_RAND_R
+/*
+ * Reentrant random function from POSIX.1c.
+ * Copyright (C) 1996, 1999 Free Software Foundation, Inc.
+ * This file is part of the GNU C Library.
+ * Contributed by Ulrich Drepper >, 1996.
+ */
+static inline int rand_r(unsigned int *seed)
+{
+	unsigned int next = *seed;
+	int result;
+
+	next *= 1103515245;
+	next += 12345;
+	result = (unsigned int) (next / 65536) % 2048;
+
+	next *= 1103515245;
+	next += 12345;
+	result <<= 10;
+	result ^= (unsigned int) (next / 65536) % 1024;
+
+	next *= 1103515245;
+	next += 12345;
+	result <<= 10;
+	result ^= (unsigned int) (next / 65536) % 1024;
+
+	*seed = next;
+
+	return result;
+}
+#endif /* HAVE_RAND_R */
+
+#endif /* _COMPAT_RAND_H */
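
For readers unfamiliar with the retry pattern the compat-smp.h change restores, the sketch below is illustrative only and is not part of the patch; the function name read_mask_retry_eintr and its signature are hypothetical. It shows why the loop must test (bytes_read != 0) rather than (bytes_read > 0): after an EINTR retry, bytes_read is -1, and a "> 0" test would wrongly end the loop with a partial read.

#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Illustrative sketch only (not library code): read up to max_bytes
 * from fd into buf, retrying when read() is interrupted by a signal.
 */
static ssize_t read_mask_retry_eintr(int fd, char *buf, size_t max_bytes)
{
	size_t total_bytes_read = 0;
	ssize_t bytes_read;

	do {
		bytes_read = read(fd, buf + total_bytes_read,
				max_bytes - total_bytes_read);
		if (bytes_read < 0) {
			if (errno == EINTR)
				continue;	/* Interrupted: retry the read. */
			return -1;		/* Genuine I/O error. */
		}
		total_bytes_read += bytes_read;
		/*
		 * 'bytes_read != 0' keeps looping after an EINTR retry
		 * (bytes_read == -1); only a return value of 0 (EOF) or a
		 * full buffer ends the loop.
		 */
	} while (max_bytes > total_bytes_read && bytes_read != 0);

	return (ssize_t) total_bytes_read;
}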