mirror of
https://git.haproxy.org/git/haproxy.git/
synced 2025-08-07 07:37:02 +02:00
MINOR: atomic: add a read-specific variant of __ha_cpu_relax()
Tests on various systems show that x86 prefers not to wait at all inside read loops, while aarch64 prefers to wait a little bit. Instead of stuffing ifdefs around __ha_cpu_relax() inside plenty of such loops that wait for a condition to appear, it is better to implement a new variant, __ha_cpu_relax_for_read(), which honors each architecture's preference and behaves exactly like __ha_cpu_relax() on the other ones.
parent 5df0df96dd
commit c038ca8e8c
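As a rough illustration of how the new macro is meant to be used, here is a minimal sketch, not code from this commit: the helper name wait_for_flag and its flag argument are hypothetical, while HA_ATOMIC_LOAD and the include path are HAProxy's existing conventions.

	#include <haproxy/atomic.h>

	/* Hypothetical helper: spin-read until another thread publishes a
	 * non-zero value. The loop only performs loads, so the read-specific
	 * variant applies: per the commit message it expands to a plain no-op
	 * on x86 and to "isb" on aarch64. Both variants evaluate to 1, so
	 * they also compose directly into loop conditions.
	 */
	static unsigned int wait_for_flag(const unsigned int *flag)
	{
		unsigned int v;

		while (!(v = HA_ATOMIC_LOAD(flag)))
			__ha_cpu_relax_for_read();
		return v;
	}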
@@ -185,6 +185,7 @@
 #define __ha_barrier_full() do { } while (0)
 #define __ha_compiler_barrier() do { } while (0)
 #define __ha_cpu_relax() ({ 1; })
+#define __ha_cpu_relax_for_read() ({ 1; })
 
 #else /* !USE_THREAD */
 
@@ -586,6 +587,9 @@ __ha_cas_dw(void *target, void *compare, const void *set)
 /* short-lived CPU relaxation */
 #define __ha_cpu_relax() ({ asm volatile("rep;nop\n"); 1; })
 
+/* dummy relaxation: x86 prefers not to wait at all in read loops */
+#define __ha_cpu_relax_for_read() ({ 1; })
+
 #elif defined(__arm__) && (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__))
 
 static __inline void
@@ -651,6 +655,9 @@ static __inline int __ha_cas_dw(void *target, void *compare, const void *set)
 /* short-lived CPU relaxation */
 #define __ha_cpu_relax() ({ asm volatile(""); 1; })
 
+/* short wait in read loops */
+#define __ha_cpu_relax_for_read() ({ asm volatile(""); 1; })
+
 #elif defined (__aarch64__)
 
 static __inline void
@@ -697,6 +704,9 @@ __ha_barrier_atomic_full(void)
  */
 #define __ha_cpu_relax() ({ asm volatile("isb" ::: "memory"); 1; })
 
+/* aarch64 prefers to wait for real in read loops */
+#define __ha_cpu_relax_for_read() ({ asm volatile("isb" ::: "memory"); 1; })
+
 #if defined(__ARM_FEATURE_ATOMICS) && !defined(__clang__) // ARMv8.1-A atomics
 
 /* returns 0 on failure, non-zero on success */
@@ -799,6 +809,9 @@ static __inline int __ha_cas_dw(void *target, void *compare, void *set)
 /* short-lived CPU relaxation */
 #define __ha_cpu_relax() ({ asm volatile(""); 1; })
 
+/* default wait in read loops */
+#define __ha_cpu_relax_for_read() ({ asm volatile(""); 1; })
+
 #endif /* end of arch-specific barrier/dwcas */
 
 static inline void __ha_compiler_barrier(void)
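To make the motivation concrete, here is a hedged before/after sketch of a read-polling call site. The helpers wait_old and wait_new and the slot argument are hypothetical; the "before" shape is the per-loop ifdef stuffing the commit message wants to avoid.

	#include <haproxy/atomic.h>

	/* Before: every read loop would carry its own arch ifdefs
	 * (hypothetical illustration of the pattern being avoided).
	 */
	static const void *wait_old(void *const *slot)
	{
		const void *p;

		while (!(p = HA_ATOMIC_LOAD(slot))) {
	#if defined(__x86_64__) || defined(__i386__)
			/* x86: prefer not to wait at all */
	#else
			__ha_cpu_relax();
	#endif
		}
		return p;
	}

	/* After: the per-architecture preference lives in one macro and
	 * the call site stays flat.
	 */
	static const void *wait_new(void *const *slot)
	{
		const void *p;

		while (!(p = HA_ATOMIC_LOAD(slot)))
			__ha_cpu_relax_for_read();
		return p;
	}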