| field | value |
|---|---|
| author | Noah Misch <noah@leadboat.com>, 2019-09-14 19:38:41 -0700 |
| committer | Noah Misch <noah@leadboat.com>, 2019-09-14 19:38:41 -0700 |
| commit | 87e9fae0696d9e3ff70a1438775ad9f786b854a5 (patch) |
| tree | 049952454a8af2144db2e9dccdd92845d15437da /src |
| parent | e7ff59686eacf5021fb84be921116986c3828d8a (diff) |
Revert "For all ppc compilers, implement pg_atomic_fetch_add_ with inline asm."
This reverts commit e7ff59686eacf5021fb84be921116986c3828d8a. It
defined pg_atomic_fetch_add_u32_impl() without defining
pg_atomic_compare_exchange_u32_impl(), which is incompatible with
src/include/port/atomics/fallback.h. Per buildfarm member prairiedog.
Discussion: https://postgr.es/m/7517.1568470247@sss.pgh.pa.us
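For context on the failure mode: PostgreSQL's atomics headers are layered, and the spinlock emulation in src/include/port/atomics/fallback.h engages only when an architecture declares no native u32 support at all, while the generic layer derives any omitted operations from a native compare-exchange. The reverted commit declared u32 support and supplied fetch-add for every ppc compiler, leaving compilers without their own compare-exchange (such as prairiedog's old gcc) with no definition to build on. The following is a simplified sketch of that selection logic, not the actual header contents; compiling it trips the final check on purpose:

```c
/*
 * Simplified sketch of the atomics header layering (not the real
 * PostgreSQL sources); the #error mirrors the prairiedog failure.
 */

/* arch-ppc.h, as of the reverted commit: claims u32 support and
 * supplies fetch-add for every ppc compiler, but no compare-exchange */
#define PG_HAVE_ATOMIC_U32_SUPPORT
#define PG_HAVE_ATOMIC_FETCH_ADD_U32

/* compiler layer (e.g. generic-gcc.h on a gcc too old for __sync or
 * __atomic builtins): contributes no compare-exchange either */

/* fallback.h: spinlock emulation engages only when NO native u32
 * support was declared, so it is skipped here */
#ifndef PG_HAVE_ATOMIC_U32_SUPPORT
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32	/* emulated */
#endif

/* generic.h would now have to derive the remaining u32 operations
 * from a compare-exchange that nothing ever defined */
#if defined(PG_HAVE_ATOMIC_U32_SUPPORT) && \
	!defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#error "native u32 atomics claimed, but no compare-exchange to build on"
#endif
```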
Diffstat (limited to 'src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/include/pg_config.h.in | 3 |
| -rw-r--r-- | src/include/port/atomics/arch-ppc.h | 98 |
| -rw-r--r-- | src/include/port/atomics/generic-xlc.h | 66 |

3 files changed, 66 insertions(+), 101 deletions(-)
```diff
diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in
index 509cc92b989..c6014e83fa8 100644
--- a/src/include/pg_config.h.in
+++ b/src/include/pg_config.h.in
@@ -329,9 +329,6 @@
 /* Define to 1 if you have isinf(). */
 #undef HAVE_ISINF
 
-/* Define to 1 if __builtin_constant_p(x) implies "i"(x) acceptance. */
-#undef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
-
 /* Define to 1 if you have the <langinfo.h> header file. */
 #undef HAVE_LANGINFO_H
 
diff --git a/src/include/port/atomics/arch-ppc.h b/src/include/port/atomics/arch-ppc.h
index 35d602e618f..344b39449bd 100644
--- a/src/include/port/atomics/arch-ppc.h
+++ b/src/include/port/atomics/arch-ppc.h
@@ -25,103 +25,5 @@
 #define pg_write_barrier_impl() __asm__ __volatile__ ("lwsync" : : : "memory")
 #endif
 
-#define PG_HAVE_ATOMIC_U32_SUPPORT
-typedef struct pg_atomic_uint32
-{
-	volatile uint32 value;
-} pg_atomic_uint32;
-
-/* 64bit atomics are only supported in 64bit mode */
-#ifdef __64BIT__
-#define PG_HAVE_ATOMIC_U64_SUPPORT
-typedef struct pg_atomic_uint64
-{
-	volatile uint64 value pg_attribute_aligned(8);
-} pg_atomic_uint64;
-
-#endif /* __64BIT__ */
-
-#define PG_HAVE_ATOMIC_FETCH_ADD_U32
-static inline uint32
-pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
-{
-	uint32 _t;
-	uint32 res;
-
-	/*
-	 * xlc has a no-longer-documented __fetch_and_add() intrinsic.  In xlc
-	 * 12.01.0000.0000, it emits a leading "sync" and trailing "isync".  In
-	 * xlc 13.01.0003.0004, it emits neither.  Hence, using the intrinsic
-	 * would add redundant syncs on xlc 12.
-	 */
-#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
-	if (__builtin_constant_p(add_) &&
-		add_ <= PG_INT16_MAX && add_ >= PG_INT16_MIN)
-		__asm__ __volatile__(
-			"	sync			\n"
-			"	lwarx   %1,0,%4	\n"
-			"	addi    %0,%1,%3	\n"
-			"	stwcx.  %0,0,%4	\n"
-			"	bne     $-12		\n"		/* branch to lwarx */
-			"	isync			\n"
-:			"=&r"(_t), "=&r"(res), "+m"(ptr->value)
-:			"i"(add_), "r"(&ptr->value)
-:			"memory", "cc");
-	else
-#endif
-		__asm__ __volatile__(
-			"	sync			\n"
-			"	lwarx   %1,0,%4	\n"
-			"	add     %0,%1,%3	\n"
-			"	stwcx.  %0,0,%4	\n"
-			"	bne     $-12		\n"		/* branch to lwarx */
-			"	isync			\n"
-:			"=&r"(_t), "=&r"(res), "+m"(ptr->value)
-:			"r"(add_), "r"(&ptr->value)
-:			"memory", "cc");
-
-	return res;
-}
-
-#ifdef PG_HAVE_ATOMIC_U64_SUPPORT
-#define PG_HAVE_ATOMIC_FETCH_ADD_U64
-static inline uint64
-pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
-{
-	uint64 _t;
-	uint64 res;
-
-	/* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/ */
-#ifdef HAVE_I_CONSTRAINT__BUILTIN_CONSTANT_P
-	if (__builtin_constant_p(add_) &&
-		add_ <= PG_INT16_MAX && add_ >= PG_INT16_MIN)
-		__asm__ __volatile__(
-			"	sync			\n"
-			"	ldarx   %1,0,%4	\n"
-			"	addi    %0,%1,%3	\n"
-			"	stdcx.  %0,0,%4	\n"
-			"	bne     $-12		\n"		/* branch to ldarx */
-			"	isync			\n"
-:			"=&r"(_t), "=&r"(res), "+m"(ptr->value)
-:			"i"(add_), "r"(&ptr->value)
-:			"memory", "cc");
-	else
-#endif
-		__asm__ __volatile__(
-			"	sync			\n"
-			"	ldarx   %1,0,%4	\n"
-			"	add     %0,%1,%3	\n"
-			"	stdcx.  %0,0,%4	\n"
-			"	bne     $-12		\n"		/* branch to ldarx */
-			"	isync			\n"
-:			"=&r"(_t), "=&r"(res), "+m"(ptr->value)
-:			"r"(add_), "r"(&ptr->value)
-:			"memory", "cc");
-
-	return res;
-}
-
-#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
-
 /* per architecture manual doubleword accesses have single copy atomicity */
 #define PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY
diff --git a/src/include/port/atomics/generic-xlc.h b/src/include/port/atomics/generic-xlc.h
index 8330b454953..8b5c7329706 100644
--- a/src/include/port/atomics/generic-xlc.h
+++ b/src/include/port/atomics/generic-xlc.h
@@ -18,6 +18,23 @@
 
 #if defined(HAVE_ATOMICS)
 
+#define PG_HAVE_ATOMIC_U32_SUPPORT
+typedef struct pg_atomic_uint32
+{
+	volatile uint32 value;
+} pg_atomic_uint32;
+
+
+/* 64bit atomics are only supported in 64bit mode */
+#ifdef __64BIT__
+#define PG_HAVE_ATOMIC_U64_SUPPORT
+typedef struct pg_atomic_uint64
+{
+	volatile uint64 value pg_attribute_aligned(8);
+} pg_atomic_uint64;
+
+#endif /* __64BIT__ */
+
 #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
 static inline bool
 pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
@@ -52,6 +69,33 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 	return ret;
 }
 
+#define PG_HAVE_ATOMIC_FETCH_ADD_U32
+static inline uint32
+pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
+{
+	uint32 _t;
+	uint32 res;
+
+	/*
+	 * xlc has a no-longer-documented __fetch_and_add() intrinsic.  In xlc
+	 * 12.01.0000.0000, it emits a leading "sync" and trailing "isync".  In
+	 * xlc 13.01.0003.0004, it emits neither.  Hence, using the intrinsic
+	 * would add redundant syncs on xlc 12.
+	 */
+	__asm__ __volatile__(
+		"	sync			\n"
+		"	lwarx   %1,0,%4	\n"
+		"	add     %0,%1,%3	\n"
+		"	stwcx.  %0,0,%4	\n"
+		"	bne     $-12		\n"		/* branch to lwarx */
+		"	isync			\n"
+:		"=&r"(_t), "=&r"(res), "+m"(ptr->value)
+:		"r"(add_), "r"(&ptr->value)
+:		"memory", "cc");
+
+	return res;
+}
+
 #ifdef PG_HAVE_ATOMIC_U64_SUPPORT
 
 #define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
@@ -71,6 +115,28 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 	return ret;
 }
 
+#define PG_HAVE_ATOMIC_FETCH_ADD_U64
+static inline uint64
+pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
+{
+	uint64 _t;
+	uint64 res;
+
+	/* Like u32, but s/lwarx/ldarx/; s/stwcx/stdcx/ */
+	__asm__ __volatile__(
+		"	sync			\n"
+		"	ldarx   %1,0,%4	\n"
+		"	add     %0,%1,%3	\n"
+		"	stdcx.  %0,0,%4	\n"
+		"	bne     $-12		\n"		/* branch to ldarx */
+		"	isync			\n"
+:		"=&r"(_t), "=&r"(res), "+m"(ptr->value)
+:		"r"(add_), "r"(&ptr->value)
+:		"memory", "cc");
+
+	return res;
+}
+
 #endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
 
 #endif /* defined(HAVE_ATOMICS) */
```