author     Tom Lane <tgl@sss.pgh.pa.us>  2022-11-02 17:37:26 -0400
committer  Tom Lane <tgl@sss.pgh.pa.us>  2022-11-02 17:37:29 -0400
commit     1c72d82c25849307416db140007674c674bdee10 (patch)
tree       65dc1a7bc253a1b4dec179318cc05787fa2d2dd8 /src
parent     3655b46aa0b8156aced0b9ee3d10875632f84407 (diff)
Allow use of __sync_lock_test_and_set for spinlocks on any machine.
If we have no special-case code in s_lock.h for the current platform, but the compiler has __sync_lock_test_and_set, use that instead of failing. It's unlikely that anybody's __sync_lock_test_and_set would be so awful as to be worse than our semaphore-based fallback, but if it is, they can (continue to) use --disable-spinlocks.

This allows removal of the RISC-V special case installed by commit c32fcac56, which generated exactly the same code but only on that platform. Usefully, the RISC-V buildfarm animals should now test at least the int variant of this patch.

I've manually tested both variants on ARM by dint of removing the ARM-specific stanza. We don't want to drop that, because it already has some special knowledge and is likely to grow more over time. Likewise, this is not meant to preclude installing special cases for other arches if that proves worthwhile.

Per discussion of a request to install the same code for loongarch64. Like the previous patch, we might as well back-patch to supported branches.

Discussion: https://postgr.es/m/761ac43d44b84d679ba803c2bd947cc0@HSMAILSVR04.hs.handsome.com.cn
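In effect, the new fallback gives every otherwise-unsupported platform a test-and-set spinlock built from two GCC atomic builtins. A minimal sketch of that shape (the my_* names are hypothetical and not part of the patch; the real s_lock() machinery adds spin delays and timeout handling):

    typedef int slock_t;                /* int-width variant, per HAVE_GCC__SYNC_INT32_TAS */

    static inline int
    my_tas(volatile slock_t *lock)
    {
        /* Returns the prior value: nonzero means the lock was already held. */
        return __sync_lock_test_and_set(lock, 1);
    }

    static inline void
    my_spin_lock(volatile slock_t *lock)
    {
        while (my_tas(lock))
            ;                           /* busy-wait; real code inserts delays */
    }

    static inline void
    my_spin_unlock(volatile slock_t *lock)
    {
        __sync_lock_release(lock);      /* writes 0 with release semantics */
    }

Per the GCC documentation, __sync_lock_test_and_set() is an acquire barrier and __sync_lock_release() is a release barrier, which is exactly the pairing a spinlock needs; that is why the patch needs no explicit memory fence.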
Diffstat (limited to 'src')
-rw-r--r--  src/include/storage/s_lock.h  67
1 file changed, 44 insertions(+), 23 deletions(-)
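For context, the HAVE_GCC__SYNC_INT32_TAS and HAVE_GCC__SYNC_CHAR_TAS symbols tested in the diff below come from configure-time probes that try to compile and link a call to the builtin. A minimal sketch of such a probe (illustrative only; the exact test program in PostgreSQL's configure.ac may differ):

    /* conftest.c: compiles and links only where the builtin is available */
    int
    main(void)
    {
        int lock = 0;

        if (__sync_lock_test_and_set(&lock, 1) != 0)
            return 1;               /* lock was unexpectedly held */
        __sync_lock_release(&lock);
        return lock;                /* 0 after release */
    }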
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
index 4225d9b7fc3..8b19ab160f8 100644
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -293,29 +293,6 @@ spin_delay(void)
#endif /* __arm__ || __arm || __aarch64__ */
-/*
- * RISC-V likewise uses __sync_lock_test_and_set(int *, int) if available.
- */
-#if defined(__riscv)
-#ifdef HAVE_GCC__SYNC_INT32_TAS
-#define HAS_TEST_AND_SET
-
-#define TAS(lock) tas(lock)
-
-typedef int slock_t;
-
-static __inline__ int
-tas(volatile slock_t *lock)
-{
- return __sync_lock_test_and_set(lock, 1);
-}
-
-#define S_UNLOCK(lock) __sync_lock_release(lock)
-
-#endif /* HAVE_GCC__SYNC_INT32_TAS */
-#endif /* __riscv */
-
-
/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#if defined(__s390__) || defined(__s390x__)
#define HAS_TEST_AND_SET
@@ -620,6 +597,50 @@ tas(volatile slock_t *lock)
/*
+ * If we have no platform-specific knowledge, but we found that the compiler
+ * provides __sync_lock_test_and_set(), use that. Prefer the int-width
+ * version over the char-width version if we have both, on the rather dubious
+ * grounds that that's known to be more likely to work in the ARM ecosystem.
+ * (But we dealt with ARM above.)
+ */
+#if !defined(HAS_TEST_AND_SET)
+
+#if defined(HAVE_GCC__SYNC_INT32_TAS)
+#define HAS_TEST_AND_SET
+
+#define TAS(lock) tas(lock)
+
+typedef int slock_t;
+
+static __inline__ int
+tas(volatile slock_t *lock)
+{
+ return __sync_lock_test_and_set(lock, 1);
+}
+
+#define S_UNLOCK(lock) __sync_lock_release(lock)
+
+#elif defined(HAVE_GCC__SYNC_CHAR_TAS)
+#define HAS_TEST_AND_SET
+
+#define TAS(lock) tas(lock)
+
+typedef char slock_t;
+
+static __inline__ int
+tas(volatile slock_t *lock)
+{
+ return __sync_lock_test_and_set(lock, 1);
+}
+
+#define S_UNLOCK(lock) __sync_lock_release(lock)
+
+#endif /* HAVE_GCC__SYNC_INT32_TAS */
+
+#endif /* !defined(HAS_TEST_AND_SET) */
+
+
+/*
* Default implementation of S_UNLOCK() for gcc/icc.
*
* Note that this implementation is unsafe for any platform that can reorder