Diffstat (limited to 'src/backend/storage/lmgr/spin.c')
-rw-r--r-- | src/backend/storage/lmgr/spin.c | 97
1 file changed, 67 insertions, 30 deletions
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 753943e46d6..9f7eae93392 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -28,8 +28,24 @@
 
 
 #ifndef HAVE_SPINLOCKS
+
+/*
+ * No TAS, so spinlocks are implemented as PGSemaphores.
+ */
+
+#ifndef HAVE_ATOMICS
+#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
+#else
+#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
+#endif	/* DISABLE_ATOMICS */
+
 PGSemaphore *SpinlockSemaArray;
-#endif
+
+#else	/* !HAVE_SPINLOCKS */
+
+#define NUM_EMULATION_SEMAPHORES 0
+
+#endif	/* HAVE_SPINLOCKS */
 
 /*
  * Report the amount of shared memory needed to store semaphores for spinlock
@@ -38,34 +54,19 @@ PGSemaphore *SpinlockSemaArray;
 Size
 SpinlockSemaSize(void)
 {
-	return SpinlockSemas() * sizeof(PGSemaphore);
+	return NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphore);
 }
 
-#ifdef HAVE_SPINLOCKS
-
 /*
  * Report number of semaphores needed to support spinlocks.
  */
 int
 SpinlockSemas(void)
 {
-	return 0;
+	return NUM_EMULATION_SEMAPHORES;
 }
 
-#else	/* !HAVE_SPINLOCKS */
-/*
- * No TAS, so spinlocks are implemented as PGSemaphores.
- */
-
-
-/*
- * Report number of semaphores needed to support spinlocks.
- */
-int
-SpinlockSemas(void)
-{
-	return NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES;
-}
+#ifndef HAVE_SPINLOCKS
 
 /*
  * Initialize spinlock emulation.
@@ -92,23 +93,59 @@ SpinlockSemaInit(void)
 /*
  * s_lock.h hardware-spinlock emulation using semaphores
  *
- * We map all spinlocks onto a set of NUM_SPINLOCK_SEMAPHORES semaphores.
- * It's okay to map multiple spinlocks onto one semaphore because no process
- * should ever hold more than one at a time. We just need enough semaphores
- * so that we aren't adding too much extra contention from that.
+ * We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores. It's okay to
+ * map multiple spinlocks onto one semaphore because no process should ever
+ * hold more than one at a time. We just need enough semaphores so that we
+ * aren't adding too much extra contention from that.
+ *
+ * There is one exception to the restriction of only holding one spinlock at a
+ * time, which is that it's ok if emulated atomic operations are nested inside
+ * spinlocks. To avoid the danger of spinlocks and atomics using the same sema,
+ * we make sure "normal" spinlocks and atomics backed by spinlocks use
+ * distinct semaphores (see the nested argument to s_init_lock_sema).
  *
  * slock_t is just an int for this implementation; it holds the spinlock
- * number from 1..NUM_SPINLOCK_SEMAPHORES. We intentionally ensure that 0
+ * number from 1..NUM_EMULATION_SEMAPHORES. We intentionally ensure that 0
  * is not a valid value, so that testing with this code can help find
  * failures to initialize spinlocks.
  */
 
+static inline void
+s_check_valid(int lockndx)
+{
+	if (unlikely(lockndx <= 0 || lockndx > NUM_EMULATION_SEMAPHORES))
+		elog(ERROR, "invalid spinlock number: %d", lockndx);
+}
+
 void
 s_init_lock_sema(volatile slock_t *lock, bool nested)
 {
 	static uint32 counter = 0;
-
-	*lock = ((++counter) % NUM_SPINLOCK_SEMAPHORES) + 1;
+	uint32 offset;
+	uint32 sema_total;
+	uint32 idx;
+
+	if (nested)
+	{
+		/*
+		 * To allow nesting atomics inside spinlocked sections, use a
+		 * different spinlock. See comment above.
+		 */
+		offset = 1 + NUM_SPINLOCK_SEMAPHORES;
+		sema_total = NUM_ATOMICS_SEMAPHORES;
+	}
+	else
+	{
+		offset = 1;
+		sema_total = NUM_SPINLOCK_SEMAPHORES;
+	}
+
+	idx = (counter++ % sema_total) + offset;
+
+	/* double check we did things correctly */
+	s_check_valid(idx);
+
+	*lock = idx;
 }
 
 void
@@ -116,8 +153,8 @@ s_unlock_sema(volatile slock_t *lock)
 {
 	int lockndx = *lock;
 
-	if (lockndx <= 0 || lockndx > NUM_SPINLOCK_SEMAPHORES)
-		elog(ERROR, "invalid spinlock number: %d", lockndx);
+	s_check_valid(lockndx);
+
 	PGSemaphoreUnlock(SpinlockSemaArray[lockndx - 1]);
 }
 
@@ -134,8 +171,8 @@ tas_sema(volatile slock_t *lock)
 {
 	int lockndx = *lock;
 
-	if (lockndx <= 0 || lockndx > NUM_SPINLOCK_SEMAPHORES)
-		elog(ERROR, "invalid spinlock number: %d", lockndx);
+	s_check_valid(lockndx);
+
 	/* Note that TAS macros return 0 if *success* */
 	return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]);
 }
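The heart of the change is how s_init_lock_sema() now partitions the emulation semaphores: ordinary spinlocks draw an index from 1..NUM_SPINLOCK_SEMAPHORES, while semaphore-backed atomics (the nested case) draw from the range immediately above it, so an atomic operation nested inside a spinlocked section can never land on the semaphore already held for the spinlock. The standalone sketch below reproduces just that index arithmetic; the constants are illustrative stand-ins (the real values live in pg_config_manual.h) and pick_sema_index() is a hypothetical helper, not PostgreSQL code.

/*
 * Standalone sketch (not PostgreSQL code) of the index partitioning done by
 * s_init_lock_sema() in the diff above.  Constants are illustrative
 * stand-ins for the values defined in pg_config_manual.h.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_SPINLOCK_SEMAPHORES  128	/* "normal" spinlocks map here */
#define NUM_ATOMICS_SEMAPHORES   64	/* emulated atomics map here */
#define NUM_EMULATION_SEMAPHORES \
	(NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)

/* Mirrors the arithmetic in s_init_lock_sema(); counter is passed in. */
static uint32_t
pick_sema_index(uint32_t *counter, int nested)
{
	uint32_t offset;
	uint32_t sema_total;

	if (nested)
	{
		/* atomics nested inside spinlocks: upper, disjoint range */
		offset = 1 + NUM_SPINLOCK_SEMAPHORES;
		sema_total = NUM_ATOMICS_SEMAPHORES;
	}
	else
	{
		/* ordinary spinlocks: indexes 1..NUM_SPINLOCK_SEMAPHORES */
		offset = 1;
		sema_total = NUM_SPINLOCK_SEMAPHORES;
	}

	return ((*counter)++ % sema_total) + offset;
}

int
main(void)
{
	uint32_t counter = 0;

	/* Spinlock indexes fall in 1..128, atomics indexes in 129..192. */
	printf("spinlock sema index: %u\n", pick_sema_index(&counter, 0));
	printf("spinlock sema index: %u\n", pick_sema_index(&counter, 0));
	printf("atomic sema index:   %u\n", pick_sema_index(&counter, 1));
	printf("semaphores reserved: %d\n", NUM_EMULATION_SEMAPHORES);
	return 0;
}

Running the sketch shows the two disjoint ranges: spinlock indexes start at 1, atomics indexes start at NUM_SPINLOCK_SEMAPHORES + 1, which is why SpinlockSemaSize() now reserves NUM_EMULATION_SEMAPHORES semaphores rather than only NUM_SPINLOCK_SEMAPHORES.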