Diffstat (limited to 'src/backend/port/atomics.c')
-rw-r--r-- | src/backend/port/atomics.c | 109 |
1 file changed, 0 insertions, 109 deletions
diff --git a/src/backend/port/atomics.c b/src/backend/port/atomics.c
index cd7ede96726..6f1e014d0b8 100644
--- a/src/backend/port/atomics.c
+++ b/src/backend/port/atomics.c
@@ -49,115 +49,6 @@ pg_extern_compiler_barrier(void)
 #endif
 
 
-#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION
-
-void
-pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
-{
-    StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
-                     "size mismatch of atomic_flag vs slock_t");
-
-    SpinLockInit((slock_t *) &ptr->sema);
-
-    ptr->value = false;
-}
-
-bool
-pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
-{
-    uint32      oldval;
-
-    SpinLockAcquire((slock_t *) &ptr->sema);
-    oldval = ptr->value;
-    ptr->value = true;
-    SpinLockRelease((slock_t *) &ptr->sema);
-
-    return oldval == 0;
-}
-
-void
-pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
-{
-    SpinLockAcquire((slock_t *) &ptr->sema);
-    ptr->value = false;
-    SpinLockRelease((slock_t *) &ptr->sema);
-}
-
-bool
-pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
-{
-    return ptr->value == 0;
-}
-
-#endif                          /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
-
-#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
-void
-pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
-{
-    StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
-                     "size mismatch of atomic_uint32 vs slock_t");
-
-    SpinLockInit((slock_t *) &ptr->sema);
-    ptr->value = val_;
-}
-
-void
-pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
-{
-    /*
-     * One might think that an unlocked write doesn't need to acquire the
-     * spinlock, but one would be wrong. Even an unlocked write has to cause a
-     * concurrent pg_atomic_compare_exchange_u32() (et al) to fail.
-     */
-    SpinLockAcquire((slock_t *) &ptr->sema);
-    ptr->value = val;
-    SpinLockRelease((slock_t *) &ptr->sema);
-}
-
-bool
-pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
-                                    uint32 *expected, uint32 newval)
-{
-    bool        ret;
-
-    /*
-     * Do atomic op under a spinlock. It might look like we could just skip
-     * the cmpxchg if the lock isn't available, but that'd just emulate a
-     * 'weak' compare and swap. I.e. one that allows spurious failures. Since
-     * several algorithms rely on a strong variant and that is efficiently
-     * implementable on most major architectures let's emulate it here as
-     * well.
-     */
-    SpinLockAcquire((slock_t *) &ptr->sema);
-
-    /* perform compare/exchange logic */
-    ret = ptr->value == *expected;
-    *expected = ptr->value;
-    if (ret)
-        ptr->value = newval;
-
-    /* and release lock */
-    SpinLockRelease((slock_t *) &ptr->sema);
-
-    return ret;
-}
-
-uint32
-pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
-{
-    uint32      oldval;
-
-    SpinLockAcquire((slock_t *) &ptr->sema);
-    oldval = ptr->value;
-    ptr->value += add_;
-    SpinLockRelease((slock_t *) &ptr->sema);
-    return oldval;
-}
-
-#endif                          /* PG_HAVE_ATOMIC_U32_SIMULATION */
-
-
 #ifdef PG_HAVE_ATOMIC_U64_SIMULATION
 
 void
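The deleted fallback emulated a strong compare-and-swap by doing the whole read-compare-write under the per-variable spinlock, which is why even a plain write had to take the lock: it must make a concurrent compare-exchange fail rather than be missed. The following is a minimal standalone sketch of that idea only, not PostgreSQL code: a pthread mutex stands in for slock_t, and the sim_uint32 / sim_compare_exchange_u32 names are illustrative.

/*
 * Sketch of a spinlock-emulated strong compare-and-swap, assuming a
 * pthread mutex in place of PostgreSQL's slock_t.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct
{
    pthread_mutex_t sema;       /* stands in for the embedded slock_t */
    uint32_t    value;
} sim_uint32;

static void
sim_init_u32(sim_uint32 *ptr, uint32_t val)
{
    pthread_mutex_init(&ptr->sema, NULL);
    ptr->value = val;
}

/*
 * Strong CAS: the comparison and the conditional store happen while the
 * lock is held, so the call fails only when the values really differ --
 * no spurious failures, matching the comment in the removed code.
 */
static bool
sim_compare_exchange_u32(sim_uint32 *ptr, uint32_t *expected, uint32_t newval)
{
    bool        ret;

    pthread_mutex_lock(&ptr->sema);
    ret = (ptr->value == *expected);
    *expected = ptr->value;     /* report the value actually seen */
    if (ret)
        ptr->value = newval;
    pthread_mutex_unlock(&ptr->sema);

    return ret;
}

int
main(void)
{
    sim_uint32  counter;
    uint32_t    expected = 0;

    sim_init_u32(&counter, 0);

    /* typical caller-side retry loop built on strong CAS semantics */
    while (!sim_compare_exchange_u32(&counter, &expected, expected + 1))
        ;                       /* expected now holds the current value */

    printf("counter = %u\n", counter.value);
    return 0;
}

Holding the lock across the entire operation is what makes the emulation strong; skipping the work when the lock is busy would turn it into a weak CAS with spurious failures, which the callers that rely on pg_atomic_compare_exchange_u32() do not tolerate.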