/*-------------------------------------------------------------------------
 *
 * atomics.c
 *     Non-Inline parts of the atomics implementation
 *
 * Portions Copyright (c) 2013-2024, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *     src/backend/port/atomics.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"
#include "miscadmin.h"
#include "port/atomics.h"
#include "storage/spin.h"
#ifdef PG_HAVE_ATOMIC_U64_SIMULATION
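
/*
 * Fallback for platforms without native 64-bit atomics: each
 * pg_atomic_uint64 carries an embedded spinlock (ptr->sema) that protects
 * plain reads and writes of ptr->value, and the public wrappers in
 * port/atomics.h route to these _impl functions.
 */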
void
pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
{
    StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
                     "size mismatch of atomic_uint64 vs slock_t");

    SpinLockInit((slock_t *) &ptr->sema);
    ptr->value = val_;
}

bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool        ret;

    /*
     * Do atomic op under a spinlock. It might look like we could just skip
     * the cmpxchg if the lock isn't available, but that'd just emulate a
     * 'weak' compare and swap. I.e. one that allows spurious failures. Since
     * several algorithms rely on a strong variant and that is efficiently
     * implementable on most major architectures let's emulate it here as
     * well.
     */
    SpinLockAcquire((slock_t *) &ptr->sema);

    /* perform compare/exchange logic */
    ret = ptr->value == *expected;
    *expected = ptr->value;
    if (ret)
        ptr->value = newval;

    /* and release lock */
    SpinLockRelease((slock_t *) &ptr->sema);

    return ret;
}
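
/*
 * Illustrative caller pattern: because the emulation above is a strong CAS,
 * a retry loop never fails spuriously, and after a failed exchange
 * *expected already holds the value currently stored, so it can be fed
 * straight back into the next attempt.  Sketch using the public wrappers
 * from port/atomics.h ('var' and 'flag' are placeholders):
 *
 *     uint64      old = pg_atomic_read_u64(&var);
 *
 *     while (!pg_atomic_compare_exchange_u64(&var, &old, old | flag))
 *         ;
 */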

uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    uint64      oldval;

    SpinLockAcquire((slock_t *) &ptr->sema);
    oldval = ptr->value;
    ptr->value += add_;
    SpinLockRelease((slock_t *) &ptr->sema);
    return oldval;
}
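
/*
 * As with the native implementation, the emulation above returns the value
 * held *before* the addition, so a caller can use it as a ticket/sequence
 * generator.  Sketch ('counter' is a placeholder for any initialized
 * pg_atomic_uint64):
 *
 *     uint64      ticket = pg_atomic_fetch_add_u64(&counter, 1);
 */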
#endif /* PG_HAVE_ATOMIC_U64_SIMULATION */