author     Noah Misch <noah@leadboat.com>  2020-08-15 10:15:53 -0700
committer  Noah Misch <noah@leadboat.com>  2020-08-15 10:15:57 -0700
commit     d4031d78460cbbb4ed2fb7be635f84bea0e9a0c1 (patch)
tree       d7ee345e4522f52d62b51f33db15d96ba9a8535a /src/backend/commands/async.c
parent     9d472b51e98777102d72f8ccdfb8cef10e087f74 (diff)
Prevent concurrent SimpleLruTruncate() for any given SLRU.
The SimpleLruTruncate() header comment states the new coding rule. To
achieve this, add locktype "frozenid" and two LWLocks. This closes a
rare opportunity for data loss, which manifested as "apparent
wraparound" or "could not access status of transaction" errors. Data
loss is more likely in pg_multixact, due to released branches' thin
margin between multiStopLimit and multiWrapLimit. If a user's physical
replication primary logged ": apparent wraparound" messages, the user
should rebuild standbys of that primary regardless of symptoms. At less
risk is a cluster having emitted "not accepting commands" errors or
"must be vacuumed" warnings at some point. One can test a cluster for
this data loss by running VACUUM FREEZE in every database. Back-patch
to 9.5 (all supported versions).
Discussion: https://postgr.es/m/20190218073103.GA1434723@rfd.leadboat.com
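
As the message notes, one can test a cluster for this data loss by running VACUUM FREEZE in every database. A minimal sketch of that check, assuming the vacuumdb client that ships with PostgreSQL (the usual connection options such as -h, -p, and -U apply):

    # Freeze every database in the cluster; errors like "could not access
    # status of transaction" during the run would indicate the damage the
    # commit message describes.
    vacuumdb --all --freeze

vacuumdb simply iterates over the cluster's databases and issues VACUUM (FREEZE) in each, so connecting to each database with psql and running VACUUM FREEZE by hand is equivalent.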
Diffstat (limited to 'src/backend/commands/async.c')
-rw-r--r--  src/backend/commands/async.c  37
1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 4cd1f4b95c3..8ef0aad808e 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -224,19 +224,22 @@ typedef struct QueueBackendStatus
 /*
  * Shared memory state for LISTEN/NOTIFY (excluding its SLRU stuff)
  *
- * The AsyncQueueControl structure is protected by the AsyncQueueLock.
+ * The AsyncQueueControl structure is protected by the AsyncQueueLock and
+ * NotifyQueueTailLock.
  *
- * When holding the lock in SHARED mode, backends may only inspect their own
- * entries as well as the head and tail pointers. Consequently we can allow a
- * backend to update its own record while holding only SHARED lock (since no
- * other backend will inspect it).
+ * When holding AsyncQueueLock in SHARED mode, backends may only inspect their
+ * own entries as well as the head and tail pointers. Consequently we can
+ * allow a backend to update its own record while holding only SHARED lock
+ * (since no other backend will inspect it).
  *
- * When holding the lock in EXCLUSIVE mode, backends can inspect the entries
- * of other backends and also change the head and tail pointers.
+ * When holding AsyncQueueLock in EXCLUSIVE mode, backends can inspect the
+ * entries of other backends and also change the head pointer. When holding
+ * both AsyncQueueLock and NotifyQueueTailLock in EXCLUSIVE mode, backends can
+ * change the tail pointer.
  *
  * AsyncCtlLock is used as the control lock for the pg_notify SLRU buffers.
- * In order to avoid deadlocks, whenever we need both locks, we always first
- * get AsyncQueueLock and then AsyncCtlLock.
+ * In order to avoid deadlocks, whenever we need multiple locks, we first get
+ * NotifyQueueTailLock, then AsyncQueueLock, and lastly AsyncCtlLock.
  *
  * Each backend uses the backend[] array entry with index equal to its
  * BackendId (which can range from 1 to MaxBackends). We rely on this to make
@@ -2013,6 +2016,10 @@ asyncQueueAdvanceTail(void)
 	int			newtailpage;
 	int			boundary;
 
+	/* Restrict task to one backend per cluster; see SimpleLruTruncate(). */
+	LWLockAcquire(NotifyQueueTailLock, LW_EXCLUSIVE);
+
+	/* Compute the new tail. */
 	LWLockAcquire(AsyncQueueLock, LW_EXCLUSIVE);
 	min = QUEUE_HEAD;
 	for (i = 1; i <= MaxBackends; i++)
@@ -2021,7 +2028,6 @@ asyncQueueAdvanceTail(void)
 		min = QUEUE_POS_MIN(min, QUEUE_BACKEND_POS(i));
 	}
 	oldtailpage = QUEUE_POS_PAGE(QUEUE_TAIL);
-	QUEUE_TAIL = min;
 	LWLockRelease(AsyncQueueLock);
 
 	/*
@@ -2041,6 +2047,17 @@ asyncQueueAdvanceTail(void)
 	 */
 	SimpleLruTruncate(AsyncCtl, newtailpage);
 	}
+
+	/*
+	 * Advertise the new tail. This changes asyncQueueIsFull()'s verdict for
+	 * the segment immediately prior to the new tail, allowing fresh data into
+	 * that segment.
+	 */
+	LWLockAcquire(AsyncQueueLock, LW_EXCLUSIVE);
+	QUEUE_TAIL = min;
+	LWLockRelease(AsyncQueueLock);
+
+	LWLockRelease(NotifyQueueTailLock);
 }
 
 /*
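
The heart of the fix is visible in asyncQueueAdvanceTail(): the dedicated NotifyQueueTailLock restricts truncation to one backend at a time, and QUEUE_TAIL now moves only after SimpleLruTruncate() has removed the old pages, so no backend can start filling a segment that a concurrent truncator is about to unlink. Below is a minimal standalone model of that pattern using POSIX threads; the names here (tail_lock, queue_lock, advance_tail, truncate_storage) are invented for the sketch and are not PostgreSQL APIs:

    /*
     * Standalone model of the commit's locking pattern (not PostgreSQL code).
     * tail_lock plays the role of NotifyQueueTailLock: it serializes tail
     * advancement, and hence truncation.  queue_lock plays the role of
     * AsyncQueueLock and protects the shared head/tail pointers.
     */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t tail_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

    static long queue_head = 100;   /* protected by queue_lock */
    static long queue_tail = 40;    /* protected by queue_lock */

    /* Stand-in for SimpleLruTruncate(): must never run concurrently. */
    static void
    truncate_storage(long new_tail)
    {
        printf("removing segments before position %ld\n", new_tail);
    }

    static void
    advance_tail(void)
    {
        long    min;

        /* Lock order: tail_lock first, so there is exactly one truncator. */
        pthread_mutex_lock(&tail_lock);

        /* Compute the new tail under the queue lock. */
        pthread_mutex_lock(&queue_lock);
        min = queue_head;       /* the real code also folds in reader positions */
        pthread_mutex_unlock(&queue_lock);

        /* Truncate while holding only tail_lock; this may be slow I/O. */
        truncate_storage(min);

        /* Advertise the new tail only after the old pages are gone. */
        pthread_mutex_lock(&queue_lock);
        queue_tail = min;
        pthread_mutex_unlock(&queue_lock);

        pthread_mutex_unlock(&tail_lock);
    }

    int
    main(void)
    {
        advance_tail();
        printf("tail advanced to %ld\n", queue_tail);
        return 0;
    }

The model mirrors the commit's deadlock-avoidance rule: tail_lock is always taken before queue_lock and never the other way around, so ordinary readers and writers of the queue pointers never wait on the (potentially long) truncation path.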