-rw-r--r--  src/backend/access/heap/visibilitymap.c    21
-rw-r--r--  src/backend/storage/freespace/freespace.c  21
2 files changed, 40 insertions, 2 deletions
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index b251e697034..239a10f5509 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -610,11 +610,30 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
* Use ZERO_ON_ERROR mode, and initialize the page if necessary. It's
* always safe to clear bits, so it's better to clear corrupt pages than
* error out.
+ *
+ * The initialize-the-page part is trickier than it looks, because of the
+ * possibility of multiple backends doing this concurrently, and our
+ * desire to not uselessly take the buffer lock in the normal path where
+ * the page is OK. We must take the lock to initialize the page, so
+ * recheck page newness after we have the lock, in case someone else
+ * already did it. Also, because we initially check PageIsNew with no
+ * lock, it's possible to fall through and return the buffer while someone
+ * else is still initializing the page (i.e., we might see pd_upper as set
+ * but other page header fields are still zeroes). This is harmless for
+ * callers that will take a buffer lock themselves, but some callers
+ * inspect the page without any lock at all. The latter is OK only so
+ * long as it doesn't depend on the page header having correct contents.
+ * Current usage is safe because PageGetContents() does not require that.
*/
	buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
							 RBM_ZERO_ON_ERROR, NULL);
	if (PageIsNew(BufferGetPage(buf)))
-		PageInit(BufferGetPage(buf), BLCKSZ, 0);
+	{
+		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+		if (PageIsNew(BufferGetPage(buf)))
+			PageInit(BufferGetPage(buf), BLCKSZ, 0);
+		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+	}
	return buf;
}
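
The hunk above (and the matching one in freespace.c below) is the classic check / lock / re-check pattern: a cheap unlocked PageIsNew() test for the common already-initialized case, then an exclusive buffer lock and a second test before PageInit(), so two backends cannot both initialize the page. As a stand-alone illustration, a sketch of the same pattern with the buffer-manager pieces replaced by hypothetical pthread-based stand-ins (none of the names below are real PostgreSQL APIs) could look like this:

/* Hypothetical stand-ins; only the locking protocol mirrors the patch. */
#include <pthread.h>
#include <stdbool.h>
#include <string.h>

#define FAKE_BLCKSZ 8192

typedef struct
{
	char			 data[FAKE_BLCKSZ];	/* all-zeroes counts as a "new" page */
	pthread_rwlock_t content_lock;		/* stands in for the buffer content lock */
} FakeBuffer;

static bool
fake_page_is_new(const FakeBuffer *buf)
{
	/* PageIsNew() only tests pd_upper == 0; mimic that with a single byte */
	return buf->data[0] == 0;
}

static void
fake_page_init(FakeBuffer *buf)
{
	memset(buf->data, 0, FAKE_BLCKSZ);
	buf->data[0] = 1;					/* pretend a page header was written */
}

/*
 * Unlocked check first, then lock and re-check before initializing.  The
 * common already-initialized case takes no lock, and two concurrent callers
 * cannot both run fake_page_init().
 */
static FakeBuffer *
fake_readbuf(FakeBuffer *buf)
{
	if (fake_page_is_new(buf))
	{
		pthread_rwlock_wrlock(&buf->content_lock);
		if (fake_page_is_new(buf))
			fake_page_init(buf);
		pthread_rwlock_unlock(&buf->content_lock);
	}
	return buf;
}

Note that the unlocked first check is only acceptable because, as the new comment explains, callers here tolerate seeing a page that is still being initialized; the pattern is not a general substitute for taking the lock.
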
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index 65c4e74999f..8d0ee7fc937 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -580,10 +580,29 @@ fsm_readbuf(Relation rel, FSMAddress addr, bool extend)
* pages than error out. Since the FSM changes are not WAL-logged, the
* so-called torn page problem on crash can lead to pages with corrupt
* headers, for example.
+ *
+ * The initialize-the-page part is trickier than it looks, because of the
+ * possibility of multiple backends doing this concurrently, and our
+ * desire to not uselessly take the buffer lock in the normal path where
+ * the page is OK. We must take the lock to initialize the page, so
+ * recheck page newness after we have the lock, in case someone else
+ * already did it. Also, because we initially check PageIsNew with no
+ * lock, it's possible to fall through and return the buffer while someone
+ * else is still initializing the page (i.e., we might see pd_upper as set
+ * but other page header fields are still zeroes). This is harmless for
+ * callers that will take a buffer lock themselves, but some callers
+ * inspect the page without any lock at all. The latter is OK only so
+ * long as it doesn't depend on the page header having correct contents.
+ * Current usage is safe because PageGetContents() does not require that.
*/
	buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);
	if (PageIsNew(BufferGetPage(buf)))
-		PageInit(BufferGetPage(buf), BLCKSZ, 0);
+	{
+		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+		if (PageIsNew(BufferGetPage(buf)))
+			PageInit(BufferGetPage(buf), BLCKSZ, 0);
+		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+	}
	return buf;
}
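
The closing claim of the new comment, that current usage is safe because PageGetContents() does not depend on the page header's contents, follows from PageGetContents() being a pure pointer-offset computation: it never reads pd_lower, pd_upper, or any other header field, so a lockless reader racing with PageInit() sees either zeroed bytes or the initialized map, never a dereference through a half-written header. From memory, the macro in src/include/storage/bufpage.h has roughly the following shape; the supporting definitions are stubbed below and their exact values are assumptions:

#include <stddef.h>

typedef void *Page;

/* Stand-ins: in PostgreSQL these come from c.h and bufpage.h. */
#define MAXIMUM_ALIGNOF			8
#define MAXALIGN(LEN)			(((size_t) (LEN) + (MAXIMUM_ALIGNOF - 1)) & ~((size_t) (MAXIMUM_ALIGNOF - 1)))
#define SizeOfPageHeaderData	24	/* really offsetof(PageHeaderData, pd_linp) */

/* Approximate shape of the real macro: a constant offset from the page start. */
#define PageGetContents(page) \
	((char *) (page) + MAXALIGN(SizeOfPageHeaderData))
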