Diffstat (limited to 'src/backend/access/transam/xlogutils.c')
-rw-r--r--  src/backend/access/transam/xlogutils.c  121
1 file changed, 121 insertions, 0 deletions
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index ef827dbc404..1a21dac8538 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -456,6 +456,127 @@ XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
    return buffer;
}
+/*
+ * Restore a full-page image from a backup block attached to an XLOG record.
+ *
+ * lsn: LSN of the XLOG record being replayed
+ * record: the complete XLOG record
+ * block_index: which backup block to restore (0 .. XLR_MAX_BKP_BLOCKS - 1)
+ * get_cleanup_lock: TRUE to get a cleanup rather than plain exclusive lock
+ * keep_buffer: TRUE to return the buffer still locked and pinned
+ *
+ * Returns the buffer number containing the page. Note this is not terribly
+ * useful unless keep_buffer is specified as TRUE.
+ *
+ * Note: when a backup block is available in XLOG, we restore it
+ * unconditionally, even if the page in the database appears newer.
+ * This is to protect ourselves against database pages that were partially
+ * or incorrectly written during a crash. We assume that the XLOG data
+ * must be good because it has passed a CRC check, while the database
+ * page might not be. This will force us to replay all subsequent
+ * modifications of the page that appear in XLOG, rather than possibly
+ * ignoring them as already applied, but that's not a huge drawback.
+ *
+ * If 'get_cleanup_lock' is true, a cleanup lock is obtained on the buffer,
+ * else a normal exclusive lock is used. During crash recovery, that's just
+ * pro forma because there can't be any regular backends in the system, but
+ * in hot standby mode the distinction is important.
+ *
+ * If 'keep_buffer' is true, return without releasing the buffer lock and pin;
+ * then caller is responsible for doing UnlockReleaseBuffer() later. This
+ * is needed in some cases when replaying XLOG records that touch multiple
+ * pages, to prevent inconsistent states from being visible to other backends.
+ * (Again, that's only important in hot standby mode.)
+ */
+Buffer
+RestoreBackupBlock(XLogRecPtr lsn, XLogRecord *record, int block_index,
+                   bool get_cleanup_lock, bool keep_buffer)
+{
+    BkpBlock    bkpb;
+    char       *blk;
+    int         i;
+
+    /* Locate requested BkpBlock in the record */
+    blk = (char *) XLogRecGetData(record) + record->xl_len;
+    for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
+    {
+        if (!(record->xl_info & XLR_BKP_BLOCK(i)))
+            continue;
+
+        memcpy(&bkpb, blk, sizeof(BkpBlock));
+        blk += sizeof(BkpBlock);
+
+        if (i == block_index)
+        {
+            /* Found it, apply the update */
+            return RestoreBackupBlockContents(lsn, bkpb, blk, get_cleanup_lock,
+                                              keep_buffer);
+        }
+
+        blk += BLCKSZ - bkpb.hole_length;
+    }
+
+    /* Caller specified a bogus block_index */
+    elog(ERROR, "failed to restore block_index %d", block_index);
+    return InvalidBuffer;       /* keep compiler quiet */
+}
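
The following sketch (not part of this patch) illustrates how a resource manager's redo routine would typically consume a backup block through RestoreBackupBlock(); the function name example_redo and the assumption that the record carries its only backup block at index 0 are for illustration:

static void
example_redo(XLogRecPtr lsn, XLogRecord *record)
{
    /* A full-page image makes normal replay of this block unnecessary */
    if (record->xl_info & XLR_BKP_BLOCK(0))
    {
        (void) RestoreBackupBlock(lsn, record, 0, false, false);
        return;
    }

    /* ... otherwise re-apply the logged change to the existing page ... */
}

Because a restored image is applied unconditionally (see the comment above), the redo routine does not also re-apply its own change to that block.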
+
+/*
+ * Workhorse for RestoreBackupBlock usable without an xlog record
+ *
+ * Restores a full-page image from BkpBlock and a data pointer.
+ */
+Buffer
+RestoreBackupBlockContents(XLogRecPtr lsn, BkpBlock bkpb, char *blk,
+                           bool get_cleanup_lock, bool keep_buffer)
+{
+    Buffer      buffer;
+    Page        page;
+
+    buffer = XLogReadBufferExtended(bkpb.node, bkpb.fork, bkpb.block,
+                                    RBM_ZERO);
+    Assert(BufferIsValid(buffer));
+    if (get_cleanup_lock)
+        LockBufferForCleanup(buffer);
+    else
+        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+
+    page = (Page) BufferGetPage(buffer);
+
+    if (bkpb.hole_length == 0)
+    {
+        memcpy((char *) page, blk, BLCKSZ);
+    }
+    else
+    {
+        memcpy((char *) page, blk, bkpb.hole_offset);
+        /* must zero-fill the hole */
+        MemSet((char *) page + bkpb.hole_offset, 0, bkpb.hole_length);
+        memcpy((char *) page + (bkpb.hole_offset + bkpb.hole_length),
+               blk + bkpb.hole_offset,
+               BLCKSZ - (bkpb.hole_offset + bkpb.hole_length));
+    }
+
+    /*
+     * The checksum value on this page is currently invalid. We don't need to
+     * reset it here since it will be set before being written.
+     */
+
+    /*
+     * The page may be uninitialized. If so, we can't set the LSN because that
+     * would corrupt the page.
+     */
+    if (!PageIsNew(page))
+    {
+        PageSetLSN(page, lsn);
+    }
+    MarkBufferDirty(buffer);
+
+    if (!keep_buffer)
+        UnlockReleaseBuffer(buffer);
+
+    return buffer;
+}
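
A second sketch (again not part of this patch) shows the keep_buffer contract described above for a record that touches more than one page: the first restored buffer stays locked and pinned until the whole record has been applied, after which the caller releases it with UnlockReleaseBuffer(). The name example_redo_two_pages and the block layout are assumptions for illustration:

static void
example_redo_two_pages(XLogRecPtr lsn, XLogRecord *record)
{
    Buffer      buf0 = InvalidBuffer;

    /* Hold the first page's lock so no half-applied state becomes visible */
    if (record->xl_info & XLR_BKP_BLOCK(0))
        buf0 = RestoreBackupBlock(lsn, record, 0, false, true);

    /* ... restore or replay the change to the record's second page here ... */

    /* keep_buffer = true means the caller must unlock and unpin */
    if (BufferIsValid(buf0))
        UnlockReleaseBuffer(buf0);
}

As noted in the comment above, keeping the buffer locked across the whole record only matters in hot standby, where other backends may be reading pages during replay.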
/*
* Struct actually returned by XLogFakeRelcacheEntry, though the declared