| commit    | ae86e46c3b7b90903d89b65385e2b77d2c1cf63e |
|-----------|------------------------------------------|
| author    | Noah Misch <noah@leadboat.com> | 2020-03-21 09:38:33 -0700 |
| committer | Noah Misch <noah@leadboat.com> | 2020-03-21 09:38:33 -0700 |
| tree      | 78857d63bad6c56c73cbf5488b28365b46e5fb43 /src/backend/access/transam/xloginsert.c |
| parent    | 4433c6e8c0a5d0891c9bb52d14e73464086c0375 |
Back-patch log_newpage_range().
Back-patch a subset of commit 9155580fd5fc2a0cbb23376dfca7cd21f59c2c7b
to v11, v10, 9.6, and 9.5. Include the latest repairs to this function.
Use a new XLOG_FPI_MULTI value instead of reusing XLOG_FPI. That way,
if an older server reads WAL from this function, that server will PANIC
instead of applying just one page of the record. The next commit adds a
call to this function.
Discussion: https://postgr.es/m/20200304.162919.898938381201316571.horikyota.ntt@gmail.com
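To see why reusing XLOG_FPI would have been hazardous: on these branches, redo for XLOG_FPI restores exactly one page, so an unpatched server replaying a multi-page record with that info code would silently apply only its first page. Below is a minimal sketch of the redo dispatch this implies. It is illustrative only: the corresponding xlog.c hunk is filtered out of this cgit view, and the exact shape of the two arms is an assumption based on the master-branch commit being back-patched.

/*
 * Illustrative sketch of the xlog_redo() dispatch, NOT the committed hunk.
 * Assumes the usual PostgreSQL backend headers (access/xlogreader.h,
 * access/xlogutils.h, storage/bufmgr.h).
 */
static void
xlog_redo_sketch(XLogReaderState *record)
{
    uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;

    if (info == XLOG_FPI)
    {
        /* Pre-existing path: restores exactly one page (block 0). */
        Buffer      buffer;

        if (XLogReadBufferForRedo(record, 0, &buffer) != BLK_RESTORED)
            elog(ERROR, "unexpected XLogReadBufferForRedo result when restoring backup block");
        UnlockReleaseBuffer(buffer);
    }
    else if (info == XLOG_FPI_MULTI)
    {
        /* New path: restores every block registered in the record. */
        uint8       block_id;

        for (block_id = 0; block_id <= record->max_block_id; block_id++)
        {
            Buffer      buffer;

            if (XLogReadBufferForRedo(record, block_id, &buffer) != BLK_RESTORED)
                elog(ERROR, "unexpected XLogReadBufferForRedo result when restoring backup block");
            UnlockReleaseBuffer(buffer);
        }
    }
    else
    {
        /*
         * An unpatched server has no XLOG_FPI_MULTI arm, so it lands here
         * and PANICs rather than half-applying a multi-page record.
         */
        elog(PANIC, "xlog_redo: unknown op code %u", info);
    }
}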
Diffstat (limited to 'src/backend/access/transam/xloginsert.c')
-rw-r--r-- | src/backend/access/transam/xloginsert.c | 88 |
1 file changed, 88 insertions(+), 0 deletions(-)
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 34d4db42977..c033e7bd4cf 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -1022,6 +1022,94 @@ log_newpage_buffer(Buffer buffer, bool page_std)
 }
 
 /*
+ * WAL-log a range of blocks in a relation.
+ *
+ * An image of all pages with block numbers 'startblk' <= X < 'endblk' is
+ * written to the WAL. If the range is large, this is done in multiple WAL
+ * records.
+ *
+ * If all pages follow the standard page layout, with a PageHeader and unused
+ * space between pd_lower and pd_upper, set 'page_std' to true. That allows
+ * the unused space to be left out from the WAL records, making them smaller.
+ *
+ * NOTE: This function acquires exclusive locks on the pages. Typically, this
+ * is used on a newly-built relation, and the caller is holding an
+ * AccessExclusiveLock on it, so no other backend can be accessing it at the
+ * same time. If that's not the case, you must ensure that this does not
+ * cause a deadlock through some other means.
+ */
+void
+log_newpage_range(Relation rel, ForkNumber forkNum,
+                  BlockNumber startblk, BlockNumber endblk,
+                  bool page_std)
+{
+    int         flags;
+    BlockNumber blkno;
+
+    flags = REGBUF_FORCE_IMAGE;
+    if (page_std)
+        flags |= REGBUF_STANDARD;
+
+    /*
+     * Iterate over all the pages in the range. They are collected into
+     * batches of XLR_MAX_BLOCK_ID pages, and a single WAL record is written
+     * for each batch.
+     */
+    XLogEnsureRecordSpace(XLR_MAX_BLOCK_ID - 1, 0);
+
+    blkno = startblk;
+    while (blkno < endblk)
+    {
+        Buffer      bufpack[XLR_MAX_BLOCK_ID];
+        XLogRecPtr  recptr;
+        int         nbufs;
+        int         i;
+
+        CHECK_FOR_INTERRUPTS();
+
+        /* Collect a batch of blocks. */
+        nbufs = 0;
+        while (nbufs < XLR_MAX_BLOCK_ID && blkno < endblk)
+        {
+            Buffer      buf = ReadBufferExtended(rel, forkNum, blkno,
+                                                 RBM_NORMAL, NULL);
+
+            LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+
+            /*
+             * Completely empty pages are not WAL-logged. Writing a WAL
+             * record would change the LSN, and we don't want that. We want
+             * the page to stay empty.
+             */
+            if (!PageIsNew(BufferGetPage(buf)))
+                bufpack[nbufs++] = buf;
+            else
+                UnlockReleaseBuffer(buf);
+            blkno++;
+        }
+
+        /* Write WAL record for this batch. */
+        XLogBeginInsert();
+
+        START_CRIT_SECTION();
+        for (i = 0; i < nbufs; i++)
+        {
+            XLogRegisterBuffer(i, bufpack[i], flags);
+            MarkBufferDirty(bufpack[i]);
+        }
+
+        recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_MULTI);
+
+        for (i = 0; i < nbufs; i++)
+        {
+            PageSetLSN(BufferGetPage(bufpack[i]), recptr);
+            UnlockReleaseBuffer(bufpack[i]);
+        }
+        END_CRIT_SECTION();
+    }
+}
+
+/*
  * Allocate working buffers needed for WAL record construction.
  */
 void
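The caller promised by the commit message ("The next commit adds a call to this function") is not part of this diff. As a hedged usage sketch, a caller might look like the following; the helper name wal_log_whole_fork is hypothetical, not from the commit:

/*
 * Hypothetical caller, for illustration only: WAL-log every page of a
 * just-built fork, holding AccessExclusiveLock per the NOTE above.
 * Assumes PostgreSQL backend headers storage/bufmgr.h and
 * access/xloginsert.h.
 */
static void
wal_log_whole_fork(Relation rel, ForkNumber forkNum)
{
    BlockNumber nblocks = RelationGetNumberOfBlocksInFork(rel, forkNum);

    /* Pages 0 .. nblocks-1; 'true' because they use the standard layout. */
    log_newpage_range(rel, forkNum, 0, nblocks, true);
}

Since XLR_MAX_BLOCK_ID is 32, logging a fork of 100 non-empty pages this way would emit four XLOG_FPI_MULTI records (batches of 32, 32, 32, and 4); completely empty pages are skipped and do not count toward a batch.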