Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/hash/hash.c         |  2
-rw-r--r--  src/backend/access/heap/pruneheap.c    |  2
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c  |  4
-rw-r--r--  src/backend/access/nbtree/nbtree.c     |  2
-rw-r--r--  src/backend/access/nbtree/nbtutils.c   |  2
-rw-r--r--  src/backend/access/rmgrdesc/xlogdesc.c |  4
-rw-r--r--  src/backend/access/transam/xlog.c      | 18
7 files changed, 17 insertions, 17 deletions
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 5ca27a231e2..8895f585034 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -287,7 +287,7 @@ hashgettuple(PG_FUNCTION_ARGS)
 			/*
 			 * Since this can be redone later if needed, mark as a hint.
 			 */
-			MarkBufferDirtyHint(buf);
+			MarkBufferDirtyHint(buf, true);
 		}
 
 		/*
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 2ab723ddf19..c6e31542935 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -262,7 +262,7 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin,
 		{
 			((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
 			PageClearFull(page);
-			MarkBufferDirtyHint(buffer);
+			MarkBufferDirtyHint(buffer, true);
 		}
 	}
 
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 6ad4f765f5b..a452fea8410 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -413,9 +413,9 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
 					 * crucial. Be sure to mark the proper buffer dirty.
 					 */
 					if (nbuf != InvalidBuffer)
-						MarkBufferDirtyHint(nbuf);
+						MarkBufferDirtyHint(nbuf, true);
 					else
-						MarkBufferDirtyHint(buf);
+						MarkBufferDirtyHint(buf, true);
 				}
 			}
 		}
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 621b0556390..073190ffd53 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -1052,7 +1052,7 @@ restart:
 		opaque->btpo_cycleid == vstate->cycleid)
 	{
 		opaque->btpo_cycleid = 0;
-		MarkBufferDirtyHint(buf);
+		MarkBufferDirtyHint(buf, true);
 	}
 }
 
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index fe53ec1fe0a..352c77cbea2 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -1789,7 +1789,7 @@ _bt_killitems(IndexScanDesc scan, bool haveLock)
 	if (killedsomething)
 	{
 		opaque->btpo_flags |= BTP_HAS_GARBAGE;
-		MarkBufferDirtyHint(so->currPos.buf);
+		MarkBufferDirtyHint(so->currPos.buf, true);
 	}
 
 	if (!haveLock)
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index 2bad52748a3..12370521d45 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -82,11 +82,11 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
 
 		appendStringInfo(buf, "restore point: %s", xlrec->rp_name);
 	}
-	else if (info == XLOG_HINT)
+	else if (info == XLOG_FPI)
 	{
 		BkpBlock   *bkp = (BkpBlock *) rec;
 
-		appendStringInfo(buf, "page hint: %s block %u",
+		appendStringInfo(buf, "full-page image: %s block %u",
 						 relpathperm(bkp->node, bkp->fork),
 						 bkp->block);
 	}
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 654c9c18d8b..9f858995d12 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7681,12 +7681,9 @@ XLogRestorePoint(const char *rpName)
  * records. In that case, multiple copies of the same block would be recorded
  * in separate WAL records by different backends, though that is still OK from
  * a correctness perspective.
- *
- * Note that this only works for buffers that fit the standard page model,
- * i.e. those for which buffer_std == true
  */
 XLogRecPtr
-XLogSaveBufferForHint(Buffer buffer)
+XLogSaveBufferForHint(Buffer buffer, bool buffer_std)
 {
 	XLogRecPtr	recptr = InvalidXLogRecPtr;
 	XLogRecPtr	lsn;
@@ -7708,7 +7705,7 @@ XLogSaveBufferForHint(Buffer buffer)
 	 * and reset rdata for any actual WAL record insert.
 	 */
 	rdata[0].buffer = buffer;
-	rdata[0].buffer_std = true;
+	rdata[0].buffer_std = buffer_std;
 
 	/*
 	 * Check buffer while not holding an exclusive lock.
@@ -7722,6 +7719,9 @@ XLogSaveBufferForHint(Buffer buffer)
 		 * Copy buffer so we don't have to worry about concurrent hint bit or
 		 * lsn updates. We assume pd_lower/upper cannot be changed without an
 		 * exclusive lock, so the contents bkp are not racy.
+		 *
+		 * With buffer_std set to false, XLogCheckBuffer() sets hole_length and
+		 * hole_offset to 0; so the following code is safe for either case.
 		 */
 		memcpy(copied_buffer, origdata, bkpb.hole_offset);
 		memcpy(copied_buffer + bkpb.hole_offset,
@@ -7744,7 +7744,7 @@
 		rdata[1].buffer = InvalidBuffer;
 		rdata[1].next = NULL;
 
-		recptr = XLogInsert(RM_XLOG_ID, XLOG_HINT, rdata);
+		recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI, rdata);
 	}
 	return recptr;
 }
@@ -8109,14 +8109,14 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
 	{
 		/* nothing to do here */
 	}
-	else if (info == XLOG_HINT)
+	else if (info == XLOG_FPI)
 	{
 		char	   *data;
 		BkpBlock	bkpb;
 
 		/*
-		 * Hint bit records contain a backup block stored "inline" in the
-		 * normal data since the locking when writing hint records isn't
+		 * Full-page image (FPI) records contain a backup block stored "inline"
+		 * in the normal data since the locking when writing hint records isn't
 		 * sufficient to use the normal backup block mechanism, which assumes
 		 * exclusive lock on the buffer supplied.
 		 *
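For readers following the API change: the sketch below is not part of the commit. It is a minimal, hypothetical caller (the function name example_mark_item_dead and the choice of ItemIdMarkDead as the hint-style change are made up for illustration) showing how the new second argument of MarkBufferDirtyHint() is meant to be used. A page that follows the standard pd_lower/pd_upper layout passes true, which lets XLogSaveBufferForHint() omit the free-space hole from the full-page image; a page with a non-standard layout would pass false so the whole block is backed up. The caller is assumed to already hold a pin and at least a shared lock on the buffer.

#include "postgres.h"

#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "storage/itemid.h"
#include "storage/off.h"

/*
 * Hypothetical example: flip an advisory (hint-style) bit on a page that
 * uses the standard page layout, then report the change to the buffer
 * manager.  Losing the bit after a crash is harmless; it can be set again.
 */
static void
example_mark_item_dead(Buffer buf, OffsetNumber offnum)
{
	Page		page = BufferGetPage(buf);
	ItemId		itemid = PageGetItemId(page, offnum);

	ItemIdMarkDead(itemid);

	/*
	 * No regular WAL record is written for a hint.  MarkBufferDirtyHint()
	 * decides internally whether a full-page image (an XLOG_FPI record) has
	 * to be emitted first, e.g. when data checksums are enabled, by going
	 * through XLogSaveBufferForHint().  The second argument says the page
	 * uses the standard layout, so the pd_lower/pd_upper hole can be
	 * skipped; a non-standard page would pass false here.
	 */
	MarkBufferDirtyHint(buf, true);
}

On replay, an XLOG_FPI record written this way is handled by the xlog_redo() branch shown above, which simply restores the backup block carried inline in the record.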