-rw-r--r--   src/backend/access/heap/heapam.c    38
-rw-r--r--   src/backend/commands/vacuumlazy.c   86
-rw-r--r--   src/include/access/heapam.h          2
3 files changed, 122 insertions, 4 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index b2d19016e76..81422afa2f8 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -3841,6 +3841,44 @@ recheck_xvac:
return changed;
}
+/*
+ * heap_tuple_needs_freeze
+ *
+ * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
+ * are older than the specified cutoff XID. If so, return TRUE.
+ *
+ * It doesn't matter whether the tuple is alive or dead; we are checking
+ * to see if a tuple needs to be removed or frozen to avoid wraparound.
+ */
+bool
+heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
+ Buffer buf)
+{
+ TransactionId xid;
+
+ xid = HeapTupleHeaderGetXmin(tuple);
+ if (TransactionIdIsNormal(xid) &&
+ TransactionIdPrecedes(xid, cutoff_xid))
+ return true;
+
+ if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
+ {
+ xid = HeapTupleHeaderGetXmax(tuple);
+ if (TransactionIdIsNormal(xid) &&
+ TransactionIdPrecedes(xid, cutoff_xid))
+ return true;
+ }
+
+ if (tuple->t_infomask & HEAP_MOVED)
+ {
+ xid = HeapTupleHeaderGetXvac(tuple);
+ if (TransactionIdIsNormal(xid) &&
+ TransactionIdPrecedes(xid, cutoff_xid))
+ return true;
+ }
+
+ return false;
+}
/* ----------------
* heap_markpos - mark scan position
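The new heap_tuple_needs_freeze() is a read-only counterpart to the existing
heap_freeze_tuple(): it only reports whether any XID on the tuple precedes the
cutoff, so it can run under a plain share lock. A minimal editorial sketch of
the intended pairing follows (not part of the patch; maybe_freeze() is a
hypothetical helper, and real callers such as lazy_scan_heap must hold the
buffer cleanup lock and WAL-log the freeze):

#include "postgres.h"
#include "access/heapam.h"
#include "storage/bufmgr.h"

static void
maybe_freeze(HeapTupleHeader tup, TransactionId freeze_limit, Buffer buf)
{
    /* Cheap, read-only test: safe under a plain share lock. */
    if (!heap_tuple_needs_freeze(tup, freeze_limit, buf))
        return;

    /* Mutating path: the caller must already hold the cleanup lock here. */
    if (heap_freeze_tuple(tup, freeze_limit, buf))
        MarkBufferDirty(buf);   /* WAL-logging omitted for brevity */
}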
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index b197b45c127..bbf8b8d084d 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -117,6 +117,7 @@ static BufferAccessStrategy vac_strategy;
static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
Relation *Irel, int nindexes, bool scan_all);
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
+static bool lazy_check_needs_freeze(Buffer buf);
static void lazy_vacuum_index(Relation indrel,
IndexBulkDeleteResult **stats,
LVRelStats *vacrelstats);
@@ -453,8 +454,6 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
vacuum_delay_point();
- vacrelstats->scanned_pages++;
-
/*
* If we are close to overrunning the available space for dead-tuple
* TIDs, pause and do a cycle of vacuuming before we tackle this page.
@@ -486,7 +485,41 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
RBM_NORMAL, vac_strategy);
/* We need buffer cleanup lock so that we can prune HOT chains. */
- LockBufferForCleanup(buf);
+ if (!ConditionalLockBufferForCleanup(buf))
+ {
+ /*
+ * It's OK to skip vacuuming a page, as long as it doesn't have data
+ * that needs to be cleaned for wraparound avoidance.
+ */
+ if (!scan_all)
+ {
+ ReleaseBuffer(buf);
+ continue;
+ }
+
+ /*
+ * If this is a wraparound-checking vacuum, we read the page with a
+ * share lock to see if any XIDs need to be frozen. If the page
+ * doesn't need attention we just skip it and continue. If it does,
+ * we wait for the cleanup lock.
+ *
+ * We could defer the lock request further by remembering the page
+ * and coming back to it later, or we could even register
+ * ourselves for multiple buffers and then service whichever one
+ * is received first. For now, this seems good enough.
+ */
+ LockBuffer(buf, BUFFER_LOCK_SHARE);
+ if (!lazy_check_needs_freeze(buf))
+ {
+ UnlockReleaseBuffer(buf);
+ continue;
+ }
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+ LockBufferForCleanup(buf);
+ /* drop through to normal processing */
+ }
+
+ vacrelstats->scanned_pages++;
page = BufferGetPage(buf);
@@ -932,7 +965,8 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples[tupindex]);
buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
vac_strategy);
- LockBufferForCleanup(buf);
+ if (!ConditionalLockBufferForCleanup(buf))
+ continue;
tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats);
/* Now that we've compacted the page, record its available space */
@@ -1010,6 +1044,50 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
}
/*
+ * lazy_check_needs_freeze() -- scan page to see if any tuples
+ * need to be cleaned to avoid wraparound
+ *
+ * Returns true if the page needs to be vacuumed using the cleanup lock.
+ */
+static bool
+lazy_check_needs_freeze(Buffer buf)
+{
+ Page page;
+ OffsetNumber offnum,
+ maxoff;
+ HeapTupleHeader tupleheader;
+
+ page = BufferGetPage(buf);
+
+ if (PageIsNew(page) || PageIsEmpty(page))
+ {
+ /* PageIsNew probably shouldn't happen... */
+ return false;
+ }
+
+ maxoff = PageGetMaxOffsetNumber(page);
+ for (offnum = FirstOffsetNumber;
+ offnum <= maxoff;
+ offnum = OffsetNumberNext(offnum))
+ {
+ ItemId itemid;
+
+ itemid = PageGetItemId(page, offnum);
+
+ if (!ItemIdIsNormal(itemid))
+ continue;
+
+ tupleheader = (HeapTupleHeader) PageGetItem(page, itemid);
+
+ if (heap_tuple_needs_freeze(tupleheader, FreezeLimit, buf))
+ return true;
+ } /* scan along page */
+
+ return false;
+}
+
+
+/*
* lazy_vacuum_index() -- vacuum one index relation.
*
* Delete all the index entries pointing to tuples listed in
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 776ea5c4cc5..85cbeb3273b 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -111,6 +111,8 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
extern void heap_inplace_update(Relation relation, HeapTuple tuple);
extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
Buffer buf);
+extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
+ Buffer buf);
extern Oid simple_heap_insert(Relation relation, HeapTuple tup);
extern void simple_heap_delete(Relation relation, ItemPointer tid);
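
Taken together, the vacuumlazy.c changes amount to a per-page locking decision:
take the cleanup lock if it is immediately free, otherwise skip the page,
unless this is an anti-wraparound (scan_all) vacuum and a share-locked peek
shows XIDs old enough to need freezing, in which case wait for the cleanup
lock. A condensed editorial sketch of that decision, not part of the patch
(acquire_page_for_vacuum() and needs_wraparound_freeze() are hypothetical
stand-ins for the lazy_scan_heap logic and lazy_check_needs_freeze()):

#include "postgres.h"
#include "storage/bufmgr.h"

/* Hypothetical stand-in for lazy_check_needs_freeze(). */
extern bool needs_wraparound_freeze(Buffer buf);

/*
 * Per-page decision introduced by the patch: only wait for the cleanup
 * lock when an anti-wraparound (scan_all) vacuum finds XIDs on the page
 * older than the freeze cutoff.
 */
static bool
acquire_page_for_vacuum(Buffer buf, bool scan_all)
{
    if (ConditionalLockBufferForCleanup(buf))
        return true;            /* got the cleanup lock without waiting */

    if (!scan_all)
    {
        ReleaseBuffer(buf);     /* ordinary vacuum: just skip the page */
        return false;
    }

    /* Anti-wraparound vacuum: peek under a share lock first. */
    LockBuffer(buf, BUFFER_LOCK_SHARE);
    if (!needs_wraparound_freeze(buf))
    {
        UnlockReleaseBuffer(buf);
        return false;           /* nothing old enough; safe to skip */
    }
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    LockBufferForCleanup(buf);  /* must wait: page has old XIDs */
    return true;
}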