Diffstat (limited to 'src/backend/commands/vacuumlazy.c')
 src/backend/commands/vacuumlazy.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 20ce431e46c..f95346acdb5 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -467,6 +467,8 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 				blkno;
 	HeapTupleData tuple;
 	char	   *relname;
+	TransactionId relfrozenxid = onerel->rd_rel->relfrozenxid;
+	TransactionId relminmxid = onerel->rd_rel->relminmxid;
 	BlockNumber empty_pages,
 				vacuumed_pages;
 	double		num_tuples,
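The two added locals capture the relation's frozen-XID and minimum-MultiXactId horizons from its cached pg_class row (onerel->rd_rel), making them available to the per-tuple freeze check further down in lazy_scan_heap().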
@@ -1004,6 +1006,13 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 					 * tuple, we choose to keep it, because it'll be a lot
 					 * cheaper to get rid of it in the next pruning pass than
 					 * to treat it like an indexed tuple.
+					 *
+					 * If this were to happen for a tuple that actually needed
+					 * to be deleted, we'd be in trouble, because it'd
+					 * possibly leave a tuple below the relation's xmin
+					 * horizon alive.  heap_prepare_freeze_tuple() is prepared
+					 * to detect that case and abort the transaction,
+					 * preventing corruption.
 					 */
 					if (HeapTupleIsHotUpdated(&tuple) ||
 						HeapTupleIsHeapOnly(&tuple))
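The detection the new comment refers to happens inside heap_prepare_freeze_tuple() itself, in heapam.c, which this diff does not show. Below is a minimal sketch of the kind of cross-check that function can perform once relfrozenxid is passed in, written against PostgreSQL's usual transam and elog helpers; it is illustrative, not the exact upstream code:

	/*
	 * A normal (unfrozen) xmin older than relfrozenxid should be
	 * impossible; report corruption rather than freezing or skipping
	 * the tuple.  ereport(ERROR) aborts the current transaction.
	 */
	TransactionId xid = HeapTupleHeaderGetXmin(tuple);

	if (TransactionIdIsNormal(xid) &&
		TransactionIdPrecedes(xid, relfrozenxid))
		ereport(ERROR,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg_internal("found xmin %u from before relfrozenxid %u",
								 xid, relfrozenxid)));

The same pattern extends naturally to xmax, and to MultiXactIds checked against relminmxid.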
@@ -1095,8 +1104,10 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
 			 * Each non-removable tuple must be checked to see if it needs
 			 * freezing.  Note we already have exclusive buffer lock.
 			 */
-			if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
-										  MultiXactCutoff, &frozen[nfrozen],
+			if (heap_prepare_freeze_tuple(tuple.t_data,
+										  relfrozenxid, relminmxid,
+										  FreezeLimit, MultiXactCutoff,
+										  &frozen[nfrozen],
 										  &tuple_totally_frozen))
 				frozen[nfrozen++].offset = offnum;
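For reference, here is the expanded signature this change implies for heap_prepare_freeze_tuple(), reconstructed from the call site above; the names of the cutoff and output parameters are assumptions, since the prototype lives in a header outside this diff:

	/* Reconstructed prototype; parameter names after t_data are assumed. */
	extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
										  TransactionId relfrozenxid,
										  TransactionId relminmxid,
										  TransactionId cutoff_xid,
										  TransactionId cutoff_multi,
										  xl_heap_freeze_tuple *frz,
										  bool *totally_frozen);

The boolean result drives the bookkeeping visible above: a true return means the tuple needs freezing, so its offset is recorded in frozen[] for the later freeze pass, while the totally-frozen output flag lets the caller track whether the page as a whole ends up frozen.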