author    Robert Haas <rhaas@postgresql.org>  2016-03-01 21:49:41 -0500
committer Robert Haas <rhaas@postgresql.org>  2016-03-01 21:49:41 -0500
commit    a892234f830e832110f63fc0a2afce2fb21d1584 (patch)
tree      fbb37cd6dc4e68f450bf5360610756368c90ae46 /src/backend/access/heap/heapam.c
parent    68c521eb92c3515e3306f51a7fd3f32d16c97524 (diff)
Change the format of the VM fork to add a second bit per page.
The new bit indicates whether every tuple on the page is already frozen. It is cleared only when the all-visible bit is cleared, and it can be set only when we vacuum a page and find that every tuple on that page is both visible to every transaction and in no need of any future vacuuming.

A future commit will use this new bit to optimize away full-table scans that would otherwise be triggered by XID wraparound considerations. A page which is merely all-visible must still be scanned in that case, but a page which is all-frozen need not be. This commit does not attempt that optimization, although that optimization is the goal here. It seems better to get the basic infrastructure in place first.

Per discussion, it's very desirable for pg_upgrade to automatically migrate existing VM forks from the old format to the new format. That, too, will be handled in a follow-on patch.

Masahiko Sawada, reviewed by Kyotaro Horiguchi, Fujii Masao, Amit Kapila, Simon Riggs, Andres Freund, and others, and substantially revised by me.
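For context on the new layout: each heap page now consumes two adjacent bits in the VM fork instead of one, so one VM byte covers four heap pages rather than eight. Below is a minimal, self-contained sketch of that addressing arithmetic. The two flag values match the ones this patch adds to visibilitymap.h, but vm_flags_for_block and the demo map are hypothetical, not PostgreSQL code.

#include <stdint.h>
#include <stdio.h>

#define VISIBILITYMAP_ALL_VISIBLE  0x01    /* all tuples visible to everyone */
#define VISIBILITYMAP_ALL_FROZEN   0x02    /* all tuples frozen as well */

#define BITS_PER_HEAPBLOCK  2                           /* was 1 before this commit */
#define HEAPBLOCKS_PER_BYTE (8 / BITS_PER_HEAPBLOCK)    /* now 4, was 8 */

/* Hypothetical helper: extract the two VM bits for one heap block. */
static uint8_t
vm_flags_for_block(const uint8_t *map, uint32_t heapBlk)
{
    uint32_t    byteno = heapBlk / HEAPBLOCKS_PER_BYTE;
    uint32_t    shift = (heapBlk % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK;

    return (map[byteno] >> shift) &
        (VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
}

int
main(void)
{
    /* One VM byte covering heap blocks 0-3; block 1 is visible and frozen. */
    uint8_t     map[1] = {(VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN) << 2};
    uint8_t     flags = vm_flags_for_block(map, 1);

    printf("all-visible=%d all-frozen=%d\n",
           (flags & VISIBILITYMAP_ALL_VISIBLE) != 0,
           (flags & VISIBILITYMAP_ALL_FROZEN) != 0);
    return 0;
}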
Diffstat (limited to 'src/backend/access/heap/heapam.c')
-rw-r--r--  src/backend/access/heap/heapam.c  61
1 file changed, 58 insertions(+), 3 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f4437428cb3..8a64321fe49 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6952,6 +6952,55 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
 }
 
 /*
+ * heap_tuple_needs_eventual_freeze
+ *
+ * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
+ * will eventually require freezing.  Similar to heap_tuple_needs_freeze,
+ * but there's no cutoff, since we're trying to figure out whether freezing
+ * will ever be needed, not whether it's needed now.
+ */
+bool
+heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
+{
+    TransactionId xid;
+
+    /*
+     * If xmin is a normal transaction ID, this tuple is definitely not
+     * frozen.
+     */
+    xid = HeapTupleHeaderGetXmin(tuple);
+    if (TransactionIdIsNormal(xid))
+        return true;
+
+    /*
+     * If xmax is a valid xact or multixact, this tuple is also not frozen.
+     */
+    if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
+    {
+        MultiXactId multi;
+
+        multi = HeapTupleHeaderGetRawXmax(tuple);
+        if (MultiXactIdIsValid(multi))
+            return true;
+    }
+    else
+    {
+        xid = HeapTupleHeaderGetRawXmax(tuple);
+        if (TransactionIdIsNormal(xid))
+            return true;
+    }
+
+    if (tuple->t_infomask & HEAP_MOVED)
+    {
+        xid = HeapTupleHeaderGetXvac(tuple);
+        if (TransactionIdIsNormal(xid))
+            return true;
+    }
+
+    return false;
+}
+
+/*
  * heap_tuple_needs_freeze
  *
  * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
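The new function above takes no cutoff XID by design. To make the contrast with heap_tuple_needs_freeze concrete, here is a self-contained toy model of the xmin case only; it is not PostgreSQL code: the multixact, xmax, and xvac branches are omitted, and TransactionIdPrecedes is reduced to a plain modulo-2^32 comparison without the special-ID handling the real macros provide.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

#define FrozenTransactionId         ((TransactionId) 2)
#define FirstNormalTransactionId    ((TransactionId) 3)
#define TransactionIdIsNormal(xid)  ((xid) >= FirstNormalTransactionId)
#define TransactionIdPrecedes(a, b) ((int32_t) ((a) - (b)) < 0)  /* simplified */

/* Needs freezing now: a normal xid that is older than the cutoff. */
static bool
needs_freeze_now(TransactionId xmin, TransactionId cutoff)
{
    return TransactionIdIsNormal(xmin) && TransactionIdPrecedes(xmin, cutoff);
}

/* Will need freezing eventually: any normal xid, no cutoff involved. */
static bool
needs_eventual_freeze(TransactionId xmin)
{
    return TransactionIdIsNormal(xmin);
}

int
main(void)
{
    TransactionId cutoff = 1000;

    printf("xmin=500:  now=%d eventual=%d\n",
           needs_freeze_now(500, cutoff), needs_eventual_freeze(500));
    printf("xmin=1500: now=%d eventual=%d\n",
           needs_freeze_now(1500, cutoff), needs_eventual_freeze(1500));
    printf("frozen:    now=%d eventual=%d\n",
           needs_freeze_now(FrozenTransactionId, cutoff),
           needs_eventual_freeze(FrozenTransactionId));
    return 0;
}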
@@ -7205,7 +7254,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
  */
 XLogRecPtr
 log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
-                 TransactionId cutoff_xid)
+                 TransactionId cutoff_xid, uint8 vmflags)
 {
     xl_heap_visible xlrec;
     XLogRecPtr  recptr;
@@ -7215,6 +7264,7 @@ log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
     Assert(BufferIsValid(vm_buffer));
 
     xlrec.cutoff_xid = cutoff_xid;
+    xlrec.flags = vmflags;
 
     XLogBeginInsert();
     XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
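The hunk above makes the WAL record state explicitly which VM bits were set, and the replay hunk below consumes that byte. This standalone demo mirrors the round trip under stand-in types: xl_heap_visible and its cutoff_xid/flags fields are real, but DemoXlHeapVisible and demo_redo here are hypothetical.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

#define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN  0x02

/* Stand-in for xl_heap_visible after this commit: the record now says
 * which bits to replay instead of implying "all-visible". */
typedef struct
{
    TransactionId cutoff_xid;
    uint8_t       flags;
} DemoXlHeapVisible;

/* Replay side: each flag bit independently selects a page-level hint,
 * as in the heap_xlog_visible hunk that follows. */
static void
demo_redo(const DemoXlHeapVisible *xlrec)
{
    if (xlrec->flags & VISIBILITYMAP_ALL_VISIBLE)
        printf("set PD_ALL_VISIBLE\n");
    if (xlrec->flags & VISIBILITYMAP_ALL_FROZEN)
        printf("set all-frozen page hint\n");
}

int
main(void)
{
    DemoXlHeapVisible rec = {12345, VISIBILITYMAP_ALL_VISIBLE |
                             VISIBILITYMAP_ALL_FROZEN};

    demo_redo(&rec);
    return 0;
}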
@@ -7804,7 +7854,12 @@ heap_xlog_visible(XLogReaderState *record)
          * the subsequent update won't be replayed to clear the flag.
          */
         page = BufferGetPage(buffer);
-        PageSetAllVisible(page);
+
+        if (xlrec->flags & VISIBILITYMAP_ALL_VISIBLE)
+            PageSetAllVisible(page);
+        if (xlrec->flags & VISIBILITYMAP_ALL_FROZEN)
+            PageSetAllFrozen(page);
+
         MarkBufferDirty(buffer);
     }
     else if (action == BLK_RESTORED)
@@ -7856,7 +7911,7 @@ heap_xlog_visible(XLogReaderState *record)
          */
         if (lsn > PageGetLSN(vmpage))
             visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
-                              xlrec->cutoff_xid);
+                              xlrec->cutoff_xid, xlrec->flags);
 
         ReleaseBuffer(vmbuffer);
         FreeFakeRelcacheEntry(reln);
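Tying the pieces together, here is a hypothetical, self-contained sketch of how a caller such as vacuum could choose the flags it hands to visibilitymap_set() and log_heap_visible(); the tuple representation and scan loop are stand-ins, not PostgreSQL code. It encodes the rule from the commit message: the all-frozen bit is only ever set together with the all-visible bit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN  0x02

typedef struct
{
    bool        visible_to_all;         /* visible to every transaction? */
    bool        needs_eventual_freeze;  /* any xid left to freeze someday? */
} DemoTuple;

/* Choose the VM flags a vacuum pass would hand to visibilitymap_set(). */
static uint8_t
vmflags_for_page(const DemoTuple *tuples, int ntuples)
{
    bool        all_visible = true;
    bool        all_frozen = true;

    for (int i = 0; i < ntuples; i++)
    {
        if (!tuples[i].visible_to_all)
            all_visible = false;
        if (tuples[i].needs_eventual_freeze)
            all_frozen = false;
    }

    if (!all_visible)
        return 0;               /* never set all-frozen without all-visible */
    return VISIBILITYMAP_ALL_VISIBLE |
        (all_frozen ? VISIBILITYMAP_ALL_FROZEN : 0);
}

int
main(void)
{
    DemoTuple   page[] = {
        {true, false},          /* visible and already fully frozen */
        {true, true},           /* visible, but will need freezing later */
    };

    printf("flags = 0x%02x\n", (unsigned) vmflags_for_page(page, 2));  /* 0x01 */
    return 0;
}

Because all-frozen without all-visible is never a valid state, clearing the all-visible bit is enough to invalidate both, which is why the patch clears the new bit only alongside the old one.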