author    Peter Geoghegan <pg@bowt.ie>  2021-11-26 10:58:38 -0800
committer Peter Geoghegan <pg@bowt.ie>  2021-11-26 10:58:38 -0800
commit    1a6f5a0e876306293fda697e7820b404d5b93693 (patch)
tree      661672ac1f0da7561aa9a63bfdfb39c824c529af
parent    dd484c97f55be8336fcb41470768c5b8ae347d13 (diff)
Go back to considering HOT on pages marked full.
Commit 2fd8685e7f simplified the checking of modified attributes that
takes place within heap_update().  This included a micro-optimization
affecting pages marked PD_PAGE_FULL: don't even try to use HOT to save
a few cycles on determining HOT safety.  The assumption was that it
won't work out this time around, since it can't have worked out last
time around.

Remove the micro-optimization.  It could only ever save cycles that are
consumed by the vast majority of heap_update() calls, which hardly seems
worth the added complexity.  It also seems quite possible that there are
workloads that will do worse over time by repeated application of the
micro-optimization, despite saving some cycles on average, in the short
term.

Author: Peter Geoghegan <pg@bowt.ie>
Reviewed-By: Álvaro Herrera <alvherre@alvh.no-ip.org>
Discussion: https://postgr.es/m/CAH2-WznU1L3+DMPr1F7o2eJBT7=3bAJoY6ZkWABAxNt+-afyTA@mail.gmail.com
-rw-r--r--  src/backend/access/heap/heapam.c  31
1 file changed, 6 insertions(+), 25 deletions(-)
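
Before the diff itself, a minimal standalone C sketch of the behavior
change, for illustration only: attribute sets are modelled as plain
bitmasks rather than PostgreSQL's Bitmapset, and hot_check_old() /
hot_check_new() are hypothetical stand-ins for the relevant logic
inside heap_update(), not real functions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Attribute sets modelled as plain bitmasks (stand-in for Bitmapset) */
typedef uint32_t attrset;

static bool
hot_check_old(bool page_is_full, attrset hot_attrs, attrset modified_attrs)
{
    attrset interesting_attrs = 0;
    bool    hot_attrs_checked = false;

    /*
     * Pre-commit behavior: only consider hot_attrs when the page is not
     * marked PD_PAGE_FULL, on the theory that HOT will fail for lack of
     * space anyway.
     */
    if (!page_is_full)
    {
        interesting_attrs |= hot_attrs;
        hot_attrs_checked = true;
    }
    /* key_attrs and id_attrs were always added; elided here */
    (void) interesting_attrs;

    /* HOT possible only if we checked at all and no indexed attr changed */
    return hot_attrs_checked && (modified_attrs & hot_attrs) == 0;
}

static bool
hot_check_new(attrset hot_attrs, attrset modified_attrs)
{
    /* Post-commit behavior: no PageIsFull() gate, no hot_attrs_checked */
    return (modified_attrs & hot_attrs) == 0;
}

int
main(void)
{
    attrset hot_attrs      = 1u << 2;  /* one indexed attribute */
    attrset modified_attrs = 1u << 7;  /* update touches a non-indexed one */

    /* Old code refused HOT outright on a page marked full... */
    printf("old, full page: %d\n",
           hot_check_old(true, hot_attrs, modified_attrs));
    /* ...new code still tries, succeeding if space is found later on */
    printf("new:            %d\n",
           hot_check_new(hot_attrs, modified_attrs));
    return 0;
}

With the gate gone, a page marked full no longer disqualifies an
otherwise HOT-safe update up front; heap_update() simply discovers any
genuine lack of space later, when it tries to place the new tuple.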
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index ec234a5e595..29a4bf0c776 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -3179,7 +3179,6 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
bool have_tuple_lock = false;
bool iscombo;
bool use_hot_update = false;
- bool hot_attrs_checked = false;
bool key_intact;
bool all_visible_cleared = false;
bool all_visible_cleared_new = false;
@@ -3228,32 +3227,15 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY);
id_attrs = RelationGetIndexAttrBitmap(relation,
INDEX_ATTR_BITMAP_IDENTITY_KEY);
-
+ interesting_attrs = NULL;
+ interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
+ interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
+ interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
block = ItemPointerGetBlockNumber(otid);
buffer = ReadBuffer(relation, block);
page = BufferGetPage(buffer);
- interesting_attrs = NULL;
-
- /*
- * If the page is already full, there is hardly any chance of doing a HOT
- * update on this page. It might be wasteful effort to look for index
- * column updates only to later reject HOT updates for lack of space in
- * the same page. So we be conservative and only fetch hot_attrs if the
- * page is not already full. Since we are already holding a pin on the
- * buffer, there is no chance that the buffer can get cleaned up
- * concurrently and even if that was possible, in the worst case we lose a
- * chance to do a HOT update.
- */
- if (!PageIsFull(page))
- {
- interesting_attrs = bms_add_members(interesting_attrs, hot_attrs);
- hot_attrs_checked = true;
- }
- interesting_attrs = bms_add_members(interesting_attrs, key_attrs);
- interesting_attrs = bms_add_members(interesting_attrs, id_attrs);
-
/*
* Before locking the buffer, pin the visibility map page if it appears to
* be necessary. Since we haven't got the lock yet, someone else might be
@@ -3867,10 +3849,9 @@ l2:
/*
* Since the new tuple is going into the same page, we might be able
* to do a HOT update. Check if any of the index columns have been
- * changed. If the page was already full, we may have skipped checking
- * for index columns, and also can't do a HOT update.
+ * changed.
*/
- if (hot_attrs_checked && !bms_overlap(modified_attrs, hot_attrs))
+ if (!bms_overlap(modified_attrs, hot_attrs))
use_hot_update = true;
}
else
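
For context on the PD_PAGE_FULL hint that the removed branch consulted,
a toy model follows.  The toy_* names are hypothetical stand-ins for
PageIsFull()/PageSetFull() and PageHeaderData from
src/include/storage/bufpage.h, and the flag value is assumed to match
that header as of this era of the tree.

#include <stdbool.h>
#include <stdint.h>

#define TOY_PD_PAGE_FULL 0x0002    /* assumed to mirror PD_PAGE_FULL */

typedef struct
{
    uint16_t pd_flags;             /* hint bits, as in PageHeaderData */
} toy_page;

static bool
toy_page_is_full(const toy_page *page)
{
    return (page->pd_flags & TOY_PD_PAGE_FULL) != 0;
}

static void
toy_page_set_full(toy_page *page)
{
    page->pd_flags |= TOY_PD_PAGE_FULL;
}

int
main(void)
{
    toy_page page = {0};

    /* A failed in-page update sets the hint... */
    toy_page_set_full(&page);

    /*
     * ...and it stays set until something (e.g. pruning) clears it.  The
     * removed micro-optimization read this sticky hint as "HOT cannot
     * work here", which is how it could penalize a workload repeatedly.
     */
    return toy_page_is_full(&page) ? 0 : 1;
}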