Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/nbtree/nbtree.c     |  18
-rw-r--r--  src/backend/access/spgist/spgvacuum.c  |  23
2 files changed, 32 insertions(+), 9 deletions(-)
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 8158508d8c5..6fca8e358fe 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -832,9 +832,6 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
btvacuumscan(info, stats, NULL, NULL, 0);
}
- /* Finally, vacuum the FSM */
- IndexFreeSpaceMapVacuum(info->index);
-
/*
* It's quite possible for us to be fooled by concurrent page splits into
* double-counting some index tuples, so disbelieve any total that exceeds
@@ -976,6 +973,21 @@ btvacuumscan(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
MemoryContextDelete(vstate.pagedelcontext);
+ /*
+ * If we found any recyclable pages (and recorded them in the FSM), then
+ * forcibly update the upper-level FSM pages to ensure that searchers can
+ * find them. It's possible that the pages were also found during
+ * previous scans and so this is a waste of time, but it's cheap enough
+ * relative to scanning the index that it shouldn't matter much, and
+ * making sure that free pages are available sooner rather than later
+ * seems worthwhile.
+ *
+ * Note that if no recyclable pages exist, we don't bother vacuuming the
+ * FSM at all.
+ */
+ if (vstate.totFreePages > 0)
+ IndexFreeSpaceMapVacuum(rel);
+
/* update statistics */
stats->num_pages = num_pages;
stats->pages_free = vstate.totFreePages;
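
The hunk above follows a two-step FSM pattern: recyclable pages are recorded
at the FSM's leaf level as the scan finds them, and a single
IndexFreeSpaceMapVacuum() call at the end propagates those entries up to the
FSM's upper levels, skipped entirely when nothing was freed. Below is a
minimal sketch of that pattern using the FSM API from storage/indexfsm.h;
the scan loop, the totFreePages bookkeeping, and the page_is_recyclable()
helper are simplified stand-ins, not the actual btvacuumscan() code:

    #include "postgres.h"
    #include "storage/indexfsm.h"
    #include "utils/rel.h"

    /* Hypothetical predicate standing in for nbtree's real recyclability test. */
    static bool page_is_recyclable(Relation rel, BlockNumber blkno);

    static void
    sketch_vacuum_scan(Relation rel, BlockNumber num_pages)
    {
        BlockNumber totFreePages = 0;   /* simplified vstate.totFreePages */
        BlockNumber blkno;

        for (blkno = 0; blkno < num_pages; blkno++)
        {
            if (page_is_recyclable(rel, blkno))
            {
                /* Record the free page in the FSM's leaf level right away. */
                RecordFreeIndexPage(rel, blkno);
                totFreePages++;
            }
        }

        /*
         * Push the recorded values up through the FSM's upper levels so
         * that searchers starting at the FSM root can find them; skip the
         * work when the scan recorded nothing, exactly as the patch does.
         */
        if (totFreePages > 0)
            IndexFreeSpaceMapVacuum(rel);
    }
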
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 72839cb8f96..a83a4b581ed 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -846,6 +846,21 @@ spgvacuumscan(spgBulkDeleteState *bds)
SpGistUpdateMetaPage(index);
/*
+ * If we found any empty pages (and recorded them in the FSM), then
+ * forcibly update the upper-level FSM pages to ensure that searchers can
+ * find them. It's possible that the pages were also found during
+ * previous scans and so this is a waste of time, but it's cheap enough
+ * relative to scanning the index that it shouldn't matter much, and
+ * making sure that free pages are available sooner rather than later
+ * seems worthwhile.
+ *
+ * Note that if no empty pages exist, we don't bother vacuuming the FSM at
+ * all.
+ */
+ if (bds->stats->pages_deleted > 0)
+ IndexFreeSpaceMapVacuum(index);
+
+ /*
* Truncate index if possible
*
* XXX disabled because it's unsafe due to possible concurrent inserts.
@@ -916,7 +931,6 @@ dummy_callback(ItemPointer itemptr, void *state)
IndexBulkDeleteResult *
spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
{
- Relation index = info->index;
spgBulkDeleteState bds;
/* No-op in ANALYZE ONLY mode */
@@ -926,8 +940,8 @@ spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
/*
* We don't need to scan the index if there was a preceding bulkdelete
* pass. Otherwise, make a pass that won't delete any live tuples, but
- * might still accomplish useful stuff with redirect/placeholder cleanup,
- * and in any case will provide stats.
+ * might still accomplish useful stuff with redirect/placeholder cleanup
+ * and/or FSM housekeeping, and in any case will provide stats.
*/
if (stats == NULL)
{
@@ -940,9 +954,6 @@ spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
spgvacuumscan(&bds);
}
- /* Finally, vacuum the FSM */
- IndexFreeSpaceMapVacuum(index);
-
/*
* It's quite possible for us to be fooled by concurrent tuple moves into
* double-counting some index tuples, so disbelieve any total that exceeds
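
Both call sites exist because of how FSM lookups work: RecordFreeIndexPage()
updates only the leaf entry for a block, while searchers descend the FSM
from its root, so new entries can stay invisible until the upper levels are
refreshed; IndexFreeSpaceMapVacuum() (at the time of this commit, a thin
wrapper around FreeSpaceMapVacuum()) performs that refresh. A sketch of the
consumer side, loosely modeled on how nbtree reuses pages via
GetFreeIndexPage(); locking and the mandatory recheck are reduced to
comments:

    #include "postgres.h"
    #include "storage/indexfsm.h"
    #include "utils/rel.h"

    static BlockNumber
    sketch_get_recyclable_block(Relation rel)
    {
        /*
         * Ask the FSM for a wholly-free page.  The search starts at the
         * FSM root, which is why the patch forces the upper levels to be
         * updated once recyclable pages have been recorded.
         */
        BlockNumber blkno = GetFreeIndexPage(rel);

        if (blkno == InvalidBlockNumber)
            return InvalidBlockNumber;  /* FSM knows of no recyclable page */

        /*
         * A real caller must lock the buffer and recheck that the page is
         * still recyclable: the FSM is only a hint, not a reservation.
         */
        return blkno;
    }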