about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
author    Amit Kapila <akapila@postgresql.org>  2019-01-28 08:14:06 +0530
committer Amit Kapila <akapila@postgresql.org>  2019-01-28 08:14:06 +0530
commit    ac88d2962a96a9c7e83d5acfc28fe49a72812086 (patch)
tree      c66901928bff8ba6a1998f3304551f13cab68c61 /src
parent    d66e3664b8baf41908865ad363c6ba943e6f9c4e (diff)
download  postgresql-ac88d2962a96a9c7e83d5acfc28fe49a72812086.tar.gz
          postgresql-ac88d2962a96a9c7e83d5acfc28fe49a72812086.zip
Avoid creation of the free space map for small heap relations.
Previously, all heaps had FSMs. For very small tables, this means that the
FSM took up more space than the heap did. This is wasteful, so now we
refrain from creating the FSM for heaps with 4 pages or fewer. If the last
known target block has insufficient space, we still try to insert into some
other page before giving up and extending the relation, since doing
otherwise leads to table bloat. Testing showed that trying every page
penalized performance slightly, so we compromise and try every other page.
This way, we visit at most two pages. Any pages with wasted free space
become visible at next relation extension, so we still control table bloat.
As a bonus, directly attempting one or two pages can even be faster than
consulting the FSM would have been.

Once the FSM is created for a heap we don't remove it even if somebody
deletes all the rows from the corresponding relation. We don't think it is
a useful optimization as it is quite likely that relation will again grow
to the same size.

Author: John Naylor with design inputs and some code contribution by Amit Kapila
Reviewed-by: Amit Kapila
Tested-by: Mithun C Y
Discussion: https://www.postgresql.org/message-id/CAJVSVGWvB13PzpbLEecFuGFc5V2fsO736BsdTakPiPAcdMM5tQ@mail.gmail.com
Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/brin/brin.c            2
-rw-r--r--  src/backend/access/brin/brin_pageops.c   10
-rw-r--r--  src/backend/access/heap/hio.c            47
-rw-r--r--  src/backend/access/heap/vacuumlazy.c     17
-rw-r--r--  src/backend/access/transam/xact.c        14
-rw-r--r--  src/backend/storage/freespace/README     38
-rw-r--r--  src/backend/storage/freespace/freespace.c  275
-rw-r--r--  src/backend/storage/freespace/indexfsm.c  6
-rw-r--r--  src/include/storage/freespace.h           9
-rw-r--r--  src/test/regress/expected/fsm.out        75
-rw-r--r--  src/test/regress/parallel_schedule        6
-rw-r--r--  src/test/regress/serial_schedule          1
-rw-r--r--  src/test/regress/sql/fsm.sql             55
13 files changed, 510 insertions, 45 deletions
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 467d91e6818..8f008dd0080 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -1150,7 +1150,7 @@ terminate_brin_buildstate(BrinBuildState *state)
freespace = PageGetFreeSpace(page);
blk = BufferGetBlockNumber(state->bs_currentInsertBuf);
ReleaseBuffer(state->bs_currentInsertBuf);
- RecordPageWithFreeSpace(state->bs_irel, blk, freespace);
+ RecordPageWithFreeSpace(state->bs_irel, blk, freespace, InvalidBlockNumber);
FreeSpaceMapVacuumRange(state->bs_irel, blk, blk + 1);
}
diff --git a/src/backend/access/brin/brin_pageops.c b/src/backend/access/brin/brin_pageops.c
index 164a4681556..2eb354f948f 100644
--- a/src/backend/access/brin/brin_pageops.c
+++ b/src/backend/access/brin/brin_pageops.c
@@ -310,7 +310,7 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange,
if (extended)
{
- RecordPageWithFreeSpace(idxrel, newblk, freespace);
+ RecordPageWithFreeSpace(idxrel, newblk, freespace, InvalidBlockNumber);
FreeSpaceMapVacuumRange(idxrel, newblk, newblk + 1);
}
@@ -461,7 +461,7 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange,
if (extended)
{
- RecordPageWithFreeSpace(idxrel, blk, freespace);
+ RecordPageWithFreeSpace(idxrel, blk, freespace, InvalidBlockNumber);
FreeSpaceMapVacuumRange(idxrel, blk, blk + 1);
}
@@ -654,7 +654,7 @@ brin_page_cleanup(Relation idxrel, Buffer buf)
/* Measure free space and record it */
RecordPageWithFreeSpace(idxrel, BufferGetBlockNumber(buf),
- br_page_get_freespace(page));
+ br_page_get_freespace(page), InvalidBlockNumber);
}
/*
@@ -703,7 +703,7 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz,
/* Choose initial target page, re-using existing target if known */
newblk = RelationGetTargetBlock(irel);
if (newblk == InvalidBlockNumber)
- newblk = GetPageWithFreeSpace(irel, itemsz);
+ newblk = GetPageWithFreeSpace(irel, itemsz, true);
/*
* Loop until we find a page with sufficient free space. By the time we
@@ -895,7 +895,7 @@ brin_initialize_empty_new_buffer(Relation idxrel, Buffer buffer)
* pages whose FSM records were forgotten in a crash.
*/
RecordPageWithFreeSpace(idxrel, BufferGetBlockNumber(buffer),
- br_page_get_freespace(page));
+ br_page_get_freespace(page), InvalidBlockNumber);
}
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 3da0b49ccc4..4c3e774eee2 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -239,8 +239,14 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
* Immediately update the bottom level of the FSM. This has a good
* chance of making this page visible to other concurrently inserting
* backends, and we want that to happen without delay.
+ *
+ * Since we know the table will end up with extraBlocks additional
+ * pages, we pass the final number to avoid possible unnecessary
+ * system calls and to make sure the FSM is created when we add the
+ * first new page.
*/
- RecordPageWithFreeSpace(relation, blockNum, freespace);
+ RecordPageWithFreeSpace(relation, blockNum, freespace,
+ firstBlock + extraBlocks);
}
while (--extraBlocks > 0);
@@ -377,20 +383,9 @@ RelationGetBufferForTuple(Relation relation, Size len,
* We have no cached target page, so ask the FSM for an initial
* target.
*/
- targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
-
- /*
- * If the FSM knows nothing of the rel, try the last page before we
- * give up and extend. This avoids one-tuple-per-page syndrome during
- * bootstrapping or in a recently-started system.
- */
- if (targetBlock == InvalidBlockNumber)
- {
- BlockNumber nblocks = RelationGetNumberOfBlocks(relation);
-
- if (nblocks > 0)
- targetBlock = nblocks - 1;
- }
+ targetBlock = GetPageWithFreeSpace(relation,
+ len + saveFreeSpace,
+ false);
}
loop:
@@ -484,6 +479,14 @@ loop:
{
/* use this page as future insert target, too */
RelationSetTargetBlock(relation, targetBlock);
+
+ /*
+ * In case we used an in-memory map of available blocks, reset it
+ * for next use.
+ */
+ if (targetBlock < HEAP_FSM_CREATION_THRESHOLD)
+ FSMClearLocalMap();
+
return buffer;
}
@@ -543,9 +546,12 @@ loop:
/*
* Check if some other backend has extended a block for us while
- * we were waiting on the lock.
+ * we were waiting on the lock. We only check the FSM -- if there
+ * isn't one we don't recheck the number of blocks.
*/
- targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);
+ targetBlock = GetPageWithFreeSpace(relation,
+ len + saveFreeSpace,
+ true);
/*
* If some other waiter has already extended the relation, we
@@ -625,5 +631,12 @@ loop:
*/
RelationSetTargetBlock(relation, BufferGetBlockNumber(buffer));
+ /*
+ * In case we used an in-memory map of available blocks, reset it for next
+ * use. We do this unconditionally since after relation extension we
+ * can't skip this based on the targetBlock.
+ */
+ FSMClearLocalMap();
+
return buffer;
}
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 37aa484ec3a..9cfa65ca47f 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -153,7 +153,7 @@ static BufferAccessStrategy vac_strategy;
static void lazy_scan_heap(Relation onerel, int options,
LVRelStats *vacrelstats, Relation *Irel, int nindexes,
bool aggressive);
-static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
+static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks);
static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
static void lazy_vacuum_index(Relation indrel,
IndexBulkDeleteResult **stats,
@@ -758,7 +758,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
pgstat_progress_update_multi_param(2, hvp_index, hvp_val);
/* Remove tuples from heap */
- lazy_vacuum_heap(onerel, vacrelstats);
+ lazy_vacuum_heap(onerel, vacrelstats, nblocks);
/*
* Forget the now-vacuumed tuples, and press on, but be careful
@@ -896,7 +896,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
MarkBufferDirty(buf);
UnlockReleaseBuffer(buf);
- RecordPageWithFreeSpace(onerel, blkno, freespace);
+ RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
continue;
}
@@ -935,7 +935,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
}
UnlockReleaseBuffer(buf);
- RecordPageWithFreeSpace(onerel, blkno, freespace);
+ RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
continue;
}
@@ -1332,7 +1332,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
* taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
- RecordPageWithFreeSpace(onerel, blkno, freespace);
+ RecordPageWithFreeSpace(onerel, blkno, freespace, nblocks);
}
/* report that everything is scanned and vacuumed */
@@ -1394,7 +1394,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
/* Remove tuples from heap */
pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
PROGRESS_VACUUM_PHASE_VACUUM_HEAP);
- lazy_vacuum_heap(onerel, vacrelstats);
+ lazy_vacuum_heap(onerel, vacrelstats, nblocks);
vacrelstats->num_index_scans++;
}
@@ -1465,9 +1465,10 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
* Note: the reason for doing this as a second pass is we cannot remove
* the tuples until we've removed their index entries, and we want to
* process index entry removal in batches as large as possible.
+ * Note: nblocks is passed as an optimization for RecordPageWithFreeSpace().
*/
static void
-lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
+lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats, BlockNumber nblocks)
{
int tupindex;
int npages;
@@ -1504,7 +1505,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
freespace = PageGetHeapFreeSpace(page);
UnlockReleaseBuffer(buf);
- RecordPageWithFreeSpace(onerel, tblk, freespace);
+ RecordPageWithFreeSpace(onerel, tblk, freespace, nblocks);
npages++;
}
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 0181976964c..92bda878043 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -48,6 +48,7 @@
#include "replication/walsender.h"
#include "storage/condition_variable.h"
#include "storage/fd.h"
+#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/proc.h"
@@ -2493,6 +2494,12 @@ AbortTransaction(void)
pgstat_report_wait_end();
pgstat_progress_end_command();
+ /*
+ * In case we aborted during RelationGetBufferForTuple(), clear the local
+ * map of heap pages.
+ */
+ FSMClearLocalMap();
+
/* Clean up buffer I/O and buffer context locks, too */
AbortBufferIO();
UnlockBuffers();
@@ -4714,6 +4721,13 @@ AbortSubTransaction(void)
pgstat_report_wait_end();
pgstat_progress_end_command();
+
+ /*
+ * In case we aborted during RelationGetBufferForTuple(), clear the local
+ * map of heap pages.
+ */
+ FSMClearLocalMap();
+
AbortBufferIO();
UnlockBuffers();
diff --git a/src/backend/storage/freespace/README b/src/backend/storage/freespace/README
index e7ff23b76f7..0d3cd29772e 100644
--- a/src/backend/storage/freespace/README
+++ b/src/backend/storage/freespace/README
@@ -8,7 +8,41 @@ free space to hold a tuple to be stored; or to determine that no such page
exists and the relation must be extended by one page. As of PostgreSQL 8.4
each relation has its own, extensible free space map stored in a separate
"fork" of its relation. This eliminates the disadvantages of the former
-fixed-size FSM.
+fixed-size FSM. There are two exceptions:
+
+1. Hash indexes never have a FSM.
+2. For very small tables, a 3-page relation fork would be relatively large
+and wasteful, so to save space we refrain from creating the FSM if the
+heap has HEAP_FSM_CREATION_THRESHOLD pages or fewer.
+
+To locate free space in the latter case, we simply try pages directly without
+knowing ahead of time how much free space they have. To maintain good
+performance, we create a local in-memory map of pages to try, and only mark
+every other page as available. For example, in a 3-page heap, the local map
+would look like:
+
+ANAN
+0123
+
+Pages 0 and 2 are marked "available", and page 1 as "not available".
+Page 3 is beyond the end of the relation, so is likewise marked "not
+available". First we try page 2, and if that doesn't have sufficient free
+space we try page 0 before giving up and extending the relation. There may
+be some wasted free space on block 1, but if the relation extends to 4 pages:
+
+NANA
+0123
+
+We not only have the new page 3 at our disposal, we can now check page 1
+for free space as well.
+
+Once the FSM is created for a heap we don't remove it even if somebody deletes
+all the rows from the corresponding relation. We don't think it is a useful
+optimization as it is quite likely that relation will again grow to the same
+size.
+
+FSM data structure
+------------------
It is important to keep the map small so that it can be searched rapidly.
Therefore, we don't attempt to record the exact free space on a page.
@@ -192,5 +226,3 @@ TODO
----
- fastroot to avoid traversing upper nodes with just 1 child
-- use a different system for tables that fit into one FSM page, with a
- mechanism to switch to the real thing as it grows.
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index eee82860575..5f46391b681 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -76,6 +76,14 @@
#define FSM_ROOT_LEVEL (FSM_TREE_DEPTH - 1)
#define FSM_BOTTOM_LEVEL 0
+/* Status codes for the local map. */
+
+/* Either already tried, or beyond the end of the relation */
+#define FSM_LOCAL_NOT_AVAIL 0x00
+
+/* Available to try */
+#define FSM_LOCAL_AVAIL 0x01
+
/*
* The internal FSM routines work on a logical addressing scheme. Each
* level of the tree can be thought of as a separately addressable file.
@@ -89,6 +97,17 @@ typedef struct
/* Address of the root page. */
static const FSMAddress FSM_ROOT_ADDRESS = {FSM_ROOT_LEVEL, 0};
+/* Local map of block numbers for small heaps with no FSM. */
+typedef struct
+{
+ BlockNumber nblocks;
+ uint8 map[HEAP_FSM_CREATION_THRESHOLD];
+} FSMLocalMap;
+
+static FSMLocalMap fsm_local_map = {0, {FSM_LOCAL_NOT_AVAIL}};
+
+#define FSM_LOCAL_MAP_EXISTS (fsm_local_map.nblocks > 0)
+
/* functions to navigate the tree */
static FSMAddress fsm_get_child(FSMAddress parent, uint16 slot);
static FSMAddress fsm_get_parent(FSMAddress child, uint16 *slot);
@@ -107,10 +126,14 @@ static Size fsm_space_cat_to_avail(uint8 cat);
/* workhorse functions for various operations */
static int fsm_set_and_search(Relation rel, FSMAddress addr, uint16 slot,
uint8 newValue, uint8 minValue);
+static void fsm_local_set(Relation rel, BlockNumber cur_nblocks);
static BlockNumber fsm_search(Relation rel, uint8 min_cat);
+static BlockNumber fsm_local_search(void);
static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr,
BlockNumber start, BlockNumber end,
bool *eof);
+static bool fsm_allow_writes(Relation rel, BlockNumber heapblk,
+ BlockNumber nblocks, BlockNumber *get_nblocks);
/******** Public API ********/
@@ -127,13 +150,46 @@ static uint8 fsm_vacuum_page(Relation rel, FSMAddress addr,
* amount of free space available on that page and then try again (see
* RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
* extend the relation.
+ *
+ * For very small heap relations that don't have a FSM, we try every other
+ * page before extending the relation. To keep track of which pages have
+ * been tried, initialize a local in-memory map of pages.
*/
BlockNumber
-GetPageWithFreeSpace(Relation rel, Size spaceNeeded)
+GetPageWithFreeSpace(Relation rel, Size spaceNeeded, bool check_fsm_only)
{
uint8 min_cat = fsm_space_needed_to_cat(spaceNeeded);
+ BlockNumber target_block,
+ nblocks;
+
+ /* First try the FSM, if it exists. */
+ target_block = fsm_search(rel, min_cat);
+
+ if (target_block == InvalidBlockNumber &&
+ (rel->rd_rel->relkind == RELKIND_RELATION ||
+ rel->rd_rel->relkind == RELKIND_TOASTVALUE) &&
+ !check_fsm_only)
+ {
+ nblocks = RelationGetNumberOfBlocks(rel);
+
+ if (nblocks > HEAP_FSM_CREATION_THRESHOLD)
+ {
+ /*
+ * If the FSM knows nothing of the rel, try the last page before
+ * we give up and extend. This avoids one-tuple-per-page syndrome
+ * during bootstrapping or in a recently-started system.
+ */
+ target_block = nblocks - 1;
+ }
+ else if (nblocks > 0)
+ {
+ /* Create or update local map and get first candidate block. */
+ fsm_local_set(rel, nblocks);
+ target_block = fsm_local_search();
+ }
+ }
- return fsm_search(rel, min_cat);
+ return target_block;
}
/*
@@ -144,16 +200,47 @@ GetPageWithFreeSpace(Relation rel, Size spaceNeeded)
* also some effort to return a page close to the old page; if there's a
* page with enough free space on the same FSM page where the old one page
* is located, it is preferred.
+ *
+ * For very small heap relations that don't have a FSM, we update the local
+ * map to indicate we have tried a page, and return the next page to try.
*/
BlockNumber
RecordAndGetPageWithFreeSpace(Relation rel, BlockNumber oldPage,
Size oldSpaceAvail, Size spaceNeeded)
{
- int old_cat = fsm_space_avail_to_cat(oldSpaceAvail);
- int search_cat = fsm_space_needed_to_cat(spaceNeeded);
+ int old_cat;
+ int search_cat;
FSMAddress addr;
uint16 slot;
int search_slot;
+ BlockNumber nblocks = InvalidBlockNumber;
+
+ /* First try the local map, if it exists. */
+ if (FSM_LOCAL_MAP_EXISTS)
+ {
+ Assert((rel->rd_rel->relkind == RELKIND_RELATION ||
+ rel->rd_rel->relkind == RELKIND_TOASTVALUE) &&
+ fsm_local_map.map[oldPage] == FSM_LOCAL_AVAIL);
+
+ fsm_local_map.map[oldPage] = FSM_LOCAL_NOT_AVAIL;
+ return fsm_local_search();
+ }
+
+ if (!fsm_allow_writes(rel, oldPage, InvalidBlockNumber, &nblocks))
+ {
+ /*
+ * If we have neither a local map nor a FSM, we probably just
+ * tried the target block in the smgr relation entry and failed,
+ * so we'll need to create the local map.
+ */
+ fsm_local_set(rel, nblocks);
+ return fsm_local_search();
+ }
+
+ /* Normal FSM logic follows */
+
+ old_cat = fsm_space_avail_to_cat(oldSpaceAvail);
+ search_cat = fsm_space_needed_to_cat(spaceNeeded);
/* Get the location of the FSM byte representing the heap block */
addr = fsm_get_location(oldPage, &slot);
@@ -176,21 +263,42 @@ RecordAndGetPageWithFreeSpace(Relation rel, BlockNumber oldPage,
* Note that if the new spaceAvail value is higher than the old value stored
* in the FSM, the space might not become visible to searchers until the next
* FreeSpaceMapVacuum call, which updates the upper level pages.
+ *
+ * Callers have no need for a local map.
*/
void
-RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk, Size spaceAvail)
+RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk,
+ Size spaceAvail, BlockNumber nblocks)
{
- int new_cat = fsm_space_avail_to_cat(spaceAvail);
+ int new_cat;
FSMAddress addr;
uint16 slot;
+ BlockNumber dummy;
+
+ if (!fsm_allow_writes(rel, heapBlk, nblocks, &dummy))
+ /* No FSM to update and no local map either */
+ return;
/* Get the location of the FSM byte representing the heap block */
addr = fsm_get_location(heapBlk, &slot);
+ new_cat = fsm_space_avail_to_cat(spaceAvail);
fsm_set_and_search(rel, addr, slot, new_cat, 0);
}
/*
+ * Clear the local map. We must call this when we have found a block with
+ * enough free space, when we extend the relation, or on transaction abort.
+ */
+void
+FSMClearLocalMap(void)
+{
+ fsm_local_map.nblocks = 0;
+ memset(&fsm_local_map.map, FSM_LOCAL_NOT_AVAIL,
+ sizeof(fsm_local_map.map));
+}
+
+/*
* XLogRecordPageWithFreeSpace - like RecordPageWithFreeSpace, for use in
* WAL replay
*/
@@ -204,6 +312,30 @@ XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
BlockNumber blkno;
Buffer buf;
Page page;
+ bool write_to_fsm;
+
+ /* This is meant to mirror the logic in fsm_allow_writes() */
+ if (heapBlk >= HEAP_FSM_CREATION_THRESHOLD)
+ write_to_fsm = true;
+ else
+ {
+ /* Open the relation at smgr level */
+ SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
+
+ if (smgrexists(smgr, FSM_FORKNUM))
+ write_to_fsm = true;
+ else
+ {
+ BlockNumber heap_nblocks = smgrnblocks(smgr, MAIN_FORKNUM);
+ if (heap_nblocks > HEAP_FSM_CREATION_THRESHOLD)
+ write_to_fsm = true;
+ else
+ write_to_fsm = false;
+ }
+ }
+
+ if (!write_to_fsm)
+ return;
/* Get the location of the FSM byte representing the heap block */
addr = fsm_get_location(heapBlk, &slot);
@@ -904,3 +1036,134 @@ fsm_vacuum_page(Relation rel, FSMAddress addr,
return max_avail;
}
+
+/*
+ * For heaps, we prevent creation of the FSM unless the number of pages
+ * exceeds HEAP_FSM_CREATION_THRESHOLD. For tables that don't already have
+ * a FSM, this will save an inode and a few kB of space.
+ *
+ * XXX The API is a little awkward -- if the caller passes a valid nblocks
+ * value, it can avoid invoking a system call. If the caller passes
+ * InvalidBlockNumber and receives a false return value, it can get an
+ * up-to-date relation size from get_nblocks. This saves a few cycles in
+ * the caller, which would otherwise need to get the relation size by itself.
+ */
+static bool
+fsm_allow_writes(Relation rel, BlockNumber heapblk,
+ BlockNumber nblocks, BlockNumber *get_nblocks)
+{
+ bool skip_get_nblocks;
+
+ if (heapblk >= HEAP_FSM_CREATION_THRESHOLD)
+ return true;
+
+ /* Non-heap rels can always create a FSM. */
+ if (rel->rd_rel->relkind != RELKIND_RELATION &&
+ rel->rd_rel->relkind != RELKIND_TOASTVALUE)
+ return true;
+
+ /*
+ * If the caller knows nblocks, we can avoid a system call later.
+ * If it doesn't, maybe we have relpages from a previous VACUUM.
+ * Since the table may have extended since then, we still have to
+ * count the pages later if we can't return now.
+ */
+ if (nblocks != InvalidBlockNumber)
+ {
+ if (nblocks > HEAP_FSM_CREATION_THRESHOLD)
+ return true;
+ else
+ skip_get_nblocks = true;
+ }
+ else
+ {
+ if (rel->rd_rel->relpages != InvalidBlockNumber &&
+ rel->rd_rel->relpages > HEAP_FSM_CREATION_THRESHOLD)
+ return true;
+ else
+ skip_get_nblocks = false;
+ }
+
+ RelationOpenSmgr(rel);
+ if (smgrexists(rel->rd_smgr, FSM_FORKNUM))
+ return true;
+
+ if (skip_get_nblocks)
+ return false;
+
+ /* last resort */
+ *get_nblocks = RelationGetNumberOfBlocks(rel);
+ if (*get_nblocks > HEAP_FSM_CREATION_THRESHOLD)
+ return true;
+ else
+ return false;
+}
+
+/*
+ * Initialize or update the local map of blocks to try, for when there is
+ * no FSM.
+ *
+ * When we initialize the map, the whole heap is potentially available to
+ * try. Testing revealed that trying every block can cause a small
+ * performance dip compared to when we use a FSM, so we try every other
+ * block instead.
+ */
+static void
+fsm_local_set(Relation rel, BlockNumber cur_nblocks)
+{
+ BlockNumber blkno,
+ cached_target_block;
+
+ /* The local map must not be set already. */
+ Assert(!FSM_LOCAL_MAP_EXISTS);
+
+ /*
+ * Starting at the current last block in the relation and working
+ * backwards, mark alternating blocks as available.
+ */
+ blkno = cur_nblocks - 1;
+ while (true)
+ {
+ fsm_local_map.map[blkno] = FSM_LOCAL_AVAIL;
+ if (blkno >= 2)
+ blkno -= 2;
+ else
+ break;
+ }
+
+ /* Cache the number of blocks. */
+ fsm_local_map.nblocks = cur_nblocks;
+
+ /* Set the status of the cached target block to 'unavailable'. */
+ cached_target_block = RelationGetTargetBlock(rel);
+ if (cached_target_block != InvalidBlockNumber &&
+ cached_target_block < cur_nblocks)
+ fsm_local_map.map[cached_target_block] = FSM_LOCAL_NOT_AVAIL;
+}
+
+/*
+ * Search the local map for an available block to try, in descending order.
+ * As such, there is no heuristic available to decide which order will be
+ * better to try, but the probability of having space in the last block in the
+ * map is higher because that is the most recent block added to the heap.
+ *
+ * This function is used when there is no FSM.
+ */
+static BlockNumber
+fsm_local_search(void)
+{
+ BlockNumber target_block;
+
+ /* Local map must be set by now. */
+ Assert(FSM_LOCAL_MAP_EXISTS);
+
+ target_block = fsm_local_map.nblocks;
+ do
+ {
+ target_block--;
+ if (fsm_local_map.map[target_block] == FSM_LOCAL_AVAIL)
+ return target_block;
+ } while (target_block > 0);
+
+ return InvalidBlockNumber;
+}
diff --git a/src/backend/storage/freespace/indexfsm.c b/src/backend/storage/freespace/indexfsm.c
index 58cedeaa9f7..9d8f43d3739 100644
--- a/src/backend/storage/freespace/indexfsm.c
+++ b/src/backend/storage/freespace/indexfsm.c
@@ -37,7 +37,7 @@
BlockNumber
GetFreeIndexPage(Relation rel)
{
- BlockNumber blkno = GetPageWithFreeSpace(rel, BLCKSZ / 2);
+ BlockNumber blkno = GetPageWithFreeSpace(rel, BLCKSZ / 2, true);
if (blkno != InvalidBlockNumber)
RecordUsedIndexPage(rel, blkno);
@@ -51,7 +51,7 @@ GetFreeIndexPage(Relation rel)
void
RecordFreeIndexPage(Relation rel, BlockNumber freeBlock)
{
- RecordPageWithFreeSpace(rel, freeBlock, BLCKSZ - 1);
+ RecordPageWithFreeSpace(rel, freeBlock, BLCKSZ - 1, InvalidBlockNumber);
}
@@ -61,7 +61,7 @@ RecordFreeIndexPage(Relation rel, BlockNumber freeBlock)
void
RecordUsedIndexPage(Relation rel, BlockNumber usedBlock)
{
- RecordPageWithFreeSpace(rel, usedBlock, 0);
+ RecordPageWithFreeSpace(rel, usedBlock, 0, InvalidBlockNumber);
}
/*
diff --git a/src/include/storage/freespace.h b/src/include/storage/freespace.h
index 8b000334382..dbaae651c58 100644
--- a/src/include/storage/freespace.h
+++ b/src/include/storage/freespace.h
@@ -18,15 +18,20 @@
#include "storage/relfilenode.h"
#include "utils/relcache.h"
+/* Only create the FSM if the heap has greater than this many blocks */
+#define HEAP_FSM_CREATION_THRESHOLD 4
+
/* prototypes for public functions in freespace.c */
extern Size GetRecordedFreeSpace(Relation rel, BlockNumber heapBlk);
-extern BlockNumber GetPageWithFreeSpace(Relation rel, Size spaceNeeded);
+extern BlockNumber GetPageWithFreeSpace(Relation rel, Size spaceNeeded,
+ bool check_fsm_only);
extern BlockNumber RecordAndGetPageWithFreeSpace(Relation rel,
BlockNumber oldPage,
Size oldSpaceAvail,
Size spaceNeeded);
extern void RecordPageWithFreeSpace(Relation rel, BlockNumber heapBlk,
- Size spaceAvail);
+ Size spaceAvail, BlockNumber nblocks);
+extern void FSMClearLocalMap(void);
extern void XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
Size spaceAvail);
diff --git a/src/test/regress/expected/fsm.out b/src/test/regress/expected/fsm.out
new file mode 100644
index 00000000000..df6b15b9d2a
--- /dev/null
+++ b/src/test/regress/expected/fsm.out
@@ -0,0 +1,75 @@
+--
+-- Free Space Map test
+--
+CREATE TABLE fsm_check_size (num int, str text);
+-- Fill 3 blocks with as many large records as will fit
+-- No FSM
+INSERT INTO fsm_check_size SELECT i, rpad('', 1024, 'a')
+FROM generate_series(1,7*3) i;
+VACUUM fsm_check_size;
+SELECT pg_relation_size('fsm_check_size', 'main') AS heap_size,
+pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+ heap_size | fsm_size
+-----------+----------
+ 24576 | 0
+(1 row)
+
+-- Clear some space on block 0
+DELETE FROM fsm_check_size WHERE num <= 5;
+VACUUM fsm_check_size;
+-- Insert small record in block 2 to set the cached smgr targetBlock
+INSERT INTO fsm_check_size VALUES(99, 'b');
+-- Insert large record and make sure it goes in block 0 rather than
+-- causing the relation to extend
+INSERT INTO fsm_check_size VALUES (101, rpad('', 1024, 'a'));
+SELECT pg_relation_size('fsm_check_size', 'main') AS heap_size,
+pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+ heap_size | fsm_size
+-----------+----------
+ 24576 | 0
+(1 row)
+
+-- Extend table with enough blocks to exceed the FSM threshold
+-- FSM is created and extended to 3 blocks
+INSERT INTO fsm_check_size SELECT i, 'c' FROM generate_series(200,1200) i;
+VACUUM fsm_check_size;
+SELECT pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+ fsm_size
+----------
+ 24576
+(1 row)
+
+-- Truncate heap to 1 block
+-- No change in FSM
+DELETE FROM fsm_check_size WHERE num > 7;
+VACUUM fsm_check_size;
+SELECT pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+ fsm_size
+----------
+ 24576
+(1 row)
+
+-- Truncate heap to 0 blocks
+-- FSM now truncated to 2 blocks
+DELETE FROM fsm_check_size;
+VACUUM fsm_check_size;
+SELECT pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+ fsm_size
+----------
+ 16384
+(1 row)
+
+-- Add long random string to extend TOAST table to 1 block
+INSERT INTO fsm_check_size
+VALUES(0, (SELECT string_agg(md5(chr(i)), '')
+ FROM generate_series(1,100) i));
+VACUUM fsm_check_size;
+SELECT pg_relation_size(reltoastrelid, 'main') AS toast_size,
+pg_relation_size(reltoastrelid, 'fsm') AS toast_fsm_size
+FROM pg_class WHERE relname = 'fsm_check_size';
+ toast_size | toast_fsm_size
+------------+----------------
+ 8192 | 0
+(1 row)
+
+DROP TABLE fsm_check_size;
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index cc0bbf5db9f..4051a4ad4e1 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -69,6 +69,12 @@ test: create_aggregate create_function_3 create_cast constraints triggers inherit
test: sanity_check
# ----------
+# fsm does a delete followed by vacuum, and running it in parallel can prevent
+# removal of rows.
+# ----------
+test: fsm
+
+# ----------
# Believe it or not, select creates a table, subsequent
# tests need.
# ----------
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index 0c10c7100c6..ac1ea622d65 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -80,6 +80,7 @@ test: roleattributes
test: create_am
test: hash_func
test: sanity_check
+test: fsm
test: errors
test: select
test: select_into
diff --git a/src/test/regress/sql/fsm.sql b/src/test/regress/sql/fsm.sql
new file mode 100644
index 00000000000..07f505591a8
--- /dev/null
+++ b/src/test/regress/sql/fsm.sql
@@ -0,0 +1,55 @@
+--
+-- Free Space Map test
+--
+
+CREATE TABLE fsm_check_size (num int, str text);
+
+-- Fill 3 blocks with as many large records as will fit
+-- No FSM
+INSERT INTO fsm_check_size SELECT i, rpad('', 1024, 'a')
+FROM generate_series(1,7*3) i;
+VACUUM fsm_check_size;
+SELECT pg_relation_size('fsm_check_size', 'main') AS heap_size,
+pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+
+-- Clear some space on block 0
+DELETE FROM fsm_check_size WHERE num <= 5;
+VACUUM fsm_check_size;
+
+-- Insert small record in block 2 to set the cached smgr targetBlock
+INSERT INTO fsm_check_size VALUES(99, 'b');
+
+-- Insert large record and make sure it goes in block 0 rather than
+-- causing the relation to extend
+INSERT INTO fsm_check_size VALUES (101, rpad('', 1024, 'a'));
+SELECT pg_relation_size('fsm_check_size', 'main') AS heap_size,
+pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+
+-- Extend table with enough blocks to exceed the FSM threshold
+-- FSM is created and extended to 3 blocks
+INSERT INTO fsm_check_size SELECT i, 'c' FROM generate_series(200,1200) i;
+VACUUM fsm_check_size;
+SELECT pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+
+-- Truncate heap to 1 block
+-- No change in FSM
+DELETE FROM fsm_check_size WHERE num > 7;
+VACUUM fsm_check_size;
+SELECT pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+
+-- Truncate heap to 0 blocks
+-- FSM now truncated to 2 blocks
+DELETE FROM fsm_check_size;
+VACUUM fsm_check_size;
+SELECT pg_relation_size('fsm_check_size', 'fsm') AS fsm_size;
+
+-- Add long random string to extend TOAST table to 1 block
+INSERT INTO fsm_check_size
+VALUES(0, (SELECT string_agg(md5(chr(i)), '')
+ FROM generate_series(1,100) i));
+VACUUM fsm_check_size;
+SELECT pg_relation_size(reltoastrelid, 'main') AS toast_size,
+pg_relation_size(reltoastrelid, 'fsm') AS toast_fsm_size
+FROM pg_class WHERE relname = 'fsm_check_size';
+
+DROP TABLE fsm_check_size;