Diffstat (limited to 'src')
-rw-r--r--   src/backend/access/heap/heapam.c   58
-rw-r--r--   src/backend/access/heap/hio.c       8
-rw-r--r--   src/include/access/hio.h            3
3 files changed, 63 insertions, 6 deletions
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f7d9ce59a47..57083a33ee5 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1847,7 +1847,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
*/
buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
InvalidBuffer, options, bistate,
- &vmbuffer, NULL);
+ &vmbuffer, NULL,
+ 0);
/*
* We're about to do the actual insert -- but check for conflict first, to
@@ -2051,6 +2052,32 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid,
}
/*
+ * Helper for heap_multi_insert() that computes the number of entire pages
+ * that inserting the remaining heaptuples requires. Used to determine how
+ * much the relation needs to be extended by.
+ */
+static int
+heap_multi_insert_pages(HeapTuple *heaptuples, int done, int ntuples, Size saveFreeSpace)
+{
+ size_t page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
+ int npages = 1;
+
+ for (int i = done; i < ntuples; i++)
+ {
+ size_t tup_sz = sizeof(ItemIdData) + MAXALIGN(heaptuples[i]->t_len);
+
+ if (page_avail < tup_sz)
+ {
+ npages++;
+ page_avail = BLCKSZ - SizeOfPageHeaderData - saveFreeSpace;
+ }
+ page_avail -= tup_sz;
+ }
+
+ return npages;
+}
+
+/*
* heap_multi_insert - insert multiple tuples into a heap
*
* This is like heap_insert(), but inserts multiple tuples in one operation.
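
[Editor's note, not part of the patch: the helper added above bounds the worst case by charging each remaining tuple one line pointer plus its MAXALIGNed length against a fresh page's usable space. Below is a standalone sketch of that computation; BLOCK_SIZE, PAGE_HEADER_SIZE, ITEM_ID_SIZE and ALIGN8 are assumed stand-ins for BLCKSZ, SizeOfPageHeaderData, sizeof(ItemIdData) and MAXALIGN(), and the tuple lengths in main() are made up.]

#include <stdio.h>
#include <stddef.h>

#define BLOCK_SIZE       8192	/* stand-in for BLCKSZ */
#define PAGE_HEADER_SIZE 24	/* stand-in for SizeOfPageHeaderData */
#define ITEM_ID_SIZE     4	/* stand-in for sizeof(ItemIdData) */
#define ALIGN8(x)        (((x) + 7) & ~(size_t) 7)	/* stand-in for MAXALIGN() */

/* worst-case page count for the tuples in lens[done .. ntuples - 1] */
static int
pages_needed(const size_t *lens, int done, int ntuples, size_t saveFreeSpace)
{
	size_t page_avail = BLOCK_SIZE - PAGE_HEADER_SIZE - saveFreeSpace;
	int npages = 1;

	for (int i = done; i < ntuples; i++)
	{
		size_t tup_sz = ITEM_ID_SIZE + ALIGN8(lens[i]);

		if (page_avail < tup_sz)
		{
			/* tuple does not fit: charge it to a fresh page */
			npages++;
			page_avail = BLOCK_SIZE - PAGE_HEADER_SIZE - saveFreeSpace;
		}
		page_avail -= tup_sz;
	}

	return npages;
}

int
main(void)
{
	size_t lens[100];

	/* e.g. 100 tuples of 500 bytes each, no reserved free space */
	for (int i = 0; i < 100; i++)
		lens[i] = 500;

	printf("worst-case pages: %d\n", pages_needed(lens, 0, 100, 0));
	return 0;
}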
@@ -2076,6 +2103,9 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
Size saveFreeSpace;
bool need_tuple_data = RelationIsLogicallyLogged(relation);
bool need_cids = RelationIsAccessibleInLogicalDecoding(relation);
+ bool starting_with_empty_page = false;
+ int npages = 0;
+ int npages_used = 0;
/* currently not needed (thus unsupported) for heap_multi_insert() */
Assert(!(options & HEAP_INSERT_NO_LOGICAL));
@@ -2126,7 +2156,6 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
while (ndone < ntuples)
{
Buffer buffer;
- bool starting_with_empty_page;
bool all_visible_cleared = false;
bool all_frozen_set = false;
int nthispage;
@@ -2134,6 +2163,25 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
CHECK_FOR_INTERRUPTS();
/*
+ * Compute number of pages needed to fit the to-be-inserted tuples in
+ * the worst case. This will be used to determine how much to extend
+ * the relation by in RelationGetBufferForTuple(), if needed. If we
+ * filled a prior page from scratch, we can just update our last
+ * computation, but if we started with a partially filled page,
+ * recompute from scratch, as the number of potentially required
+ * pages can vary due to tuples needing to fit onto the page, page
+ * headers, etc.
+ */
+ if (ndone == 0 || !starting_with_empty_page)
+ {
+ npages = heap_multi_insert_pages(heaptuples, ndone, ntuples,
+ saveFreeSpace);
+ npages_used = 0;
+ }
+ else
+ npages_used++;
+
+ /*
* Find buffer where at least the next tuple will fit. If the page is
* all-visible, this will also pin the requisite visibility map page.
*
@@ -2142,7 +2190,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
*/
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
InvalidBuffer, options, bistate,
- &vmbuffer, NULL);
+ &vmbuffer, NULL,
+ npages - npages_used);
page = BufferGetPage(buffer);
starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
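
[Editor's note, not part of the patch: taken together, these two hunks implement a simple countdown. The worst-case page count is computed once, and as long as each subsequent page starts out empty the hint passed down is merely decremented; a partially filled page forces a recomputation. Below is a self-contained sketch of that bookkeeping, with a made-up TUPLES_PER_PAGE constant in place of the real per-tuple size arithmetic and buffer handling.]

#include <stdbool.h>
#include <stdio.h>

#define TUPLES_PER_PAGE 16	/* assumed capacity; the real code sizes each tuple */

int
main(void)
{
	int ntuples = 100;
	int ndone = 0;
	int npages = 0;
	int npages_used = 0;
	bool starting_with_empty_page = false;

	while (ndone < ntuples)
	{
		/*
		 * Recompute the worst case on the first iteration and whenever the
		 * previous page was only partially filled; otherwise just record
		 * that one of the previously computed pages has been consumed.
		 */
		if (ndone == 0 || !starting_with_empty_page)
		{
			npages = (ntuples - ndone + TUPLES_PER_PAGE - 1) / TUPLES_PER_PAGE;
			npages_used = 0;
		}
		else
			npages_used++;

		printf("ndone=%3d extend-by hint=%d\n", ndone, npages - npages_used);

		/* assume only the very first page we touch is partially filled */
		starting_with_empty_page = (ndone > 0);
		ndone += TUPLES_PER_PAGE;	/* tuples placed on this page */
	}
	return 0;
}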
@@ -3576,7 +3625,8 @@ l2:
/* It doesn't fit, must use RelationGetBufferForTuple. */
newbuf = RelationGetBufferForTuple(relation, heaptup->t_len,
buffer, 0, NULL,
- &vmbuffer_new, &vmbuffer);
+ &vmbuffer_new, &vmbuffer,
+ 0);
/* We're all done. */
break;
}
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index a0713c178ac..f24e3d96eb9 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -301,6 +301,11 @@ RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
* Returns pinned and exclusive-locked buffer of a page in given relation
* with free space >= given len.
*
+ * If num_pages is > 1, we will try to extend the relation by at least that
+ * many pages when we decide to extend it. This is more efficient for
+ * callers that know they will need multiple pages
+ * (e.g. heap_multi_insert()).
+ *
* If otherBuffer is not InvalidBuffer, then it references a previously
* pinned buffer of another page in the same relation; on return, this
* buffer will also be exclusive-locked. (This case is used by heap_update;
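
[Editor's note, not part of the patch: the two calling patterns for the new parameter, mirroring the heapam.c hunks above. These fragments assume the surrounding variables from those call sites; single-tuple paths pass 0, i.e. no extension hint, while heap_multi_insert() passes the number of worst-case pages it still expects to need.]

/* single-tuple insert (heap_insert, heap_update): no extension hint */
buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
                                   InvalidBuffer, options, bistate,
                                   &vmbuffer, NULL,
                                   0);

/* multi-insert: hint with the remaining worst-case page count */
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
                                   InvalidBuffer, options, bistate,
                                   &vmbuffer, NULL,
                                   npages - npages_used);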
@@ -359,7 +364,8 @@ Buffer
RelationGetBufferForTuple(Relation relation, Size len,
Buffer otherBuffer, int options,
BulkInsertState bistate,
- Buffer *vmbuffer, Buffer *vmbuffer_other)
+ Buffer *vmbuffer, Buffer *vmbuffer_other,
+ int num_pages)
{
bool use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
Buffer buffer = InvalidBuffer;
diff --git a/src/include/access/hio.h b/src/include/access/hio.h
index 3f20b585326..b665de7d41e 100644
--- a/src/include/access/hio.h
+++ b/src/include/access/hio.h
@@ -38,6 +38,7 @@ extern void RelationPutHeapTuple(Relation relation, Buffer buffer,
extern Buffer RelationGetBufferForTuple(Relation relation, Size len,
Buffer otherBuffer, int options,
BulkInsertStateData *bistate,
- Buffer *vmbuffer, Buffer *vmbuffer_other);
+ Buffer *vmbuffer, Buffer *vmbuffer_other,
+ int num_pages);
#endif /* HIO_H */