aboutsummaryrefslogtreecommitdiff
path: root/src/backend/access/spgist/spginsert.c
diff options
context:
space:
mode:
authorHeikki Linnakangas <heikki.linnakangas@iki.fi>2024-02-23 16:10:51 +0200
committerHeikki Linnakangas <heikki.linnakangas@iki.fi>2024-02-23 16:10:51 +0200
commit8af256524893987a3e534c6578dd60edfb782a77 (patch)
tree76761046e9f1fd4e18abc502e208c76132b7b1c7 /src/backend/access/spgist/spginsert.c
parente612384fc78d35c3d3a8b3d27cef5181dca8430b (diff)
downloadpostgresql-8af256524893987a3e534c6578dd60edfb782a77.tar.gz
postgresql-8af256524893987a3e534c6578dd60edfb782a77.zip
Introduce a new smgr bulk loading facility.
The new facility makes it easier to optimize bulk loading, as the logic for buffering, WAL-logging, and syncing the relation only needs to be implemented once. It's also less error-prone: We have had a number of bugs in how a relation is fsync'd - or not - at the end of a bulk loading operation. By centralizing that logic to one place, we only need to write it correctly once. The new facility is faster for small relations: Instead of calling smgrimmedsync(), we register the fsync to happen at next checkpoint, which avoids the fsync latency. That can make a big difference if you are e.g. restoring a schema-only dump with lots of relations. It is also slightly more efficient with large relations, as the WAL logging is performed multiple pages at a time. That avoids some WAL header overhead. The sorted GiST index build did that already, this moves the buffering to the new facility. The changes to the pageinspect GiST test need an explanation: Before this patch, the sorted GiST index build set the LSN on every page to the special GistBuildLSN value, not the LSN of the WAL record, even though they were WAL-logged. There was no particular need for it, it just happened naturally when we wrote out the pages before WAL-logging them. Now we WAL-log the pages first, like in B-tree build, so the pages are stamped with the record's real LSN. When the build is not WAL-logged, we still use GistBuildLSN. To make the test output predictable, use an unlogged index. Reviewed-by: Andres Freund Discussion: https://www.postgresql.org/message-id/30e8f366-58b3-b239-c521-422122dd5150%40iki.fi
Diffstat (limited to 'src/backend/access/spgist/spginsert.c')
-rw-r--r--src/backend/access/spgist/spginsert.c49
1 files changed, 17 insertions, 32 deletions
diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c
index 98b1da20d58..1b70c5a59fd 100644
--- a/src/backend/access/spgist/spginsert.c
+++ b/src/backend/access/spgist/spginsert.c
@@ -25,7 +25,7 @@
#include "catalog/index.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
-#include "storage/smgr.h"
+#include "storage/bulk_write.h"
#include "utils/memutils.h"
#include "utils/rel.h"
@@ -155,42 +155,27 @@ spgbuild(Relation heap, Relation index, IndexInfo *indexInfo)
void
spgbuildempty(Relation index)
{
- Buffer metabuffer,
- rootbuffer,
- nullbuffer;
-
- /*
- * Initialize the meta page and root pages
- */
- metabuffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
- LockBuffer(metabuffer, BUFFER_LOCK_EXCLUSIVE);
- rootbuffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
- LockBuffer(rootbuffer, BUFFER_LOCK_EXCLUSIVE);
- nullbuffer = ReadBufferExtended(index, INIT_FORKNUM, P_NEW, RBM_NORMAL, NULL);
- LockBuffer(nullbuffer, BUFFER_LOCK_EXCLUSIVE);
-
- Assert(BufferGetBlockNumber(metabuffer) == SPGIST_METAPAGE_BLKNO);
- Assert(BufferGetBlockNumber(rootbuffer) == SPGIST_ROOT_BLKNO);
- Assert(BufferGetBlockNumber(nullbuffer) == SPGIST_NULL_BLKNO);
+ BulkWriteState *bulkstate;
+ BulkWriteBuffer buf;
- START_CRIT_SECTION();
+ bulkstate = smgr_bulk_start_rel(index, INIT_FORKNUM);
- SpGistInitMetapage(BufferGetPage(metabuffer));
- MarkBufferDirty(metabuffer);
- SpGistInitBuffer(rootbuffer, SPGIST_LEAF);
- MarkBufferDirty(rootbuffer);
- SpGistInitBuffer(nullbuffer, SPGIST_LEAF | SPGIST_NULLS);
- MarkBufferDirty(nullbuffer);
+ /* Construct metapage. */
+ buf = smgr_bulk_get_buf(bulkstate);
+ SpGistInitMetapage((Page) buf);
+ smgr_bulk_write(bulkstate, SPGIST_METAPAGE_BLKNO, buf, true);
- log_newpage_buffer(metabuffer, true);
- log_newpage_buffer(rootbuffer, true);
- log_newpage_buffer(nullbuffer, true);
+ /* Likewise for the root page. */
+ buf = smgr_bulk_get_buf(bulkstate);
+ SpGistInitPage((Page) buf, SPGIST_LEAF);
+ smgr_bulk_write(bulkstate, SPGIST_ROOT_BLKNO, buf, true);
- END_CRIT_SECTION();
+ /* Likewise for the null-tuples root page. */
+ buf = smgr_bulk_get_buf(bulkstate);
+ SpGistInitPage((Page) buf, SPGIST_LEAF | SPGIST_NULLS);
+ smgr_bulk_write(bulkstate, SPGIST_NULL_BLKNO, buf, true);
- UnlockReleaseBuffer(metabuffer);
- UnlockReleaseBuffer(rootbuffer);
- UnlockReleaseBuffer(nullbuffer);
+ smgr_bulk_finish(bulkstate);
}
/*