author     Thomas Munro <tmunro@postgresql.org>  2020-07-31 14:15:18 +1200
committer  Thomas Munro <tmunro@postgresql.org>  2020-07-31 14:29:52 +1200
commit     c5315f4f44843c20ada876fdb0d0828795dfbdf5 (patch)
tree       b94c24609a31ae7243b217ae7d5358c8cff8fe20 /src/backend/storage/smgr
parent     e3931d01f3afef14703827eda1dad0a3fb3b5d07 (diff)
Cache smgrnblocks() results in recovery.
Avoid repeatedly calling lseek(SEEK_END) during recovery by caching
the size of each fork. For now, we can't use the same technique in
other processes, because we lack a shared invalidation mechanism.
Do this by generalizing the pre-existing caching used by FSM and VM
to support all forks.
Discussion: https://postgr.es/m/CAEepm%3D3SSw-Ty1DFcK%3D1rU-K6GSzYzfdD4d%2BZwapdN7dTa6%3DnQ%40mail.gmail.com
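To illustrate the idea behind the patch, here is a minimal, self-contained sketch of the read-through cache it describes. This is not PostgreSQL code: ForkState, fork_nblocks, BLOCK_SIZE and the file name are invented for illustration. Once the size is known, later lookups skip the lseek(SEEK_END) system call, which is exactly the cost the commit message says recovery was paying repeatedly.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define BLOCK_SIZE 8192
#define INVALID_BLOCK_NUMBER ((uint32_t) 0xFFFFFFFF)

/* Toy stand-in for one relation fork managed by a storage manager. */
typedef struct ForkState
{
	int			fd;				/* open file descriptor for the fork */
	uint32_t	cached_nblocks; /* INVALID_BLOCK_NUMBER if unknown */
} ForkState;

/*
 * Return the fork's size in blocks.  The first call asks the kernel with
 * lseek(SEEK_END); later calls return the cached answer, mirroring what
 * smgrnblocks() does during recovery after this commit.
 */
static uint32_t
fork_nblocks(ForkState *fork)
{
	off_t		end;

	if (fork->cached_nblocks != INVALID_BLOCK_NUMBER)
		return fork->cached_nblocks;

	end = lseek(fork->fd, 0, SEEK_END);
	if (end < 0)
		return 0;				/* real code would report an error */

	fork->cached_nblocks = (uint32_t) (end / BLOCK_SIZE);
	return fork->cached_nblocks;
}

int
main(void)
{
	ForkState	fork = {.fd = open("somefile", O_RDONLY),
						.cached_nblocks = INVALID_BLOCK_NUMBER};

	if (fork.fd < 0)
		return 1;

	/* Only the first of these calls issues an lseek() system call. */
	printf("%u blocks\n", (unsigned) fork_nblocks(&fork));
	printf("%u blocks\n", (unsigned) fork_nblocks(&fork));

	close(fork.fd);
	return 0;
}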
Diffstat (limited to 'src/backend/storage/smgr')
-rw-r--r--   src/backend/storage/smgr/smgr.c | 49
1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 7d667c6586f..dcc09df0c77 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -17,6 +17,7 @@
  */
 #include "postgres.h"
 
+#include "access/xlog.h"
 #include "lib/ilist.h"
 #include "storage/bufmgr.h"
 #include "storage/ipc.h"
@@ -174,8 +175,8 @@ smgropen(RelFileNode rnode, BackendId backend)
 		/* hash_search already filled in the lookup key */
 		reln->smgr_owner = NULL;
 		reln->smgr_targblock = InvalidBlockNumber;
-		reln->smgr_fsm_nblocks = InvalidBlockNumber;
-		reln->smgr_vm_nblocks = InvalidBlockNumber;
+		for (int i = 0; i <= MAX_FORKNUM; ++i)
+			reln->smgr_cached_nblocks[i] = InvalidBlockNumber;
 		reln->smgr_which = 0;	/* we only have md.c at present */
 
 		/* implementation-specific initialization */
@@ -464,6 +465,16 @@ smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
 {
 	smgrsw[reln->smgr_which].smgr_extend(reln, forknum, blocknum,
 										 buffer, skipFsync);
+
+	/*
+	 * Normally we expect this to increase nblocks by one, but if the cached
+	 * value isn't as expected, just invalidate it so the next call asks the
+	 * kernel.
+	 */
+	if (reln->smgr_cached_nblocks[forknum] == blocknum)
+		reln->smgr_cached_nblocks[forknum] = blocknum + 1;
+	else
+		reln->smgr_cached_nblocks[forknum] = InvalidBlockNumber;
 }
 
 /*
@@ -537,7 +548,20 @@ smgrwriteback(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
 BlockNumber
 smgrnblocks(SMgrRelation reln, ForkNumber forknum)
 {
-	return smgrsw[reln->smgr_which].smgr_nblocks(reln, forknum);
+	BlockNumber result;
+
+	/*
+	 * For now, we only use cached values in recovery due to lack of a shared
+	 * invalidation mechanism for changes in file size.
+	 */
+	if (InRecovery && reln->smgr_cached_nblocks[forknum] != InvalidBlockNumber)
+		return reln->smgr_cached_nblocks[forknum];
+
+	result = smgrsw[reln->smgr_which].smgr_nblocks(reln, forknum);
+
+	reln->smgr_cached_nblocks[forknum] = result;
+
+	return result;
 }
 
 /*
@@ -576,20 +600,19 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
 	/* Do the truncation */
 	for (i = 0; i < nforks; i++)
 	{
+		/* Make the cached size invalid if we encounter an error. */
+		reln->smgr_cached_nblocks[forknum[i]] = InvalidBlockNumber;
+
 		smgrsw[reln->smgr_which].smgr_truncate(reln, forknum[i], nblocks[i]);
 
 		/*
-		 * We might as well update the local smgr_fsm_nblocks and
-		 * smgr_vm_nblocks settings. The smgr cache inval message that this
-		 * function sent will cause other backends to invalidate their copies
-		 * of smgr_fsm_nblocks and smgr_vm_nblocks, and these ones too at the
-		 * next command boundary. But these ensure they aren't outright wrong
-		 * until then.
+		 * We might as well update the local smgr_cached_nblocks values. The
+		 * smgr cache inval message that this function sent will cause other
+		 * backends to invalidate their copies of smgr_fsm_nblocks and
+		 * smgr_vm_nblocks, and these ones too at the next command boundary.
+		 * But these ensure they aren't outright wrong until then.
 		 */
-		if (forknum[i] == FSM_FORKNUM)
-			reln->smgr_fsm_nblocks = nblocks[i];
-		if (forknum[i] == VISIBILITYMAP_FORKNUM)
-			reln->smgr_vm_nblocks = nblocks[i];
+		reln->smgr_cached_nblocks[forknum[i]] = nblocks[i];
 	}
 }
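The cache-maintenance rules in the smgrextend() and smgrtruncate() hunks above can be summarized with a small, compilable model. This is an illustrative sketch only (ForkCache and the function names are invented, and the filesystem call is elided): a one-block extension bumps the cached value only when it matches the expected pre-extension size, and truncation invalidates the cache before touching the filesystem so an error cannot leave a stale, too-large value behind.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_BLOCK_NUMBER ((uint32_t) 0xFFFFFFFF)

/* Toy model of one fork's cached size. */
typedef struct ForkCache
{
	uint32_t	cached_nblocks;
} ForkCache;

/* Rule from the smgrextend() hunk: a one-block extension at 'blocknum'. */
static void
cache_after_extend(ForkCache *fork, uint32_t blocknum)
{
	if (fork->cached_nblocks == blocknum)
		fork->cached_nblocks = blocknum + 1;			/* the expected case */
	else
		fork->cached_nblocks = INVALID_BLOCK_NUMBER;	/* surprise: re-ask the kernel */
}

/* Rule from the smgrtruncate() hunk: invalidate first, set after success. */
static void
cache_around_truncate(ForkCache *fork, uint32_t new_nblocks)
{
	fork->cached_nblocks = INVALID_BLOCK_NUMBER;	/* stays invalid if truncation fails */
	/* ... the actual filesystem truncation would happen here ... */
	fork->cached_nblocks = new_nblocks;
}

int
main(void)
{
	ForkCache	fork = {.cached_nblocks = 10};

	cache_after_extend(&fork, 10);		/* matches the cache: bump to 11 */
	assert(fork.cached_nblocks == 11);

	cache_after_extend(&fork, 42);		/* unexpected block number: invalidate */
	assert(fork.cached_nblocks == INVALID_BLOCK_NUMBER);

	cache_around_truncate(&fork, 5);	/* valid again once truncation succeeds */
	assert(fork.cached_nblocks == 5);

	printf("cache model behaves as in the patch\n");
	return 0;
}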