| author | Robert Haas <rhaas@postgresql.org> | 2022-08-24 15:50:48 -0400 |
|---|---|---|
| committer | Robert Haas <rhaas@postgresql.org> | 2022-08-24 15:50:48 -0400 |
| commit | 82ac34db2036ec5b3cb32c9180f40549aa790dc2 (patch) | |
| tree | 19385f9f12f528216ece87e22c29a487a0d0dc69 /src/backend/storage/buffer/bufmgr.c | |
| parent | 396d348b046c6b7e5dc83158c4c1df1377a1d2ef (diff) | |
| download | postgresql-82ac34db2036ec5b3cb32c9180f40549aa790dc2.tar.gz postgresql-82ac34db2036ec5b3cb32c9180f40549aa790dc2.zip | |
Include RelFileLocator fields individually in BufferTag.
This is preparatory work for a project to increase the number of bits
in a RelFileNumber from 32 to 56.
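
(The BufferTag change itself lives in src/include/storage/buf_internals.h, which is outside the diff shown below. As orientation, the field accesses visible in this diff — `tag.spcOid`, `tag.dbOid`, `BufTagGetRelNumber()` — imply that the embedded RelFileLocator was flattened into individual fields. The snippet below is a sketch under that assumption, not the authoritative header; it assumes PostgreSQL's usual typedefs (Oid, RelFileNumber, ForkNumber, BlockNumber) are in scope.)

```c
/* Sketch only: the authoritative definitions are in
 * src/include/storage/buf_internals.h, which this diff does not touch. */

/* Before: the tag embedded a RelFileLocator struct directly.
 *
 * typedef struct buftag
 * {
 *     RelFileLocator rlocator;   // physical relation identifier
 *     ForkNumber     forkNum;
 *     BlockNumber    blockNum;   // blknum relative to begin of reln
 * } BufferTag;
 */

/* After: the same information, but with the RelFileLocator fields stored
 * individually, so a later patch can change the representation of relNumber
 * (e.g. widen it toward 56 bits) without re-teaching every BufferTag
 * consumer about the struct layout. */
typedef struct buftag
{
	Oid			spcOid;			/* tablespace oid */
	Oid			dbOid;			/* database oid */
	RelFileNumber relNumber;	/* relation file number */
	ForkNumber	forkNum;		/* fork number */
	BlockNumber blockNum;		/* blknum relative to begin of reln */
} BufferTag;
```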
Along the way, introduce static inline accessor functions for a couple
of BufferTag fields.
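
(The accessors are likewise defined in buf_internals.h rather than in this file. Their behavior can be read off the call sites below: every former `bufHdr->tag.rlocator` comparison becomes `BufTagMatchesRelFileLocator()`, and direct `forkNum` reads become `BufTagGetForkNum()`. A minimal sketch consistent with those call sites, assuming the BufferTag layout sketched above:)

```c
/* Minimal sketch, inferred from the call sites in the diff below; the
 * authoritative versions live in src/include/storage/buf_internals.h. */

static inline RelFileNumber
BufTagGetRelNumber(const BufferTag *tag)
{
	return tag->relNumber;
}

static inline ForkNumber
BufTagGetForkNum(const BufferTag *tag)
{
	return tag->forkNum;
}

/* Reassemble a RelFileLocator from the individually stored fields; used
 * where callers (smgropen, relpathbackend, bsearch keys, ...) still want
 * the struct form. */
static inline RelFileLocator
BufTagGetRelFileLocator(const BufferTag *tag)
{
	RelFileLocator rlocator;

	rlocator.spcOid = tag->spcOid;
	rlocator.dbOid = tag->dbOid;
	rlocator.relNumber = BufTagGetRelNumber(tag);

	return rlocator;
}

/* Field-by-field comparison replacing RelFileLocatorEquals() on the hot
 * paths, avoiding the need to materialize a RelFileLocator first. */
static inline bool
BufTagMatchesRelFileLocator(const BufferTag *tag,
							const RelFileLocator *rlocator)
{
	return tag->spcOid == rlocator->spcOid &&
		tag->dbOid == rlocator->dbOid &&
		BufTagGetRelNumber(tag) == rlocator->relNumber;
}
```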
Dilip Kumar, reviewed by me. The overall patch series has also had
review at various times from Andres Freund, Ashutosh Sharma, Hannu
Krosing, Vignesh C, Álvaro Herrera, and Tom Lane.
Discussion: http://postgr.es/m/CAFiTN-trubju5YbWAq-BSpZ90-Z6xCVBQE8BVqXqANOZAF1Znw@mail.gmail.com
Diffstat (limited to 'src/backend/storage/buffer/bufmgr.c')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/backend/storage/buffer/bufmgr.c | 115 |

1 file changed, 68 insertions, 47 deletions
```diff
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 49d3b8c9dd0..e898ffad7bb 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1657,8 +1657,8 @@ ReleaseAndReadBuffer(Buffer buffer,
 	{
 		bufHdr = GetLocalBufferDescriptor(-buffer - 1);
 		if (bufHdr->tag.blockNum == blockNum &&
-			RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
-			bufHdr->tag.forkNum == forkNum)
+			BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
+			BufTagGetForkNum(&bufHdr->tag) == forkNum)
 			return buffer;
 		ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
 		LocalRefCount[-buffer - 1]--;
@@ -1668,8 +1668,8 @@ ReleaseAndReadBuffer(Buffer buffer,
 		bufHdr = GetBufferDescriptor(buffer - 1);
 		/* we have pin, so it's ok to examine tag without spinlock */
 		if (bufHdr->tag.blockNum == blockNum &&
-			RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
-			bufHdr->tag.forkNum == forkNum)
+			BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
+			BufTagGetForkNum(&bufHdr->tag) == forkNum)
 			return buffer;
 		UnpinBuffer(bufHdr, true);
 	}
@@ -2010,9 +2010,9 @@ BufferSync(int flags)
 			item = &CkptBufferIds[num_to_scan++];
 			item->buf_id = buf_id;
-			item->tsId = bufHdr->tag.rlocator.spcOid;
-			item->relNumber = bufHdr->tag.rlocator.relNumber;
-			item->forkNum = bufHdr->tag.forkNum;
+			item->tsId = bufHdr->tag.spcOid;
+			item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
+			item->forkNum = BufTagGetForkNum(&bufHdr->tag);
 			item->blockNum = bufHdr->tag.blockNum;
 		}
@@ -2718,7 +2718,8 @@ PrintBufferLeakWarning(Buffer buffer)
 	}
 
 	/* theoretically we should lock the bufhdr here */
-	path = relpathbackend(buf->tag.rlocator, backend, buf->tag.forkNum);
+	path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
+						  BufTagGetForkNum(&buf->tag));
 	buf_state = pg_atomic_read_u32(&buf->state);
 	elog(WARNING,
 		 "buffer refcount leak: [%03d] "
@@ -2797,8 +2798,8 @@ BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
 		bufHdr = GetBufferDescriptor(buffer - 1);
 
 	/* pinned, so OK to read tag without spinlock */
-	*rlocator = bufHdr->tag.rlocator;
-	*forknum = bufHdr->tag.forkNum;
+	*rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
+	*forknum = BufTagGetForkNum(&bufHdr->tag);
 	*blknum = bufHdr->tag.blockNum;
 }
@@ -2848,9 +2849,9 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
 
 	/* Find smgr relation for buffer */
 	if (reln == NULL)
-		reln = smgropen(buf->tag.rlocator, InvalidBackendId);
+		reln = smgropen(BufTagGetRelFileLocator(&buf->tag), InvalidBackendId);
 
-	TRACE_POSTGRESQL_BUFFER_FLUSH_START(buf->tag.forkNum,
+	TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
 										buf->tag.blockNum,
 										reln->smgr_rlocator.locator.spcOid,
 										reln->smgr_rlocator.locator.dbOid,
@@ -2909,7 +2910,7 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
 	 * bufToWrite is either the shared buffer or a copy, as appropriate.
 	 */
 	smgrwrite(reln,
-			  buf->tag.forkNum,
+			  BufTagGetForkNum(&buf->tag),
 			  buf->tag.blockNum,
 			  bufToWrite,
 			  false);
@@ -2930,7 +2931,7 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
 	 */
 	TerminateBufferIO(buf, true, 0);
 
-	TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(buf->tag.forkNum,
+	TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
 									   buf->tag.blockNum,
 									   reln->smgr_rlocator.locator.spcOid,
 									   reln->smgr_rlocator.locator.dbOid,
@@ -3151,15 +3152,15 @@ DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
 		 * We could check forkNum and blockNum as well as the rlocator, but
 		 * the incremental win from doing so seems small.
 		 */
-		if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator))
+		if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
 			continue;
 
 		buf_state = LockBufHdr(bufHdr);
 
 		for (j = 0; j < nforks; j++)
 		{
-			if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator) &&
-				bufHdr->tag.forkNum == forkNum[j] &&
+			if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
+				BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
 				bufHdr->tag.blockNum >= firstDelBlock[j])
 			{
 				InvalidateBuffer(bufHdr);	/* releases spinlock */
@@ -3309,7 +3310,7 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
 
 			for (j = 0; j < n; j++)
 			{
-				if (RelFileLocatorEquals(bufHdr->tag.rlocator, locators[j]))
+				if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
 				{
 					rlocator = &locators[j];
 					break;
@@ -3318,7 +3319,10 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
 		}
 		else
 		{
-			rlocator = bsearch((const void *) &(bufHdr->tag.rlocator),
+			RelFileLocator locator;
+
+			locator = BufTagGetRelFileLocator(&bufHdr->tag);
+			rlocator = bsearch((const void *) &(locator),
 							   locators, n, sizeof(RelFileLocator),
 							   rlocator_comparator);
 		}
@@ -3328,7 +3332,7 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
 			continue;
 
 		buf_state = LockBufHdr(bufHdr);
-		if (RelFileLocatorEquals(bufHdr->tag.rlocator, (*rlocator)))
+		if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
 			InvalidateBuffer(bufHdr);	/* releases spinlock */
 		else
 			UnlockBufHdr(bufHdr, buf_state);
@@ -3388,8 +3392,8 @@ FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
 		 */
 		buf_state = LockBufHdr(bufHdr);
 
-		if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
-			bufHdr->tag.forkNum == forkNum &&
+		if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
+			BufTagGetForkNum(&bufHdr->tag) == forkNum &&
 			bufHdr->tag.blockNum >= firstDelBlock)
 			InvalidateBuffer(bufHdr);	/* releases spinlock */
 		else
@@ -3427,11 +3431,11 @@ DropDatabaseBuffers(Oid dbid)
 		 * As in DropRelationBuffers, an unlocked precheck should be
 		 * safe and saves some cycles.
 		 */
-		if (bufHdr->tag.rlocator.dbOid != dbid)
+		if (bufHdr->tag.dbOid != dbid)
 			continue;
 
 		buf_state = LockBufHdr(bufHdr);
-		if (bufHdr->tag.rlocator.dbOid == dbid)
+		if (bufHdr->tag.dbOid == dbid)
 			InvalidateBuffer(bufHdr);	/* releases spinlock */
 		else
 			UnlockBufHdr(bufHdr, buf_state);
@@ -3461,7 +3465,8 @@ PrintBufferDescs(void)
 			 "[%02d] (freeNext=%d, rel=%s, "
 			 "blockNum=%u, flags=0x%x, refcount=%u %d)",
 			 i, buf->freeNext,
-			 relpathbackend(buf->tag.rlocator, InvalidBackendId, buf->tag.forkNum),
+			 relpathbackend(BufTagGetRelFileLocator(&buf->tag),
+							InvalidBackendId, BufTagGetForkNum(&buf->tag)),
 			 buf->tag.blockNum, buf->flags,
 			 buf->refcount, GetPrivateRefCount(b));
 	}
@@ -3486,7 +3491,8 @@ PrintPinnedBufs(void)
 			 "[%02d] (freeNext=%d, rel=%s, "
 			 "blockNum=%u, flags=0x%x, refcount=%u %d)",
 			 i, buf->freeNext,
-			 relpathperm(buf->tag.rlocator, buf->tag.forkNum),
+			 relpathperm(BufTagGetRelFileLocator(&buf->tag),
+						 BufTagGetForkNum(&buf->tag)),
 			 buf->tag.blockNum, buf->flags,
 			 buf->refcount, GetPrivateRefCount(b));
 	}
@@ -3525,7 +3531,7 @@ FlushRelationBuffers(Relation rel)
 			uint32		buf_state;
 
 			bufHdr = GetLocalBufferDescriptor(i);
-			if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
+			if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
 				((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
 				 (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 			{
@@ -3543,7 +3549,7 @@ FlushRelationBuffers(Relation rel)
 				PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
 
 				smgrwrite(RelationGetSmgr(rel),
-						  bufHdr->tag.forkNum,
+						  BufTagGetForkNum(&bufHdr->tag),
 						  bufHdr->tag.blockNum,
 						  localpage,
 						  false);
@@ -3572,13 +3578,13 @@ FlushRelationBuffers(Relation rel)
 		 * As in DropRelationBuffers, an unlocked precheck should be
 		 * safe and saves some cycles.
 		 */
-		if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator))
+		if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
 			continue;
 
 		ReservePrivateRefCountEntry();
 
 		buf_state = LockBufHdr(bufHdr);
-		if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
+		if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
 			(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 		{
 			PinBuffer_Locked(bufHdr);
@@ -3652,7 +3658,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
 
 			for (j = 0; j < nrels; j++)
 			{
-				if (RelFileLocatorEquals(bufHdr->tag.rlocator, srels[j].rlocator))
+				if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
 				{
 					srelent = &srels[j];
 					break;
@@ -3661,7 +3667,10 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
 		}
 		else
 		{
-			srelent = bsearch((const void *) &(bufHdr->tag.rlocator),
+			RelFileLocator rlocator;
+
+			rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
+			srelent = bsearch((const void *) &(rlocator),
 							  srels, nrels, sizeof(SMgrSortArray),
 							  rlocator_comparator);
 		}
@@ -3673,7 +3682,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
 		ReservePrivateRefCountEntry();
 
 		buf_state = LockBufHdr(bufHdr);
-		if (RelFileLocatorEquals(bufHdr->tag.rlocator, srelent->rlocator) &&
+		if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
 			(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 		{
 			PinBuffer_Locked(bufHdr);
@@ -3877,13 +3886,13 @@ FlushDatabaseBuffers(Oid dbid)
 		 * As in DropRelationBuffers, an unlocked precheck should be
 		 * safe and saves some cycles.
 		 */
-		if (bufHdr->tag.rlocator.dbOid != dbid)
+		if (bufHdr->tag.dbOid != dbid)
 			continue;
 
 		ReservePrivateRefCountEntry();
 
 		buf_state = LockBufHdr(bufHdr);
-		if (bufHdr->tag.rlocator.dbOid == dbid &&
+		if (bufHdr->tag.dbOid == dbid &&
 			(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 		{
 			PinBuffer_Locked(bufHdr);
@@ -4052,7 +4061,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
 		 * See src/backend/storage/page/README for longer discussion.
 		 */
 		if (RecoveryInProgress() ||
-			RelFileLocatorSkippingWAL(bufHdr->tag.rlocator))
+			RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
 			return;
 
 		/*
@@ -4661,7 +4670,8 @@ AbortBufferIO(void)
 			/* Buffer is pinned, so we can read tag without spinlock */
 			char	   *path;
 
-			path = relpathperm(buf->tag.rlocator, buf->tag.forkNum);
+			path = relpathperm(BufTagGetRelFileLocator(&buf->tag),
+							   BufTagGetForkNum(&buf->tag));
 			ereport(WARNING,
 					(errcode(ERRCODE_IO_ERROR),
 					 errmsg("could not write block %u of %s",
@@ -4685,7 +4695,8 @@ shared_buffer_write_error_callback(void *arg)
 	/* Buffer is pinned, so we can read the tag without locking the spinlock */
 	if (bufHdr != NULL)
 	{
-		char	   *path = relpathperm(bufHdr->tag.rlocator, bufHdr->tag.forkNum);
+		char	   *path = relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
+									   BufTagGetForkNum(&bufHdr->tag));
 
 		errcontext("writing block %u of relation %s",
 				   bufHdr->tag.blockNum, path);
@@ -4703,8 +4714,9 @@ local_buffer_write_error_callback(void *arg)
 
 	if (bufHdr != NULL)
 	{
-		char	   *path = relpathbackend(bufHdr->tag.rlocator, MyBackendId,
-										  bufHdr->tag.forkNum);
+		char	   *path = relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
+										  MyBackendId,
+										  BufTagGetForkNum(&bufHdr->tag));
 
 		errcontext("writing block %u of relation %s",
 				   bufHdr->tag.blockNum, path);
@@ -4798,15 +4810,20 @@ static inline int
 buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
 {
 	int			ret;
+	RelFileLocator rlocatora;
+	RelFileLocator rlocatorb;
 
-	ret = rlocator_comparator(&ba->rlocator, &bb->rlocator);
+	rlocatora = BufTagGetRelFileLocator(ba);
+	rlocatorb = BufTagGetRelFileLocator(bb);
+
+	ret = rlocator_comparator(&rlocatora, &rlocatorb);
 	if (ret != 0)
 		return ret;
 
-	if (ba->forkNum < bb->forkNum)
+	if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
 		return -1;
-	if (ba->forkNum > bb->forkNum)
+	if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
 		return 1;
 
 	if (ba->blockNum < bb->blockNum)
@@ -4956,10 +4973,12 @@ IssuePendingWritebacks(WritebackContext *context)
 		SMgrRelation reln;
 		int			ahead;
 		BufferTag	tag;
+		RelFileLocator currlocator;
 		Size		nblocks = 1;
 
 		cur = &context->pending_writebacks[i];
 		tag = cur->tag;
+		currlocator = BufTagGetRelFileLocator(&tag);
 
 		/*
 		 * Peek ahead, into following writeback requests, to see if they can
@@ -4967,11 +4986,13 @@ IssuePendingWritebacks(WritebackContext *context)
 		for (ahead = 0; i + ahead + 1 < context->nr_pending; ahead++)
 		{
+
 			next = &context->pending_writebacks[i + ahead + 1];
 
 			/* different file, stop */
-			if (!RelFileLocatorEquals(cur->tag.rlocator, next->tag.rlocator) ||
-				cur->tag.forkNum != next->tag.forkNum)
+			if (!RelFileLocatorEquals(currlocator,
+									  BufTagGetRelFileLocator(&next->tag)) ||
+				BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
 				break;
 
 			/* ok, block queued twice, skip */
@@ -4989,8 +5010,8 @@ IssuePendingWritebacks(WritebackContext *context)
 		i += ahead;
 
 		/* and finally tell the kernel to write the data to storage */
-		reln = smgropen(tag.rlocator, InvalidBackendId);
-		smgrwriteback(reln, tag.forkNum, tag.blockNum, nblocks);
+		reln = smgropen(currlocator, InvalidBackendId);
+		smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
 	}
 
 	context->nr_pending = 0;
```