Diffstat (limited to 'src/backend/storage')
 src/backend/storage/buffer/bufmgr.c        |  22
 src/backend/storage/file/fd.c              |  14
 src/backend/storage/freespace/freespace.c  | 215
 src/backend/storage/ipc/ipc.c              |   6
 src/backend/storage/ipc/ipci.c             |   3
 src/backend/storage/ipc/sinval.c           |   9
 src/backend/storage/lmgr/deadlock.c        |  35
 src/backend/storage/lmgr/lock.c            |  65
 src/backend/storage/lmgr/proc.c            |  25
 src/backend/storage/page/bufpage.c         |  15
 src/backend/storage/smgr/md.c              |  15

 11 files changed, 218 insertions, 206 deletions
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 857e9d4f4f6..c1a4666907b 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.137 2003/07/24 22:04:08 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.138 2003/08/04 00:43:22 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -61,7 +61,7 @@

 /* GUC variable */
-bool        zero_damaged_pages = false;
+bool        zero_damaged_pages = false;

 static void WaitIO(BufferDesc *buf);
@@ -232,14 +232,14 @@ ReadBufferInternal(Relation reln, BlockNumber blockNum,
                ereport(WARNING,
                        (errcode(ERRCODE_DATA_CORRUPTED),
                         errmsg("invalid page header in block %u of \"%s\"; zeroing out page",
-                               blockNum, RelationGetRelationName(reln))));
+                               blockNum, RelationGetRelationName(reln))));
                MemSet((char *) MAKE_PTR(bufHdr->data), 0, BLCKSZ);
            }
            else
                ereport(ERROR,
                        (errcode(ERRCODE_DATA_CORRUPTED),
-                        errmsg("invalid page header in block %u of \"%s\"",
-                               blockNum, RelationGetRelationName(reln))));
+                        errmsg("invalid page header in block %u of \"%s\"",
+                               blockNum, RelationGetRelationName(reln))));
    }
 }
@@ -959,8 +959,8 @@ AtEOXact_Buffers(bool isCommit)

            if (isCommit)
                elog(WARNING,
-                    "buffer refcount leak: [%03d] (freeNext=%d, freePrev=%d, "
-                    "rel=%u/%u, blockNum=%u, flags=0x%x, refcount=%d %ld)",
+                    "buffer refcount leak: [%03d] (freeNext=%d, freePrev=%d, "
+                    "rel=%u/%u, blockNum=%u, flags=0x%x, refcount=%d %ld)",
                     i, buf->freeNext, buf->freePrev,
                     buf->tag.rnode.tblNode, buf->tag.rnode.relNode,
                     buf->tag.blockNum, buf->flags,
@@ -1509,10 +1509,10 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
            if (status == SM_FAIL)      /* disk failure ?! */
                ereport(PANIC,
                        (errcode(ERRCODE_IO_ERROR),
-                        errmsg("could not write block %u of %u/%u",
-                               bufHdr->tag.blockNum,
-                               bufHdr->tag.rnode.tblNode,
-                               bufHdr->tag.rnode.relNode)));
+                        errmsg("could not write block %u of %u/%u",
+                               bufHdr->tag.blockNum,
+                               bufHdr->tag.rnode.tblNode,
+                               bufHdr->tag.rnode.relNode)));

            BufferFlushCount++;
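Note on the bufmgr.c hunks: they touch the zero_damaged_pages paths, where a corrupt page header is a hard ERROR with the GUC off, but only a WARNING plus a zeroed page with it on (recovery proceeds at the cost of that page's tuples). A minimal sketch of the decision, with simplified types and a hypothetical is_valid_header() stub standing in for the real PageHeaderIsValid():

    #include <stdio.h>
    #include <string.h>

    #define BLCKSZ 8192

    static int zero_damaged_pages = 0;  /* GUC: off by default, as in the diff */

    /* Hypothetical stand-in for PageHeaderIsValid(); treats an all-zero
     * first word as "invalid" purely for demonstration. */
    static int is_valid_header(const unsigned char *page)
    {
        return page[0] != 0 || page[1] != 0;
    }

    /* Returns 1 if the page is usable after the check, 0 on hard failure. */
    static int check_page(unsigned char *page, unsigned blockNum, const char *rel)
    {
        if (is_valid_header(page))
            return 1;
        if (zero_damaged_pages)
        {
            fprintf(stderr, "invalid page header in block %u of \"%s\"; zeroing out page\n",
                    blockNum, rel);
            memset(page, 0, BLCKSZ);    /* salvage: blank page, data lost */
            return 1;
        }
        fprintf(stderr, "invalid page header in block %u of \"%s\"\n",
                blockNum, rel);
        return 0;                       /* caller raises the error */
    }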
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 06606990c51..271a752a623 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -7,7 +7,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.99 2003/07/24 22:04:09 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.100 2003/08/04 00:43:23 momjian Exp $
  *
  * NOTES:
  *
@@ -113,8 +113,8 @@ int max_files_per_process = 1000;
 #define FileUnknownPos (-1L)

 /* these are the assigned bits in fdstate below: */
-#define FD_TEMPORARY        (1 << 0)    /* T = delete when closed */
-#define FD_XACT_TEMPORARY   (1 << 1)    /* T = delete at eoXact */
+#define FD_TEMPORARY        (1 << 0)    /* T = delete when closed */
+#define FD_XACT_TEMPORARY   (1 << 1)    /* T = delete at eoXact */

 typedef struct vfd
 {
@@ -247,7 +247,7 @@ pg_fdatasync(int fd)
  * This is exported for use by places that really want a plain kernel FD,
  * but need to be proof against running out of FDs.  Once an FD has been
  * successfully returned, it is the caller's responsibility to ensure that
- * it will not be leaked on ereport()!  Most users should *not* call this
+ * it will not be leaked on ereport()!  Most users should *not* call this
  * routine directly, but instead use the VFD abstraction level, which
  * provides protection against descriptor leaks as well as management of
  * files that need to be open for more than a short period of time.
@@ -274,7 +274,7 @@ tryAgain:
        ereport(LOG,
                (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
-                errmsg("out of file descriptors: %m; release and retry")));
+                errmsg("out of file descriptors: %m; release and retry")));
        errno = 0;
        if (ReleaseLruFile())
            goto tryAgain;
@@ -1064,7 +1064,7 @@ TryAgain:
        ereport(LOG,
                (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
-                errmsg("out of file descriptors: %m; release and retry")));
+                errmsg("out of file descriptors: %m; release and retry")));
        errno = 0;
        if (ReleaseLruFile())
            goto TryAgain;
@@ -1158,7 +1158,7 @@ AtProcExit_Files(void)
 static void
 CleanupTempFiles(bool isProcExit)
 {
-   Index       i;
+   Index       i;

    if (SizeVfdCache > 0)
    {
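Note on the fd.c hunks: both "out of file descriptors" messages sit in the same pattern — attempt the open, and if the kernel reports descriptor exhaustion (EMFILE/ENFILE), close a least-recently-used virtual file descriptor and retry. A sketch of that loop shape, with a stub release_lru_file() standing in for fd.c's real ReleaseLruFile():

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    /* Stand-in for fd.c's ReleaseLruFile(): close the least recently used
     * virtual FD and report whether a kernel descriptor was freed. */
    static int release_lru_file(void)
    {
        return 0;                   /* stub: nothing cached to release here */
    }

    static int open_with_retry(const char *path, int flags, mode_t mode)
    {
        int fd;

    tryAgain:
        fd = open(path, flags, mode);
        if (fd >= 0)
            return fd;
        if (errno == EMFILE || errno == ENFILE)
        {
            fprintf(stderr, "out of file descriptors: release and retry\n");
            errno = 0;
            if (release_lru_file())
                goto tryAgain;      /* freed a slot, so the open may succeed now */
        }
        return -1;                  /* caller inspects errno */
    }

This is why callers of the raw-FD entry point must not leak descriptors on error: the retry scheme only works while the VFD layer can actually find something to close.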
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index c7f72aafd62..5d381aa7f3e 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -8,7 +8,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.18 2003/07/24 22:04:09 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.19 2003/08/04 00:43:24 momjian Exp $
  *
  *
  * NOTES:
@@ -39,7 +39,7 @@
  * Actually, our space allocation is done in "chunks" of CHUNKPAGES pages,
  * with each relation guaranteed at least one chunk.  This reduces thrashing
  * of the storage allocations when there are small changes in the RRFS page
- * counts from one VACUUM to the next.  (XXX it might also be worthwhile to
+ * counts from one VACUUM to the next.  (XXX it might also be worthwhile to
  * impose some kind of moving-average smoothing on the RRFS page counts?)
  *
  * So the actual arithmetic is: for each relation compute myRequest as the
@@ -72,10 +72,10 @@

 /* Initial value for average-request moving average */
-#define INITIAL_AVERAGE ((Size) (BLCKSZ / 32))
+#define INITIAL_AVERAGE ((Size) (BLCKSZ / 32))

 /*
- * Number of pages and bytes per allocation chunk.  Indexes can squeeze 50%
+ * Number of pages and bytes per allocation chunk.  Indexes can squeeze 50%
  * more pages into the same space because they don't need to remember how much
  * free space on each page.  The nominal number of pages, CHUNKPAGES, is for
  * regular rels, and INDEXCHUNKPAGES is for indexes.  CHUNKPAGES should be
@@ -83,7 +83,7 @@
  */
 #define CHUNKPAGES  16
 #define CHUNKBYTES  (CHUNKPAGES * sizeof(FSMPageData))
-#define INDEXCHUNKPAGES ((int) (CHUNKBYTES / sizeof(IndexFSMPageData)))
+#define INDEXCHUNKPAGES ((int) (CHUNKBYTES / sizeof(IndexFSMPageData)))

@@ -104,9 +104,9 @@ typedef BlockIdData IndexFSMPageData;
    BlockIdSet(&(ptr)->ip_blkid, pg)
 #define FSMPageSetSpace(ptr, sz) \
    ((ptr)->ip_posid = (OffsetNumber) (sz))
-#define IndexFSMPageGetPageNum(ptr) \
+#define IndexFSMPageGetPageNum(ptr) \
    BlockIdGetBlockNumber(ptr)
-#define IndexFSMPageSetPageNum(ptr, pg) \
+#define IndexFSMPageSetPageNum(ptr, pg) \
    BlockIdSet(ptr, pg)

 /*----------
@@ -144,7 +144,7 @@ typedef struct FsmCacheFileHeader
    uint32      endian;
    uint32      version;
    int32       numRels;
-} FsmCacheFileHeader;
+} FsmCacheFileHeader;

 /* Per-relation header */
 typedef struct FsmCacheRelHeader
@@ -154,7 +154,7 @@ typedef struct FsmCacheRelHeader
    uint32      avgRequest;     /* moving average of space requests */
    int32       lastPageCount;  /* pages passed to RecordRelationFreeSpace */
    int32       storedPages;    /* # of pages stored in arena */
-} FsmCacheRelHeader;
+} FsmCacheRelHeader;

 /*
@@ -167,7 +167,7 @@ typedef struct FsmCacheRelHeader
  *
  * Each relation owns one or more chunks of per-page storage in the "arena".
  * The chunks for each relation are always consecutive, so that it can treat
- * its page storage as a simple array.  We further insist that its page data
+ * its page storage as a simple array.  We further insist that its page data
  * be ordered by block number, so that binary search is possible.
  *
  * Note: we handle pointers to these items as pointers, not as SHMEM_OFFSETs.
@@ -182,7 +182,7 @@ struct FSMHeader
 {
    HTAB       *relHash;        /* hashtable of FSMRelation entries */
    FSMRelation *usageList;     /* FSMRelations in usage-recency order */
-   FSMRelation *usageListTail; /* tail of usage-recency list */
+   FSMRelation *usageListTail; /* tail of usage-recency list */
    FSMRelation *firstRel;      /* FSMRelations in arena storage order */
    FSMRelation *lastRel;       /* tail of storage-order list */
    int         numRels;        /* number of FSMRelations now in use */
@@ -204,7 +204,7 @@ struct FSMRelation
    FSMRelation *nextUsage;     /* next rel in usage-recency order */
    FSMRelation *priorUsage;    /* prior rel in usage-recency order */
    FSMRelation *nextPhysical;  /* next rel in arena-storage order */
-   FSMRelation *priorPhysical; /* prior rel in arena-storage order */
+   FSMRelation *priorPhysical; /* prior rel in arena-storage order */
    bool        isIndex;        /* if true, we store only page numbers */
    Size        avgRequest;     /* moving average of space requests */
    int         lastPageCount;  /* pages passed to RecordRelationFreeSpace */
@@ -233,13 +233,13 @@ static BlockNumber find_index_free_space(FSMRelation *fsmrel);
 static void fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page,
                      Size spaceAvail);
 static bool lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
-                     int *outPageIndex);
+                     int *outPageIndex);
 static void compact_fsm_storage(void);
 static void push_fsm_rels_after(FSMRelation *afterRel);
-static void pack_incoming_pages(FSMPageData *newLocation, int newPages,
-                   PageFreeSpaceInfo *pageSpaces, int nPages);
-static void pack_existing_pages(FSMPageData *newLocation, int newPages,
-                   FSMPageData *oldLocation, int oldPages);
+static void pack_incoming_pages(FSMPageData * newLocation, int newPages,
+                   PageFreeSpaceInfo * pageSpaces, int nPages);
+static void pack_existing_pages(FSMPageData * newLocation, int newPages,
+                   FSMPageData * oldLocation, int oldPages);
 static int  fsm_calc_request(FSMRelation *fsmrel);
 static int  fsm_calc_target_allocation(int myRequest);
 static int  fsm_current_chunks(FSMRelation *fsmrel);
@@ -271,7 +271,7 @@ InitFreeSpaceMap(void)
    if (FreeSpaceMap == NULL)
        ereport(FATAL,
                (errcode(ERRCODE_OUT_OF_MEMORY),
-                errmsg("insufficient shared memory for free space map")));
+                errmsg("insufficient shared memory for free space map")));
    MemSet(FreeSpaceMap, 0, sizeof(FSMHeader));

    /* Create hashtable for FSMRelations */
@@ -288,7 +288,7 @@ InitFreeSpaceMap(void)
    if (!FreeSpaceMap->relHash)
        ereport(FATAL,
                (errcode(ERRCODE_OUT_OF_MEMORY),
-                errmsg("insufficient shared memory for free space map")));
+                errmsg("insufficient shared memory for free space map")));

    /* Allocate page-storage arena */
    nchunks = (MaxFSMPages - 1) / CHUNKPAGES + 1;
@@ -296,14 +296,14 @@ InitFreeSpaceMap(void)
    if (nchunks <= MaxFSMRelations)
        ereport(FATAL,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
-                       CHUNKPAGES)));
+                errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
+                       CHUNKPAGES)));

    FreeSpaceMap->arena = (char *) ShmemAlloc(nchunks * CHUNKBYTES);
    if (FreeSpaceMap->arena == NULL)
        ereport(FATAL,
                (errcode(ERRCODE_OUT_OF_MEMORY),
-                errmsg("insufficient shared memory for free space map")));
+                errmsg("insufficient shared memory for free space map")));

    FreeSpaceMap->totalChunks = nchunks;
    FreeSpaceMap->usedChunks = 0;
@@ -348,7 +348,7 @@ FreeSpaceShmemSize(void)
  * will turn out to have too little space available by the time the caller
  * gets a lock on it.  In that case, the caller should report the actual
  * amount of free space available on that page and then try again (see
- * RecordAndGetPageWithFreeSpace).  If InvalidBlockNumber is returned,
+ * RecordAndGetPageWithFreeSpace).  If InvalidBlockNumber is returned,
  * extend the relation.
  */
 BlockNumber
@@ -365,10 +365,10 @@ GetPageWithFreeSpace(RelFileNode *rel, Size spaceNeeded)
    fsmrel = create_fsm_rel(rel);

    /*
-    * Update the moving average of space requests. This code implements an
-    * exponential moving average with an equivalent period of about 63
-    * requests. Ignore silly requests, however, to ensure that the average
-    * stays sane.
+    * Update the moving average of space requests.  This code implements
+    * an exponential moving average with an equivalent period of about 63
+    * requests.  Ignore silly requests, however, to ensure that the
+    * average stays sane.
     */
    if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
    {
@@ -409,6 +409,7 @@ RecordAndGetPageWithFreeSpace(RelFileNode *rel,

    /* Do the Record */
    fsm_record_free_space(fsmrel, oldPage, oldSpaceAvail);
+
    /*
     * Update the moving average of space requests, same as in
     * GetPageWithFreeSpace.
@@ -458,7 +459,7 @@ GetAvgFSMRequestSize(RelFileNode *rel)
 void
 RecordRelationFreeSpace(RelFileNode *rel, int nPages,
-                       PageFreeSpaceInfo *pageSpaces)
+                       PageFreeSpaceInfo * pageSpaces)
 {
    FSMRelation *fsmrel;

@@ -469,11 +470,12 @@ RecordRelationFreeSpace(RelFileNode *rel,
        nPages = MaxFSMPages;

    LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
+
    /*
     * Note we don't record info about a relation unless there's already
     * an FSM entry for it, implying someone has done GetPageWithFreeSpace
-    * for it. Inactive rels thus will not clutter the map simply by being
-    * vacuumed.
+    * for it.  Inactive rels thus will not clutter the map simply by
+    * being vacuumed.
     */
    fsmrel = lookup_fsm_rel(rel);
    if (fsmrel)
@@ -484,6 +486,7 @@ RecordRelationFreeSpace(RelFileNode *rel,

        curAlloc = realloc_fsm_rel(fsmrel, nPages, false);
        curAllocPages = curAlloc * CHUNKPAGES;
+
        /*
         * If the data fits in our current allocation, just copy it;
         * otherwise must compress.
@@ -500,7 +503,7 @@ RecordRelationFreeSpace(RelFileNode *rel,
                Size        avail = pageSpaces[i].avail;

                /* Check caller provides sorted data */
-               if (i > 0 && page <= pageSpaces[i-1].blkno)
+               if (i > 0 && page <= pageSpaces[i - 1].blkno)
                    elog(ERROR, "free-space data is not in page order");
                FSMPageSetPageNum(newLocation, page);
                FSMPageSetSpace(newLocation, avail);
@@ -556,10 +559,11 @@ RecordIndexFreeSpace(RelFileNode *rel,
        nPages = MaxFSMPages;

    LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
+
    /*
     * Note we don't record info about a relation unless there's already
-    * an FSM entry for it, implying someone has done GetFreeIndexPage
-    * for it. Inactive rels thus will not clutter the map simply by being
+    * an FSM entry for it, implying someone has done GetFreeIndexPage for
+    * it.  Inactive rels thus will not clutter the map simply by being
     * vacuumed.
     */
    fsmrel = lookup_fsm_rel(rel);
@@ -572,6 +576,7 @@ RecordIndexFreeSpace(RelFileNode *rel,

        curAlloc = realloc_fsm_rel(fsmrel, nPages, true);
        curAllocPages = curAlloc * INDEXCHUNKPAGES;
+
        /*
         * If the data fits in our current allocation, just copy it;
         * otherwise must compress.  But compression is easy: we merely
@@ -587,7 +592,7 @@ RecordIndexFreeSpace(RelFileNode *rel,
            BlockNumber page = pages[i];

            /* Check caller provides sorted data */
-           if (i > 0 && page <= pages[i-1])
+           if (i > 0 && page <= pages[i - 1])
                elog(ERROR, "free-space data is not in page order");
            IndexFSMPageSetPageNum(newLocation, page);
            newLocation++;
@@ -660,7 +665,7 @@ FreeSpaceMapForgetDatabase(Oid dbid)
    LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
    for (fsmrel = FreeSpaceMap->usageList; fsmrel; fsmrel = nextrel)
    {
-       nextrel = fsmrel->nextUsage;    /* in case we delete it */
+       nextrel = fsmrel->nextUsage;    /* in case we delete it */
        if (fsmrel->key.tblNode == dbid)
            delete_fsm_rel(fsmrel);
    }
@@ -670,7 +675,7 @@ FreeSpaceMapForgetDatabase(Oid dbid)
 /*
  * PrintFreeSpaceMapStatistics - print statistics about FSM contents
  *
- * The info is sent to ereport() with the specified message level. This is
+ * The info is sent to ereport() with the specified message level.  This is
  * intended for use during VACUUM.
  */
 void
@@ -687,9 +692,7 @@ PrintFreeSpaceMapStatistics(int elevel)
    for (fsmrel = FreeSpaceMap->firstRel;
         fsmrel != NULL;
         fsmrel = fsmrel->nextPhysical)
-   {
        storedPages += fsmrel->storedPages;
-   }
    /* Copy other stats before dropping lock */
    numRels = FreeSpaceMap->numRels;
    sumRequests = FreeSpaceMap->sumRequests;
@@ -808,7 +811,7 @@ write_failed:
  * forma --- if anyone else is accessing FSM yet, there's a problem.
  *
  * Notes: no complaint is issued if no cache file is found.  If the file is
- * found, it is deleted after reading.  Thus, if we crash without a clean
+ * found, it is deleted after reading.  Thus, if we crash without a clean
  * shutdown, the next cycle of life starts with no FSM data.  To do otherwise,
  * we'd need to do significantly more validation in this routine, because of
  * the likelihood that what is in the dump file would be out-of-date, eg
@@ -879,7 +882,7 @@ LoadFreeSpaceMap(void)
            len = nPages * sizeof(IndexFSMPageData);
        else
            len = nPages * sizeof(FSMPageData);
-       data = (char *) palloc(len + 1);    /* +1 to avoid palloc(0) */
+       data = (char *) palloc(len + 1);    /* +1 to avoid palloc(0) */
        if (fread(data, 1, len, fp) != len)
        {
            elog(LOG, "premature EOF in \"%s\"", cachefilename);
@@ -888,7 +891,7 @@ LoadFreeSpaceMap(void)
        }

        /*
-        * Okay, create the FSM entry and insert data into it.  Since the
+        * Okay, create the FSM entry and insert data into it.  Since the
         * rels were stored in reverse usage order, at the end of the loop
         * they will be correctly usage-ordered in memory; and if
         * MaxFSMRelations is less than it used to be, we will correctly
@@ -904,10 +907,11 @@ LoadFreeSpaceMap(void)
            IndexFSMPageData *newLocation;

            curAllocPages = curAlloc * INDEXCHUNKPAGES;
+
            /*
             * If the data fits in our current allocation, just copy it;
-            * otherwise must compress. But compression is easy: we merely
-            * forget extra pages.
+            * otherwise must compress.  But compression is easy: we
+            * merely forget extra pages.
             */
            newLocation = (IndexFSMPageData *)
                (FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
@@ -921,6 +925,7 @@ LoadFreeSpaceMap(void)
            FSMPageData *newLocation;

            curAllocPages = curAlloc * CHUNKPAGES;
+
            /*
             * If the data fits in our current allocation, just copy it;
             * otherwise must compress.
@@ -1005,7 +1010,7 @@ create_fsm_rel(RelFileNode *rel)
        fsmrel->isIndex = false;    /* until we learn different */
        fsmrel->avgRequest = INITIAL_AVERAGE;
        fsmrel->lastPageCount = 0;
-       fsmrel->firstChunk = -1;    /* no space allocated */
+       fsmrel->firstChunk = -1;    /* no space allocated */
        fsmrel->storedPages = 0;
        fsmrel->nextPage = 0;

@@ -1015,7 +1020,7 @@ create_fsm_rel(RelFileNode *rel)

        /* Add new entry at front of LRU list */
        link_fsm_rel_usage(fsmrel);
-       fsmrel->nextPhysical = NULL;    /* not in physical-storage list */
+       fsmrel->nextPhysical = NULL;    /* not in physical-storage list */
        fsmrel->priorPhysical = NULL;
        FreeSpaceMap->numRels++;
        /* sumRequests is unchanged because request must be zero */
@@ -1076,14 +1081,15 @@ realloc_fsm_rel(FSMRelation *fsmrel, int nPages, bool isIndex)
    myRequest = fsm_calc_request(fsmrel);
    FreeSpaceMap->sumRequests += myRequest;
    myAlloc = fsm_calc_target_allocation(myRequest);
+
    /*
-    * Need to reallocate space if (a) my target allocation is more
-    * than my current allocation, AND (b) my actual immediate need
-    * (myRequest+1 chunks) is more than my current allocation.
-    * Otherwise just store the new data in-place.
+    * Need to reallocate space if (a) my target allocation is more than
+    * my current allocation, AND (b) my actual immediate need
+    * (myRequest+1 chunks) is more than my current allocation. Otherwise
+    * just store the new data in-place.
     */
    curAlloc = fsm_current_allocation(fsmrel);
-   if (myAlloc > curAlloc && (myRequest+1) > curAlloc && nPages > 0)
+   if (myAlloc > curAlloc && (myRequest + 1) > curAlloc && nPages > 0)
    {
        /* Remove entry from storage list, and compact */
        unlink_fsm_rel_storage(fsmrel);
@@ -1133,6 +1139,7 @@ unlink_fsm_rel_usage(FSMRelation *fsmrel)
        fsmrel->nextUsage->priorUsage = fsmrel->priorUsage;
    else
        FreeSpaceMap->usageListTail = fsmrel->priorUsage;
+
    /*
     * We don't bother resetting fsmrel's links, since it's about to be
     * deleted or relinked at the head.
@@ -1212,7 +1219,8 @@ find_free_space(FSMRelation *fsmrel, Size spaceNeeded)
        if (spaceAvail >= spaceNeeded)
        {
            /*
-            * Found what we want --- adjust the entry, and update nextPage.
+            * Found what we want --- adjust the entry, and update
+            * nextPage.
             */
            FSMPageSetSpace(page, spaceAvail - spaceNeeded);
            fsmrel->nextPage = pageIndex + 1;
@@ -1233,12 +1241,12 @@ static BlockNumber
 find_index_free_space(FSMRelation *fsmrel)
 {
    IndexFSMPageData *info;
-   BlockNumber result;
+   BlockNumber result;

    /*
     * If isIndex isn't set, it could be that RecordIndexFreeSpace() has
-    * never yet been called on this relation, and we're still looking
-    * at the default setting from create_fsm_rel(). If so, just act as
+    * never yet been called on this relation, and we're still looking at
+    * the default setting from create_fsm_rel().  If so, just act as
     * though there's no space.
     */
    if (!fsmrel->isIndex)
@@ -1247,14 +1255,15 @@ find_index_free_space(FSMRelation *fsmrel)
            return InvalidBlockNumber;
        elog(ERROR, "find_index_free_space called for a non-index relation");
    }
+
    /*
-    * For indexes, there's no need for the nextPage state variable; we just
-    * remove and return the first available page. (We could save cycles here
-    * by returning the last page, but it seems better to encourage re-use
-    * of lower-numbered pages.)
+    * For indexes, there's no need for the nextPage state variable; we
+    * just remove and return the first available page.  (We could save
+    * cycles here by returning the last page, but it seems better to
+    * encourage re-use of lower-numbered pages.)
     */
    if (fsmrel->storedPages <= 0)
-       return InvalidBlockNumber;  /* no pages available */
+       return InvalidBlockNumber;  /* no pages available */
    info = (IndexFSMPageData *)
        (FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
    result = IndexFSMPageGetPageNum(info);
@@ -1287,8 +1296,8 @@ fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail)
    else
    {
        /*
-        * No existing entry; ignore the call. We used to add the page
-        * to the FSM --- but in practice, if the page hasn't got enough
+        * No existing entry; ignore the call.  We used to add the page to
+        * the FSM --- but in practice, if the page hasn't got enough
         * space to satisfy the caller who's kicking it back to us, then
         * it's probably uninteresting to everyone else as well.
         */
    }
 }
@@ -1327,7 +1336,7 @@ lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
        while (low <= high)
        {
            int         middle;
-           BlockNumber probe;
+           BlockNumber probe;

            middle = low + (high - low) / 2;
            probe = IndexFSMPageGetPageNum(info + middle);
@@ -1357,7 +1366,7 @@ lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
        while (low <= high)
        {
            int         middle;
-           BlockNumber probe;
+           BlockNumber probe;

            middle = low + (high - low) / 2;
            probe = FSMPageGetPageNum(info + middle);
@@ -1391,13 +1400,13 @@ compact_fsm_storage(void)
         fsmrel != NULL;
         fsmrel = fsmrel->nextPhysical)
    {
-       int         newAlloc;
-       int         newAllocPages;
-       int         newChunkIndex;
-       int         oldChunkIndex;
-       int         curChunks;
-       char       *newLocation;
-       char       *oldLocation;
+       int         newAlloc;
+       int         newAllocPages;
+       int         newChunkIndex;
+       int         oldChunkIndex;
+       int         curChunks;
+       char       *newLocation;
+       char       *oldLocation;

        /*
         * Calculate target allocation, make sure we don't overrun due to
@@ -1412,6 +1421,7 @@ compact_fsm_storage(void)
        newAllocPages = newAlloc * CHUNKPAGES;
        newChunkIndex = nextChunkIndex;
        nextChunkIndex += newAlloc;
+
        /*
         * Determine current size, current and new locations
         */
@@ -1419,18 +1429,19 @@ compact_fsm_storage(void)
        oldChunkIndex = fsmrel->firstChunk;
        newLocation = FreeSpaceMap->arena + newChunkIndex * CHUNKBYTES;
        oldLocation = FreeSpaceMap->arena + oldChunkIndex * CHUNKBYTES;
+
        /*
         * It's possible that we have to move data down, not up, if the
         * allocations of previous rels expanded.  This should mean that
         * our allocation expanded too (or at least got no worse), and
         * ditto for later rels.  So there should be room --- but we might
         * have to push down following rels to make it.  We don't want to
-        * do the push more than once, so pack everything against the
-        * end of the arena if so.
+        * do the push more than once, so pack everything against the end
+        * of the arena if so.
         */
        if (newChunkIndex > oldChunkIndex)
        {
-           int limitChunkIndex;
+           int         limitChunkIndex;

            if (newAllocPages < fsmrel->storedPages)
                elog(PANIC, "can't juggle and compress too");
@@ -1455,9 +1466,9 @@ compact_fsm_storage(void)
        else if (newAllocPages < fsmrel->storedPages)
        {
            /*
-            * Need to compress the page data. For an index, "compression"
-            * just means dropping excess pages; otherwise we try to keep
-            * the ones with the most space.
+            * Need to compress the page data.  For an index,
+            * "compression" just means dropping excess pages; otherwise
+            * we try to keep the ones with the most space.
             */
            if (fsmrel->isIndex)
            {
@@ -1508,11 +1519,11 @@ push_fsm_rels_after(FSMRelation *afterRel)
         fsmrel != NULL;
         fsmrel = fsmrel->priorPhysical)
    {
-       int         chunkCount;
-       int         newChunkIndex;
-       int         oldChunkIndex;
-       char       *newLocation;
-       char       *oldLocation;
+       int         chunkCount;
+       int         newChunkIndex;
+       int         oldChunkIndex;
+       char       *newLocation;
+       char       *oldLocation;

        if (fsmrel == afterRel)
            break;
@@ -1549,8 +1560,8 @@ push_fsm_rels_after(FSMRelation *afterRel)
 #define HISTOGRAM_BINS 64

 static void
-pack_incoming_pages(FSMPageData *newLocation, int newPages,
-                   PageFreeSpaceInfo *pageSpaces, int nPages)
+pack_incoming_pages(FSMPageData * newLocation, int newPages,
+                   PageFreeSpaceInfo * pageSpaces, int nPages)
 {
    int         histogram[HISTOGRAM_BINS];
    int         above,
@@ -1564,35 +1575,35 @@ pack_incoming_pages(FSMPageData *newLocation, int newPages,
    MemSet(histogram, 0, sizeof(histogram));
    for (i = 0; i < nPages; i++)
    {
-       Size avail = pageSpaces[i].avail;
+       Size        avail = pageSpaces[i].avail;

        if (avail >= BLCKSZ)
            elog(ERROR, "bogus freespace amount");
-       avail /= (BLCKSZ/HISTOGRAM_BINS);
+       avail /= (BLCKSZ / HISTOGRAM_BINS);
        histogram[avail]++;
    }

    /* Find the breakpoint bin */
    above = 0;
-   for (i = HISTOGRAM_BINS-1; i >= 0; i--)
+   for (i = HISTOGRAM_BINS - 1; i >= 0; i--)
    {
-       int sum = above + histogram[i];
+       int         sum = above + histogram[i];

        if (sum > newPages)
            break;
        above = sum;
    }
    Assert(i >= 0);
-   thresholdL = i * BLCKSZ/HISTOGRAM_BINS; /* low bound of bp bin */
-   thresholdU = (i+1) * BLCKSZ/HISTOGRAM_BINS; /* hi bound */
+   thresholdL = i * BLCKSZ / HISTOGRAM_BINS;   /* low bound of bp bin */
+   thresholdU = (i + 1) * BLCKSZ / HISTOGRAM_BINS;     /* hi bound */
    binct = newPages - above;   /* number to take from bp bin */

    /* And copy the appropriate data */
    for (i = 0; i < nPages; i++)
    {
        BlockNumber page = pageSpaces[i].blkno;
-       Size avail = pageSpaces[i].avail;
+       Size        avail = pageSpaces[i].avail;

        /* Check caller provides sorted data */
-       if (i > 0 && page <= pageSpaces[i-1].blkno)
+       if (i > 0 && page <= pageSpaces[i - 1].blkno)
            elog(ERROR, "free-space data is not in page order");
        /* Save this page? */
        if (avail >= thresholdU ||
@@ -1619,8 +1630,8 @@ pack_incoming_pages(FSMPageData *newLocation, int newPages,
  * so that we can copy data moving forward in the arrays without problem.
  */
 static void
-pack_existing_pages(FSMPageData *newLocation, int newPages,
-                   FSMPageData *oldLocation, int oldPages)
+pack_existing_pages(FSMPageData * newLocation, int newPages,
+                   FSMPageData * oldLocation, int oldPages)
 {
    int         histogram[HISTOGRAM_BINS];
    int         above,
@@ -1634,33 +1645,33 @@ pack_existing_pages(FSMPageData *newLocation, int newPages,
    MemSet(histogram, 0, sizeof(histogram));
    for (i = 0; i < oldPages; i++)
    {
-       Size avail = FSMPageGetSpace(oldLocation + i);
+       Size        avail = FSMPageGetSpace(oldLocation + i);

        /* Shouldn't happen, but test to protect against stack clobber */
        if (avail >= BLCKSZ)
            elog(ERROR, "bogus freespace amount");
-       avail /= (BLCKSZ/HISTOGRAM_BINS);
+       avail /= (BLCKSZ / HISTOGRAM_BINS);
        histogram[avail]++;
    }

    /* Find the breakpoint bin */
    above = 0;
-   for (i = HISTOGRAM_BINS-1; i >= 0; i--)
+   for (i = HISTOGRAM_BINS - 1; i >= 0; i--)
    {
-       int sum = above + histogram[i];
+       int         sum = above + histogram[i];

        if (sum > newPages)
            break;
        above = sum;
    }
    Assert(i >= 0);
-   thresholdL = i * BLCKSZ/HISTOGRAM_BINS; /* low bound of bp bin */
-   thresholdU = (i+1) * BLCKSZ/HISTOGRAM_BINS; /* hi bound */
+   thresholdL = i * BLCKSZ / HISTOGRAM_BINS;   /* low bound of bp bin */
+   thresholdU = (i + 1) * BLCKSZ / HISTOGRAM_BINS;     /* hi bound */
    binct = newPages - above;   /* number to take from bp bin */

    /* And copy the appropriate data */
    for (i = 0; i < oldPages; i++)
    {
        BlockNumber page = FSMPageGetPageNum(oldLocation + i);
-       Size avail = FSMPageGetSpace(oldLocation + i);
+       Size        avail = FSMPageGetSpace(oldLocation + i);

        /* Save this page? */
        if (avail >= thresholdU ||
@@ -1755,13 +1766,9 @@ static int
 fsm_current_allocation(FSMRelation *fsmrel)
 {
    if (fsmrel->nextPhysical != NULL)
-   {
        return fsmrel->nextPhysical->firstChunk - fsmrel->firstChunk;
-   }
    else if (fsmrel == FreeSpaceMap->lastRel)
-   {
        return FreeSpaceMap->usedChunks - fsmrel->firstChunk;
-   }
    else
    {
        /* it's not in the storage-order list */
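Note on the freespace.c hunks: the comment reflowed in GetPageWithFreeSpace cites "an exponential moving average with an equivalent period of about 63 requests". That figure follows from the standard EMA relation alpha = 2 / (N + 1): a smoothing weight of alpha = 1/32 gives N = 63. A sketch under that assumption — the diff shows only the comment, not the constant freespace.c actually uses:

    #include <stdio.h>

    #define BLCKSZ 8192

    /* EMA with weight 1/32: avg <- avg + (request - avg)/32, i.e.
     * (31*avg + request)/32 in integer arithmetic.  Equivalent period N
     * solves 1/32 = 2/(N + 1), hence N = 63. */
    static unsigned long ema_update(unsigned long avg, unsigned long request)
    {
        return (avg * 31 + request) / 32;
    }

    int main(void)
    {
        unsigned long avg = BLCKSZ / 32;    /* INITIAL_AVERAGE from the diff */
        int i;

        for (i = 0; i < 200; i++)
            avg = ema_update(avg, 400);     /* feed constant 400-byte requests */
        printf("avg = %lu\n", avg);         /* converged close to 400 */
        return 0;
    }

The same file's pack_incoming_pages/pack_existing_pages hunks show the complementary trick: when a relation's data must be compressed into fewer slots, a 64-bin histogram of per-page free space picks a threshold bin so that roughly the newPages pages with the most free space are kept.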
*/ static void -pack_existing_pages(FSMPageData *newLocation, int newPages, - FSMPageData *oldLocation, int oldPages) +pack_existing_pages(FSMPageData * newLocation, int newPages, + FSMPageData * oldLocation, int oldPages) { int histogram[HISTOGRAM_BINS]; int above, @@ -1634,33 +1645,33 @@ pack_existing_pages(FSMPageData *newLocation, int newPages, MemSet(histogram, 0, sizeof(histogram)); for (i = 0; i < oldPages; i++) { - Size avail = FSMPageGetSpace(oldLocation + i); + Size avail = FSMPageGetSpace(oldLocation + i); /* Shouldn't happen, but test to protect against stack clobber */ if (avail >= BLCKSZ) elog(ERROR, "bogus freespace amount"); - avail /= (BLCKSZ/HISTOGRAM_BINS); + avail /= (BLCKSZ / HISTOGRAM_BINS); histogram[avail]++; } /* Find the breakpoint bin */ above = 0; - for (i = HISTOGRAM_BINS-1; i >= 0; i--) + for (i = HISTOGRAM_BINS - 1; i >= 0; i--) { - int sum = above + histogram[i]; + int sum = above + histogram[i]; if (sum > newPages) break; above = sum; } Assert(i >= 0); - thresholdL = i * BLCKSZ/HISTOGRAM_BINS; /* low bound of bp bin */ - thresholdU = (i+1) * BLCKSZ/HISTOGRAM_BINS; /* hi bound */ + thresholdL = i * BLCKSZ / HISTOGRAM_BINS; /* low bound of bp bin */ + thresholdU = (i + 1) * BLCKSZ / HISTOGRAM_BINS; /* hi bound */ binct = newPages - above; /* number to take from bp bin */ /* And copy the appropriate data */ for (i = 0; i < oldPages; i++) { BlockNumber page = FSMPageGetPageNum(oldLocation + i); - Size avail = FSMPageGetSpace(oldLocation + i); + Size avail = FSMPageGetSpace(oldLocation + i); /* Save this page? */ if (avail >= thresholdU || @@ -1755,13 +1766,9 @@ static int fsm_current_allocation(FSMRelation *fsmrel) { if (fsmrel->nextPhysical != NULL) - { return fsmrel->nextPhysical->firstChunk - fsmrel->firstChunk; - } else if (fsmrel == FreeSpaceMap->lastRel) - { return FreeSpaceMap->usedChunks - fsmrel->firstChunk; - } else { /* it's not in the storage-order list */ diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c index 5cc0f5f2cdb..154e39b2845 100644 --- a/src/backend/storage/ipc/ipc.c +++ b/src/backend/storage/ipc/ipc.c @@ -13,7 +13,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.83 2003/07/24 22:04:09 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.84 2003/08/04 00:43:24 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -74,8 +74,8 @@ void proc_exit(int code) { /* - * Once we set this flag, we are committed to exit. Any ereport() will - * NOT send control back to the main loop, but right back here. + * Once we set this flag, we are committed to exit. Any ereport() + * will NOT send control back to the main loop, but right back here. 
*/ proc_exit_inprogress = true; diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c index 187b75ad55d..508149bc42c 100644 --- a/src/backend/storage/ipc/ipci.c +++ b/src/backend/storage/ipc/ipci.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.54 2003/07/24 22:04:09 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.55 2003/08/04 00:43:24 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -146,4 +146,3 @@ AttachSharedMemoryAndSemaphores(void) { CLOGShmemInit(); } - diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c index 225436ffd93..653dbf89263 100644 --- a/src/backend/storage/ipc/sinval.c +++ b/src/backend/storage/ipc/sinval.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.57 2003/07/24 22:04:09 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.58 2003/08/04 00:43:24 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -324,11 +324,10 @@ GetSnapshotData(Snapshot snapshot, bool serializable) * lastBackend would be sufficient. But it seems better to do the * malloc while not holding the lock, so we can't look at lastBackend. * - * if (snapshot->xip != NULL) - * no need to free and reallocate xip; + * if (snapshot->xip != NULL) no need to free and reallocate xip; * - * We can reuse the old xip array, because MaxBackends does not change - * at runtime. + * We can reuse the old xip array, because MaxBackends does not change at + * runtime. */ if (snapshot->xip == NULL) { diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c index 757f1df2f4f..1106a76c3df 100644 --- a/src/backend/storage/lmgr/deadlock.c +++ b/src/backend/storage/lmgr/deadlock.c @@ -12,7 +12,7 @@ * * * IDENTIFICATION - * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.21 2003/07/24 22:04:13 tgl Exp $ + * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.22 2003/08/04 00:43:24 momjian Exp $ * * Interface: * @@ -49,7 +49,7 @@ typedef struct } WAIT_ORDER; /* - * Information saved about each edge in a detected deadlock cycle. This + * Information saved about each edge in a detected deadlock cycle. This * is used to print a diagnostic message upon failure. * * Note: because we want to examine this info after releasing the LockMgrLock, @@ -61,7 +61,7 @@ typedef struct LOCKTAG locktag; /* ID of awaited lock object */ LOCKMODE lockmode; /* type of lock we're waiting for */ int pid; /* PID of blocked backend */ -} DEADLOCK_INFO; +} DEADLOCK_INFO; static bool DeadLockCheckRecurse(PGPROC *proc); @@ -147,7 +147,7 @@ InitDeadLockChecking(void) * We need to consider rearranging at most MaxBackends/2 wait queues * (since it takes at least two waiters in a queue to create a soft * edge), and the expanded form of the wait queues can't involve more - * than MaxBackends total waiters. (But avoid palloc(0) if + * than MaxBackends total waiters. (But avoid palloc(0) if * MaxBackends = 1.) */ waitOrders = (WAIT_ORDER *) @@ -221,7 +221,7 @@ DeadLockCheck(PGPROC *proc) * Call FindLockCycle one more time, to record the correct * deadlockDetails[] for the basic state with no rearrangements. 
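Note on the sinval.c hunk: it only rewraps a comment, but the point the comment makes is a reusable pattern — snapshot->xip is allocated once at its maximum possible size (MaxBackends entries) and then reused for every later snapshot, because MaxBackends cannot change while the server is running. A sketch of that allocate-once idiom, with hypothetical simplified types:

    #include <stdlib.h>

    typedef unsigned int TransactionId;

    typedef struct
    {
        TransactionId *xip;     /* in-progress XIDs; allocated once */
        int            xcnt;    /* number of valid entries */
    } SnapshotSketch;

    /* Fixed at server start, so the first allocation is big enough for
     * every later snapshot and never needs reallocating. */
    static int MaxBackends = 100;

    static int snapshot_prepare(SnapshotSketch *snap)
    {
        if (snap->xip == NULL)
        {
            snap->xip = malloc(MaxBackends * sizeof(TransactionId));
            if (snap->xip == NULL)
                return -1;      /* out of memory */
        }
        snap->xcnt = 0;         /* otherwise just reuse the old array */
        return 0;
    }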
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index 757f1df2f4f..1106a76c3df 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -12,7 +12,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.21 2003/07/24 22:04:13 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.22 2003/08/04 00:43:24 momjian Exp $
  *
  *  Interface:
  *
@@ -49,7 +49,7 @@ typedef struct
 } WAIT_ORDER;

 /*
- * Information saved about each edge in a detected deadlock cycle. This
+ * Information saved about each edge in a detected deadlock cycle.  This
  * is used to print a diagnostic message upon failure.
  *
  * Note: because we want to examine this info after releasing the LockMgrLock,
@@ -61,7 +61,7 @@ typedef struct
    LOCKTAG     locktag;        /* ID of awaited lock object */
    LOCKMODE    lockmode;       /* type of lock we're waiting for */
    int         pid;            /* PID of blocked backend */
-} DEADLOCK_INFO;
+} DEADLOCK_INFO;

 static bool DeadLockCheckRecurse(PGPROC *proc);
@@ -147,7 +147,7 @@ InitDeadLockChecking(void)
     * We need to consider rearranging at most MaxBackends/2 wait queues
     * (since it takes at least two waiters in a queue to create a soft
     * edge), and the expanded form of the wait queues can't involve more
-    * than MaxBackends total waiters. (But avoid palloc(0) if
+    * than MaxBackends total waiters.  (But avoid palloc(0) if
     * MaxBackends = 1.)
     */
    waitOrders = (WAIT_ORDER *)
@@ -221,7 +221,7 @@ DeadLockCheck(PGPROC *proc)
         * Call FindLockCycle one more time, to record the correct
         * deadlockDetails[] for the basic state with no rearrangements.
         */
-       int         nSoftEdges;
+       int         nSoftEdges;

        nWaitOrders = 0;
        if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
@@ -486,7 +486,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
    lockHolders = &(lock->lockHolders);

    proclock = (PROCLOCK *) SHMQueueNext(lockHolders, lockHolders,
-                                        offsetof(PROCLOCK, lockLink));
+                                        offsetof(PROCLOCK, lockLink));

    while (proclock)
    {
@@ -501,11 +501,11 @@ FindLockCycleRecurse(PGPROC *checkProc,
                ((1 << lm) & conflictMask) != 0)
            {
                /* This proc hard-blocks checkProc */
-               if (FindLockCycleRecurse(proc, depth+1,
+               if (FindLockCycleRecurse(proc, depth + 1,
                                         softEdges, nSoftEdges))
                {
                    /* fill deadlockDetails[] */
-                   DEADLOCK_INFO *info = &deadlockDetails[depth];
+                   DEADLOCK_INFO *info = &deadlockDetails[depth];

                    info->locktag = lock->tag;
                    info->lockmode = checkProc->waitLockMode;
@@ -558,11 +558,11 @@ FindLockCycleRecurse(PGPROC *checkProc,
                if (((1 << proc->waitLockMode) & conflictMask) != 0)
                {
                    /* This proc soft-blocks checkProc */
-                   if (FindLockCycleRecurse(proc, depth+1,
+                   if (FindLockCycleRecurse(proc, depth + 1,
                                             softEdges, nSoftEdges))
                    {
                        /* fill deadlockDetails[] */
-                       DEADLOCK_INFO *info = &deadlockDetails[depth];
+                       DEADLOCK_INFO *info = &deadlockDetails[depth];

                        info->locktag = lock->tag;
                        info->lockmode = checkProc->waitLockMode;
@@ -599,11 +599,11 @@ FindLockCycleRecurse(PGPROC *checkProc,
            if (((1 << proc->waitLockMode) & conflictMask) != 0)
            {
                /* This proc soft-blocks checkProc */
-               if (FindLockCycleRecurse(proc, depth+1,
+               if (FindLockCycleRecurse(proc, depth + 1,
                                         softEdges, nSoftEdges))
                {
                    /* fill deadlockDetails[] */
-                   DEADLOCK_INFO *info = &deadlockDetails[depth];
+                   DEADLOCK_INFO *info = &deadlockDetails[depth];

                    info->locktag = lock->tag;
                    info->lockmode = checkProc->waitLockMode;
@@ -834,7 +834,6 @@ PrintLockQueue(LOCK *lock, const char *info)
    printf("\n");
    fflush(stdout);
 }
-
 #endif

 /*
@@ -843,17 +842,17 @@ PrintLockQueue(LOCK *lock, const char *info)
 void
 DeadLockReport(void)
 {
-   StringInfoData buf;
-   int         i;
+   StringInfoData buf;
+   int         i;

    initStringInfo(&buf);

    for (i = 0; i < nDeadlockDetails; i++)
    {
-       DEADLOCK_INFO *info = &deadlockDetails[i];
+       DEADLOCK_INFO *info = &deadlockDetails[i];
        int         nextpid;

        /* The last proc waits for the first one... */
-       if (i < nDeadlockDetails-1)
+       if (i < nDeadlockDetails - 1)
            nextpid = info[1].pid;
        else
            nextpid = deadlockDetails[0].pid;
@@ -900,7 +899,7 @@ RememberSimpleDeadLock(PGPROC *proc1,
                       LOCK *lock,
                       PGPROC *proc2)
 {
-   DEADLOCK_INFO *info = &deadlockDetails[0];
+   DEADLOCK_INFO *info = &deadlockDetails[0];

    info->locktag = lock->tag;
    info->lockmode = lockmode;
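Note on the deadlock.c hunks: FindLockCycleRecurse records one DEADLOCK_INFO at recursion depth `depth` for each edge it follows in the waits-for graph, so that when a cycle is detected, deadlockDetails[] already holds the whole path for DeadLockReport() to print. A toy version of that shape over a plain adjacency matrix — the real code additionally distinguishes hard from soft edges and walks shared-memory queues:

    #include <stdio.h>

    #define NPROCS 4

    /* waits_for[a][b] != 0 means process a waits for process b */
    static int waits_for[NPROCS][NPROCS];
    static int path[NPROCS];        /* analogous to deadlockDetails[] */
    static int visiting[NPROCS];

    /* Depth-first search; on finding a cycle back to `start`, path[0..len-1]
     * holds the processes along the cycle, outermost first. */
    static int find_cycle(int start, int cur, int depth)
    {
        int next;

        path[depth] = cur;
        visiting[cur] = 1;
        for (next = 0; next < NPROCS; next++)
        {
            if (!waits_for[cur][next])
                continue;
            if (next == start)
                return depth + 1;       /* cycle length */
            if (!visiting[next])
            {
                int len = find_cycle(start, next, depth + 1);

                if (len > 0)
                    return len;
            }
        }
        visiting[cur] = 0;
        return 0;
    }

    int main(void)
    {
        int len, i;

        waits_for[0][1] = waits_for[1][2] = waits_for[2][0] = 1;
        len = find_cycle(0, 0, 0);
        for (i = 0; i < len; i++)
            printf("proc %d waits for proc %d\n", path[i], path[(i + 1) % len]);
        return 0;
    }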
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 2e71f40c09b..9d4c52f75fa 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.124 2003/07/28 00:09:15 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.125 2003/08/04 00:43:24 momjian Exp $
  *
  * NOTES
  *    Outside modules can create a lock table and acquire/release
@@ -127,9 +127,9 @@ inline static void
 PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
 {
    if (
-       (((PROCLOCK_LOCKMETHOD(*proclockP) == DEFAULT_LOCKMETHOD && Trace_locks)
-         || (PROCLOCK_LOCKMETHOD(*proclockP) == USER_LOCKMETHOD && Trace_userlocks))
-        && (((LOCK *) MAKE_PTR(proclockP->tag.lock))->tag.relId >= (Oid) Trace_lock_oidmin))
+       (((PROCLOCK_LOCKMETHOD(*proclockP) == DEFAULT_LOCKMETHOD && Trace_locks)
+         || (PROCLOCK_LOCKMETHOD(*proclockP) == USER_LOCKMETHOD && Trace_userlocks))
+        && (((LOCK *) MAKE_PTR(proclockP->tag.lock))->tag.relId >= (Oid) Trace_lock_oidmin))
        || (Trace_lock_table && (((LOCK *) MAKE_PTR(proclockP->tag.lock))->tag.relId == Trace_lock_table))
        )
        elog(LOG,
@@ -137,8 +137,8 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
             where, MAKE_OFFSET(proclockP), proclockP->tag.lock,
             PROCLOCK_LOCKMETHOD(*(proclockP)),
             proclockP->tag.proc, proclockP->tag.xid,
-            proclockP->holding[1], proclockP->holding[2], proclockP->holding[3],
-            proclockP->holding[4], proclockP->holding[5], proclockP->holding[6],
+            proclockP->holding[1], proclockP->holding[2], proclockP->holding[3],
+            proclockP->holding[4], proclockP->holding[5], proclockP->holding[6],
             proclockP->holding[7], proclockP->nHolding);
 }
@@ -321,10 +321,10 @@ LockMethodTableInit(char *tabName,
    sprintf(shmemName, "%s (proclock hash)", tabName);
    lockMethodTable->proclockHash = ShmemInitHash(shmemName,
-                                                 init_table_size,
-                                                 max_table_size,
-                                                 &info,
-                                                 hash_flags);
+                                                 init_table_size,
+                                                 max_table_size,
+                                                 &info,
+                                                 hash_flags);

    if (!lockMethodTable->proclockHash)
        elog(FATAL, "could not initialize lock table \"%s\"", tabName);
@@ -509,8 +509,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
    /*
     * Create the hash key for the proclock table.
     */
-   MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG));   /* must clear padding,
-                                                    * needed */
+   MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG));   /* must clear padding,
+                                                    * needed */
    proclocktag.lock = MAKE_OFFSET(lock);
    proclocktag.proc = MAKE_OFFSET(MyProc);
    TransactionIdStore(xid, &proclocktag.xid);
@@ -520,8 +520,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
     */
    proclockTable = lockMethodTable->proclockHash;
    proclock = (PROCLOCK *) hash_search(proclockTable,
-                                       (void *) &proclocktag,
-                                       HASH_ENTER, &found);
+                                       (void *) &proclocktag,
+                                       HASH_ENTER, &found);
    if (!proclock)
    {
        LWLockRelease(masterLock);
@@ -604,8 +604,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
    }

    /*
-    * If this process (under any XID) is a proclock of the lock, also grant
-    * myself another one without blocking.
+    * If this process (under any XID) is a proclock of the lock, also
+    * grant myself another one without blocking.
     */
    LockCountMyLocks(proclock->tag.lock, MyProc, myHolding);
    if (myHolding[lockmode] > 0)
@@ -649,8 +649,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
            SHMQueueDelete(&proclock->lockLink);
            SHMQueueDelete(&proclock->procLink);
            proclock = (PROCLOCK *) hash_search(proclockTable,
-                                               (void *) proclock,
-                                               HASH_REMOVE, NULL);
+                                               (void *) proclock,
+                                               HASH_REMOVE, NULL);
            if (!proclock)
                elog(WARNING, "proclock table corrupted");
        }
@@ -818,7 +818,7 @@ LockCountMyLocks(SHMEM_OFFSET lockOffset, PGPROC *proc, int *myHolding)
    MemSet(myHolding, 0, MAX_LOCKMODES * sizeof(int));

    proclock = (PROCLOCK *) SHMQueueNext(procHolders, procHolders,
-                                        offsetof(PROCLOCK, procLink));
+                                        offsetof(PROCLOCK, procLink));

    while (proclock)
    {
@@ -908,9 +908,10 @@ WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
         */
        LOCK_PRINT("WaitOnLock: aborting on lock", lock, lockmode);
        LWLockRelease(lockMethodTable->masterLock);
+
        /*
-        * Now that we aren't holding the LockMgrLock, we can give an error
-        * report including details about the detected deadlock.
+        * Now that we aren't holding the LockMgrLock, we can give an
+        * error report including details about the detected deadlock.
         */
        DeadLockReport();
        /* not reached */
@@ -1033,16 +1034,16 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
    /*
     * Find the proclock entry for this proclock.
     */
-   MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG));   /* must clear padding,
-                                                    * needed */
+   MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG));   /* must clear padding,
+                                                    * needed */
    proclocktag.lock = MAKE_OFFSET(lock);
    proclocktag.proc = MAKE_OFFSET(MyProc);
    TransactionIdStore(xid, &proclocktag.xid);

    proclockTable = lockMethodTable->proclockHash;
    proclock = (PROCLOCK *) hash_search(proclockTable,
-                                       (void *) &proclocktag,
-                                       HASH_FIND_SAVE, NULL);
+                                       (void *) &proclocktag,
+                                       HASH_FIND_SAVE, NULL);
    if (!proclock)
    {
        LWLockRelease(masterLock);
@@ -1143,8 +1144,8 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
        SHMQueueDelete(&proclock->lockLink);
        SHMQueueDelete(&proclock->procLink);
        proclock = (PROCLOCK *) hash_search(proclockTable,
-                                           (void *) &proclock,
-                                           HASH_REMOVE_SAVED, NULL);
+                                           (void *) &proclock,
+                                           HASH_REMOVE_SAVED, NULL);
        if (!proclock)
        {
            LWLockRelease(masterLock);
@@ -1207,7 +1208,7 @@ LockReleaseAll(LOCKMETHOD lockmethod, PGPROC *proc,
    LWLockAcquire(masterLock, LW_EXCLUSIVE);

    proclock = (PROCLOCK *) SHMQueueNext(procHolders, procHolders,
-                                        offsetof(PROCLOCK, procLink));
+                                        offsetof(PROCLOCK, procLink));

    while (proclock)
    {
@@ -1295,9 +1296,9 @@ LockReleaseAll(LOCKMETHOD lockmethod, PGPROC *proc,
         * remove the proclock entry from the hashtable
         */
        proclock = (PROCLOCK *) hash_search(lockMethodTable->proclockHash,
-                                           (void *) proclock,
-                                           HASH_REMOVE,
-                                           NULL);
+                                           (void *) proclock,
+                                           HASH_REMOVE,
+                                           NULL);
        if (!proclock)
        {
            LWLockRelease(masterLock);
@@ -1466,7 +1467,7 @@ DumpLocks(void)
        LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);

    proclock = (PROCLOCK *) SHMQueueNext(procHolders, procHolders,
-                                        offsetof(PROCLOCK, procLink));
+                                        offsetof(PROCLOCK, procLink));

    while (proclock)
    {
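Note on the lock.c hunks: both LockAcquire and LockRelease MemSet the whole PROCLOCKTAG before filling its fields, with the comment "must clear padding, needed". The tag struct is used directly as a hash-table key, and a bytewise hash sees compiler padding too, so uninitialized padding would make logically equal keys hash (and compare) differently. A sketch of the hazard and the fix, using a hypothetical tag layout and a generic byte hash:

    #include <stddef.h>
    #include <string.h>

    typedef struct
    {
        char  kind;             /* padding bytes typically follow this field */
        long  id;
    } TagSketch;

    /* Bytewise FNV-1a over the whole struct, padding included -- which is
     * exactly why the padding must be in a known state. */
    static unsigned hash_bytes(const void *p, size_t len)
    {
        const unsigned char *b = p;
        unsigned h = 2166136261u;
        size_t i;

        for (i = 0; i < len; i++)
            h = (h ^ b[i]) * 16777619u;
        return h;
    }

    static unsigned make_key_hash(char kind, long id)
    {
        TagSketch tag;

        memset(&tag, 0, sizeof(tag));   /* clear padding, as lock.c does */
        tag.kind = kind;
        tag.id = id;
        return hash_bytes(&tag, sizeof(tag));
    }

Without the memset, two calls building "the same" key could yield different hashes depending on what garbage happened to be in the padding bytes on the stack.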
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index eebd696c2c2..3bdda5924d7 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.131 2003/07/24 22:04:14 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.132 2003/08/04 00:43:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -78,6 +78,7 @@ static bool waitingForSignal = false;
 /* Mark these volatile because they can be changed by signal handler */
 static volatile bool statement_timeout_active = false;
 static volatile bool deadlock_timeout_active = false;
+
 /* statement_fin_time is valid only if statement_timeout_active is true */
 static struct timeval statement_fin_time;

@@ -571,7 +572,8 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
                 * up correctly is to call RemoveFromWaitQueue(), but
                 * we can't do that until we are *on* the wait queue.
                 * So, set a flag to check below, and break out of
-                * loop. Also, record deadlock info for later message.
+                * loop.  Also, record deadlock info for later
+                * message.
                 */
                RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
                early_deadlock = true;
@@ -950,11 +952,13 @@ bool
 enable_sig_alarm(int delayms, bool is_statement_timeout)
 {
 #ifdef WIN32
-#  warning add Win32 timer
+#warning add Win32 timer
 #else
    struct timeval fin_time;
+
 #ifndef __BEOS__
    struct itimerval timeval;
+
 #else
    bigtime_t   time_interval;
 #endif
@@ -984,16 +988,16 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
        /*
         * Begin deadlock timeout with statement-level timeout active
         *
-        * Here, we want to interrupt at the closer of the two timeout
-        * times. If fin_time >= statement_fin_time then we need not
-        * touch the existing timer setting; else set up to interrupt
-        * at the deadlock timeout time.
+        * Here, we want to interrupt at the closer of the two timeout times.
+        * If fin_time >= statement_fin_time then we need not touch the
+        * existing timer setting; else set up to interrupt at the
+        * deadlock timeout time.
         *
         * NOTE: in this case it is possible that this routine will be
         * interrupted by the previously-set timer alarm.  This is okay
-        * because the signal handler will do only what it should do according
-        * to the state variables. The deadlock checker may get run earlier
-        * than normal, but that does no harm.
+        * because the signal handler will do only what it should do
+        * according to the state variables.  The deadlock checker may get
+        * run earlier than normal, but that does no harm.
         */
        deadlock_timeout_active = true;
        if (fin_time.tv_sec > statement_fin_time.tv_sec ||
@@ -1037,6 +1041,7 @@ disable_sig_alarm(bool is_statement_timeout)
 #ifdef WIN32
 #warning add Win32 timer
 #else
+
    /*
     * Always disable the interrupt if it is active; this avoids being
     * interrupted by the signal handler and thereby possibly getting
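Note on the proc.c hunks: the reflowed comment in enable_sig_alarm describes the rule for sharing one interval timer between the statement timeout and the deadlock timeout — arm the timer for the closer of the two deadlines, and let the signal handler decide from the state flags which timeout actually fired. The comparison reduces to ordering two struct timevals; a sketch that mirrors the rule in the comment, not proc.c verbatim:

    #include <sys/time.h>

    /* Re-arm the timer iff the deadlock deadline (fin) is strictly earlier
     * than the already-armed statement deadline (stmt); otherwise the
     * existing timer setting can be left alone, exactly as the comment's
     * "if fin_time >= statement_fin_time" case says. */
    static int must_rearm(const struct timeval *fin, const struct timeval *stmt)
    {
        return fin->tv_sec < stmt->tv_sec ||
               (fin->tv_sec == stmt->tv_sec && fin->tv_usec < stmt->tv_usec);
    }

The design choice the comment defends: a spurious early alarm is harmless, because the handler consults statement_timeout_active/deadlock_timeout_active rather than assuming a particular cause, so the deadlock check may simply run a little early.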
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index b8766ae6ab7..2441534b46c 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.53 2003/07/24 22:04:15 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.54 2003/08/04 00:43:24 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -50,7 +50,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
  * PageHeaderIsValid
  *      Check that the header fields of a page appear valid.
  *
- * This is called when a page has just been read in from disk. The idea is
+ * This is called when a page has just been read in from disk.  The idea is
  * to cheaply detect trashed pages before we go nuts following bogus item
  * pointers, testing invalid transaction identifiers, etc.
  *
@@ -135,7 +135,7 @@ PageAddItem(Page page,
        ereport(PANIC,
                (errcode(ERRCODE_DATA_CORRUPTED),
                 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
-                       phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
+                       phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));

    /*
     * Select offsetNumber to place the new item at
@@ -391,8 +391,8 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
    if (totallen > (Size) (pd_special - pd_lower))
        ereport(ERROR,
                (errcode(ERRCODE_DATA_CORRUPTED),
-                errmsg("corrupted item lengths: total %u, available space %u",
-                       (unsigned int) totallen, pd_special - pd_lower)));
+                errmsg("corrupted item lengths: total %u, available space %u",
+                       (unsigned int) totallen, pd_special - pd_lower)));

    /* sort itemIdSortData array into decreasing itemoff order */
    qsort((char *) itemidbase, nused, sizeof(struct itemIdSortData),
@@ -472,7 +472,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
        ereport(ERROR,
                (errcode(ERRCODE_DATA_CORRUPTED),
                 errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
-                       phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
+                       phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));

    nline = PageGetMaxOffsetNumber(page);
    if ((int) offnum <= 0 || (int) offnum > nline)
@@ -533,7 +533,8 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
     */
    if (!PageIsEmpty(page))
    {
-       int         i;
+       int         i;
+
        nline--;                /* there's one less than when we started */
        for (i = 1; i <= nline; i++)
        {
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index bd46d3dbd52..420413a510b 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.96 2003/07/28 00:09:15 tgl Exp $
+ *    $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.97 2003/08/04 00:43:25 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -571,12 +571,13 @@ mdnblocks(Relation reln)
    segno = 0;

    /*
-    * Skip through any segments that aren't the last one, to avoid redundant
-    * seeks on them. We have previously verified that these segments are
-    * exactly RELSEG_SIZE long, and it's useless to recheck that each time.
-    * (NOTE: this assumption could only be wrong if another backend has
-    * truncated the relation. We rely on higher code levels to handle that
-    * scenario by closing and re-opening the md fd.)
+    * Skip through any segments that aren't the last one, to avoid
+    * redundant seeks on them.  We have previously verified that these
+    * segments are exactly RELSEG_SIZE long, and it's useless to recheck
+    * that each time.  (NOTE: this assumption could only be wrong if
+    * another backend has truncated the relation.  We rely on higher code
+    * levels to handle that scenario by closing and re-opening the md
+    * fd.)
     */
    while (v->mdfd_chain != (MdfdVec *) NULL)
    {
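Note on the md.c hunk: mdnblocks relies on md.c's segmented storage, in which a large relation is split across files of RELSEG_SIZE blocks each; every segment before the last is known to be full, so only the last segment's length needs to be checked with a seek. The arithmetic is simple; a sketch assuming the usual 8 kB-block build where RELSEG_SIZE is 131072 (1 GB per segment file):

    #include <stdio.h>

    #define RELSEG_SIZE 131072  /* blocks per segment file: 1 GB of 8 kB blocks */

    /* Total block count given the number of known-full segments and the
     * measured block count of the last, possibly partial, segment -- the
     * quantity mdnblocks effectively computes. */
    static unsigned long total_blocks(unsigned full_segments,
                                      unsigned last_seg_blocks)
    {
        return (unsigned long) full_segments * RELSEG_SIZE + last_seg_blocks;
    }

    int main(void)
    {
        /* e.g. two full 1 GB segments plus 100 blocks in the third file */
        printf("%lu\n", total_blocks(2, 100));  /* prints 262244 */
        return 0;
    }

This is also why the NOTE in the comment matters: the "all earlier segments are full" assumption only breaks if another backend truncates the relation, which higher levels handle by closing and re-opening the md file descriptor.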