author | Bruce Momjian <bruce@momjian.us> | 2011-04-10 11:42:00 -0400
---|---|---
committer | Bruce Momjian <bruce@momjian.us> | 2011-04-10 11:42:00 -0400
commit | bf50caf105a901c4f83ac1df3cdaf910c26694a4 (patch) |
tree | dac42d7795070f107eefb085c500f86a4d35f92f /src/backend/access |
parent | 9a8b73147c07e02e10e0d0a34aa99d72e3336fb2 (diff) |
download | postgresql-bf50caf105a901c4f83ac1df3cdaf910c26694a4.tar.gz postgresql-bf50caf105a901c4f83ac1df3cdaf910c26694a4.zip |
pgindent run before PG 9.1 beta 1.
Diffstat (limited to 'src/backend/access')
34 files changed, 455 insertions, 432 deletions
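pgindent mechanically re-lays-out C source: it reflows comment paragraphs to the standard fill width, aligns local variable declarations at a tab stop, and normalizes spacing around parentheses and operators. A hand-written before/after sketch of the kinds of changes that dominate the hunks below (illustrative only; hypothetical function, not actual tree contents):

```c
#include <stdio.h>

/* Before a pgindent run, such code often looked like this: */
static int
example_before(int attnum)
{
    int byte = attnum >> 3;         /* ragged declaration alignment */
    int finalbit = attnum & 0x07;

    if ( byte >= 0 )                /* spaces inside parens */
        return finalbit;
    return -1;
}

/* After pgindent: declarations aligned, spacing normalized */
static int
example_after(int attnum)
{
    int         byte = attnum >> 3;
    int         finalbit = attnum & 0x07;

    if (byte >= 0)
        return finalbit;
    return -1;
}

int
main(void)
{
    printf("%d %d\n", example_before(11), example_after(11));
    return 0;
}
```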
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 6d608fed895..175e6ea2f2e 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -350,7 +350,7 @@ nocachegetattr(HeapTuple tuple,
         *
         * check to see if any preceding bits are null...
         */
-        int     byte = attnum >> 3;
+        int         byte = attnum >> 3;
         int         finalbit = attnum & 0x07;
 
         /* check for nulls "before" final bit of last byte */
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 9ea87360f91..85c43199aa7 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -237,7 +237,7 @@ nocache_index_getattr(IndexTuple tup,
         * Now check to see if any preceding bits are null...
         */
        {
-           int     byte = attnum >> 3;
+           int         byte = attnum >> 3;
            int         finalbit = attnum & 0x07;
 
            /* check for nulls "before" final bit of last byte */
diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c
index ce9abae6aa4..2de58604eee 100644
--- a/src/backend/access/gin/ginarrayproc.c
+++ b/src/backend/access/gin/ginarrayproc.c
@@ -82,7 +82,8 @@ ginqueryarrayextract(PG_FUNCTION_ARGS)
    ArrayType  *array = PG_GETARG_ARRAYTYPE_P_COPY(0);
    int32      *nkeys = (int32 *) PG_GETARG_POINTER(1);
    StrategyNumber strategy = PG_GETARG_UINT16(2);
-   /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */
+
+   /* bool **pmatch = (bool **) PG_GETARG_POINTER(3); */
    /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
    bool      **nullFlags = (bool **) PG_GETARG_POINTER(5);
    int32      *searchMode = (int32 *) PG_GETARG_POINTER(6);
@@ -112,7 +113,7 @@ ginqueryarrayextract(PG_FUNCTION_ARGS)
        case GinContainsStrategy:
            if (nelems > 0)
                *searchMode = GIN_SEARCH_MODE_DEFAULT;
-           else    /* everything contains the empty set */
+           else                /* everything contains the empty set */
                *searchMode = GIN_SEARCH_MODE_ALL;
            break;
        case GinContainedStrategy:
@@ -142,10 +143,13 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
 {
    bool       *check = (bool *) PG_GETARG_POINTER(0);
    StrategyNumber strategy = PG_GETARG_UINT16(1);
+
    /* ArrayType *query = PG_GETARG_ARRAYTYPE_P(2); */
    int32       nkeys = PG_GETARG_INT32(3);
+
    /* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
    bool       *recheck = (bool *) PG_GETARG_POINTER(5);
+
    /* Datum *queryKeys = (Datum *) PG_GETARG_POINTER(6); */
    bool       *nullFlags = (bool *) PG_GETARG_POINTER(7);
    bool        res;
@@ -190,10 +194,11 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
        case GinEqualStrategy:
            /* we will need recheck */
            *recheck = true;
+
            /*
             * Must have all elements in check[] true; no discrimination
-            * against nulls here. This is because array_contain_compare
-            * and array_eq handle nulls differently ...
+            * against nulls here. This is because array_contain_compare and
+            * array_eq handle nulls differently ...
             */
            res = true;
            for (i = 0; i < nkeys; i++)
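The first two hunks above touch the classic null-bitmap idiom: `attnum >> 3` selects the bitmap byte covering an attribute, and `attnum & 0x07` the bit within that byte. A self-contained sketch of the same probe (simplified layout, not the real HeapTuple format):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Minimal sketch of a null-bitmap probe in the style of nocachegetattr():
 * bit N of the bitmap lives in byte N >> 3, at position N & 0x07.
 * The bitmap layout here is hypothetical, not the on-disk tuple format.
 */
static bool
att_is_null(const uint8_t *nullbitmap, int attnum)
{
    int byte = attnum >> 3;         /* which bitmap byte covers attnum */
    int finalbit = attnum & 0x07;   /* which bit within that byte */

    /* in this sketch a 0 bit means "null", as in heap tuples */
    return (nullbitmap[byte] & (1 << finalbit)) == 0;
}

int
main(void)
{
    uint8_t bitmap[2] = {0xFF, 0xFB};   /* attribute 10 (bit 2 of byte 1) is null */

    printf("att 3 null? %d\n", att_is_null(bitmap, 3));     /* 0 */
    printf("att 10 null? %d\n", att_is_null(bitmap, 10));   /* 1 */
    return 0;
}
```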
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index f0c8c8e37f6..9e5bab194de 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -80,8 +80,8 @@ ginAllocEntryAccumulator(void *arg)
    GinEntryAccumulator *ea;
 
    /*
-    * Allocate memory by rather big chunks to decrease overhead. We have
-    * no need to reclaim RBNodes individually, so this costs nothing.
+    * Allocate memory by rather big chunks to decrease overhead. We have no
+    * need to reclaim RBNodes individually, so this costs nothing.
     */
    if (accum->entryallocator == NULL || accum->eas_used >= DEF_NENTRY)
    {
@@ -108,7 +108,7 @@ ginInitBA(BuildAccumulator *accum)
                   cmpEntryAccumulator,
                   ginCombineData,
                   ginAllocEntryAccumulator,
-                  NULL, /* no freefunc needed */
+                  NULL,    /* no freefunc needed */
                   (void *) accum);
 }
 
@@ -145,8 +145,8 @@ ginInsertBAEntry(BuildAccumulator *accum,
    bool        isNew;
 
    /*
-    * For the moment, fill only the fields of eatmp that will be looked at
-    * by cmpEntryAccumulator or ginCombineData.
+    * For the moment, fill only the fields of eatmp that will be looked at by
+    * cmpEntryAccumulator or ginCombineData.
     */
    eatmp.attnum = attnum;
    eatmp.key = key;
diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c
index 4a1e7548008..41dbe9fd11e 100644
--- a/src/backend/access/gin/gindatapage.c
+++ b/src/backend/access/gin/gindatapage.c
@@ -21,13 +21,13 @@ int
 ginCompareItemPointers(ItemPointer a, ItemPointer b)
 {
-   BlockNumber ba = GinItemPointerGetBlockNumber(a);
-   BlockNumber bb = GinItemPointerGetBlockNumber(b);
+   BlockNumber ba = GinItemPointerGetBlockNumber(a);
+   BlockNumber bb = GinItemPointerGetBlockNumber(b);
 
    if (ba == bb)
    {
-       OffsetNumber oa = GinItemPointerGetOffsetNumber(a);
-       OffsetNumber ob = GinItemPointerGetOffsetNumber(b);
+       OffsetNumber oa = GinItemPointerGetOffsetNumber(a);
+       OffsetNumber ob = GinItemPointerGetOffsetNumber(b);
 
        if (oa == ob)
            return 0;
@@ -383,6 +383,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
    Page        page = BufferGetPage(buf);
    int         sizeofitem = GinSizeOfDataPageItem(page);
    int         cnt = 0;
+
    /* these must be static so they can be returned to caller */
    static XLogRecData rdata[3];
    static ginxlogInsert data;
@@ -474,6 +475,7 @@ dataSplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRe
    Size        pageSize = PageGetPageSize(lpage);
    Size        freeSpace;
    uint32      nCopied = 1;
+
    /* these must be static so they can be returned to caller */
    static ginxlogSplit data;
    static XLogRecData rdata[4];
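ginCompareItemPointers(), re-indented above, is a two-level comparator: block number first, offset number only to break ties. The same shape with plain stand-in types (not PostgreSQL's ItemPointerData):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for BlockNumber/OffsetNumber */
typedef struct
{
    uint32_t blkno;     /* heap block number */
    uint16_t offset;    /* line pointer within the block */
} ItemPtr;

/* Same shape as ginCompareItemPointers: block first, offset breaks ties */
static int
compare_item_pointers(const ItemPtr *a, const ItemPtr *b)
{
    if (a->blkno == b->blkno)
    {
        if (a->offset == b->offset)
            return 0;
        return (a->offset > b->offset) ? 1 : -1;
    }
    return (a->blkno > b->blkno) ? 1 : -1;
}

int
main(void)
{
    ItemPtr x = {10, 2};
    ItemPtr y = {10, 7};

    printf("%d\n", compare_item_pointers(&x, &y));  /* -1: same block, lower offset */
    return 0;
}
```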
diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c
index 9749a1be786..fa134f9fc3f 100644
--- a/src/backend/access/gin/ginentrypage.c
+++ b/src/backend/access/gin/ginentrypage.c
@@ -98,11 +98,11 @@ GinFormTuple(GinState *ginstate,
        if (errorTooBig)
            ereport(ERROR,
                    (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
-                    errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
-                           (unsigned long) newsize,
-                           (unsigned long) Min(INDEX_SIZE_MASK,
-                                               GinMaxItemSize),
-                           RelationGetRelationName(ginstate->index))));
+           errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+                  (unsigned long) newsize,
+                  (unsigned long) Min(INDEX_SIZE_MASK,
+                                      GinMaxItemSize),
+                  RelationGetRelationName(ginstate->index))));
        pfree(itup);
        return NULL;
    }
@@ -164,7 +164,7 @@ GinShortenTuple(IndexTuple itup, uint32 nipd)
 * Form a non-leaf entry tuple by copying the key data from the given tuple,
 * which can be either a leaf or non-leaf entry tuple.
 *
- * Any posting list in the source tuple is not copied. The specified child
+ * Any posting list in the source tuple is not copied.  The specified child
 * block number is inserted into t_tid.
 */
 static IndexTuple
@@ -225,7 +225,7 @@ entryIsMoveRight(GinBtree btree, Page page)
    key = gintuple_get_key(btree->ginstate, itup, &category);
 
    if (ginCompareAttEntries(btree->ginstate,
-               btree->entryAttnum, btree->entryKey, btree->entryCategory,
+              btree->entryAttnum, btree->entryKey, btree->entryCategory,
                             attnum, key, category) > 0)
        return TRUE;
@@ -488,6 +488,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
    Page        page = BufferGetPage(buf);
    OffsetNumber placed;
    int         cnt = 0;
+
    /* these must be static so they can be returned to caller */
    static XLogRecData rdata[3];
    static ginxlogInsert data;
@@ -561,6 +562,7 @@ entrySplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogR
    Page        lpage = PageGetTempPageCopy(BufferGetPage(lbuf));
    Page        rpage = BufferGetPage(rbuf);
    Size        pageSize = PageGetPageSize(lpage);
+
    /* these must be static so they can be returned to caller */
    static XLogRecData rdata[2];
    static ginxlogSplit data;
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 9960c786c94..82419e37acb 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -88,9 +88,9 @@ writeListPage(Relation index, Buffer buffer,
    GinPageGetOpaque(page)->rightlink = rightlink;
 
    /*
-    * tail page may contain only whole row(s) or final part of row placed
-    * on previous pages (a "row" here meaning all the index tuples generated
-    * for one heap tuple)
+    * tail page may contain only whole row(s) or final part of row placed on
+    * previous pages (a "row" here meaning all the index tuples generated for
+    * one heap tuple)
     */
    if (rightlink == InvalidBlockNumber)
    {
@@ -437,7 +437,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
 * Create temporary index tuples for a single indexable item (one index column
 * for the heap tuple specified by ht_ctid), and append them to the array
 * in *collector.  They will subsequently be written out using
- * ginHeapTupleFastInsert. Note that to guarantee consistent state, all
+ * ginHeapTupleFastInsert.  Note that to guarantee consistent state, all
 * temp tuples for a given heap tuple must be written in one call to
 * ginHeapTupleFastInsert.
 */
@@ -475,8 +475,8 @@ ginHeapTupleFastCollect(GinState *ginstate,
    }
 
    /*
-    * Build an index tuple for each key value, and add to array. In
-    * pending tuples we just stick the heap TID into t_tid.
+    * Build an index tuple for each key value, and add to array.  In pending
+    * tuples we just stick the heap TID into t_tid.
     */
    for (i = 0; i < nentries; i++)
    {
@@ -665,7 +665,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka,
    {
        IndexTuple  itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
        OffsetNumber curattnum;
-       Datum       curkey;
+       Datum       curkey;
        GinNullCategory curcategory;
 
        /* Check for change of heap TID or attnum */
@@ -830,7 +830,7 @@ ginInsertCleanup(GinState *ginstate,
         */
        ginBeginBAScan(&accum);
        while ((list = ginGetBAEntry(&accum,
-                           &attnum, &key, &category, &nlist)) != NULL)
+                               &attnum, &key, &category, &nlist)) != NULL)
        {
            ginEntryInsert(ginstate, attnum, key, category,
                           list, nlist, NULL);
@@ -867,7 +867,7 @@ ginInsertCleanup(GinState *ginstate,
 
            ginBeginBAScan(&accum);
            while ((list = ginGetBAEntry(&accum,
-                           &attnum, &key, &category, &nlist)) != NULL)
+                               &attnum, &key, &category, &nlist)) != NULL)
                ginEntryInsert(ginstate, attnum, key, category,
                               list, nlist, NULL);
        }
diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c
index e07dc0a6ce0..a4771654a6d 100644
--- a/src/backend/access/gin/ginget.c
+++ b/src/backend/access/gin/ginget.c
@@ -40,8 +40,8 @@ static bool
 callConsistentFn(GinState *ginstate, GinScanKey key)
 {
    /*
-    * If we're dealing with a dummy EVERYTHING key, we don't want to call
-    * the consistentFn; just claim it matches.
+    * If we're dealing with a dummy EVERYTHING key, we don't want to call the
+    * consistentFn; just claim it matches.
     */
    if (key->searchMode == GIN_SEARCH_MODE_EVERYTHING)
    {
@@ -174,14 +174,14 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 
 /*
 * Collects TIDs into scanEntry->matchBitmap for all heap tuples that
- * match the search entry. This supports three different match modes:
+ * match the search entry.  This supports three different match modes:
 *
 * 1. Partial-match support: scan from current point until the
- *    comparePartialFn says we're done.
+ *    comparePartialFn says we're done.
 * 2. SEARCH_MODE_ALL: scan from current point (which should be first
- *    key for the current attnum) until we hit null items or end of attnum
+ *    key for the current attnum) until we hit null items or end of attnum
 * 3. SEARCH_MODE_EVERYTHING: scan from current point (which should be first
- *    key for the current attnum) until we hit end of attnum
+ *    key for the current attnum) until we hit end of attnum
 *
 * Returns true if done, false if it's necessary to restart scan from scratch
 */
@@ -189,7 +189,7 @@ static bool
 collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
                    GinScanEntry scanEntry)
 {
-   OffsetNumber attnum;
+   OffsetNumber attnum;
    Form_pg_attribute attr;
 
    /* Initialize empty bitmap result */
@@ -253,8 +253,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
            cmp = DatumGetInt32(FunctionCall4(&btree->ginstate->comparePartialFn[attnum - 1],
                                              scanEntry->queryKey,
                                              idatum,
-                                        UInt16GetDatum(scanEntry->strategy),
-                                   PointerGetDatum(scanEntry->extra_data)));
+                                       UInt16GetDatum(scanEntry->strategy),
+                                     PointerGetDatum(scanEntry->extra_data)));
            if (cmp > 0)
                return true;
@@ -269,7 +269,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
            /*
             * In ALL mode, we are not interested in null items, so we can
             * stop if we get to a null-item placeholder (which will be the
-            * last entry for a given attnum). We do want to include NULL_KEY
+            * last entry for a given attnum).  We do want to include NULL_KEY
             * and EMPTY_ITEM entries, though.
             */
            if (icategory == GIN_CAT_NULL_ITEM)
@@ -287,8 +287,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
         * We should unlock current page (but not unpin) during tree scan
         * to prevent deadlock with vacuum processes.
         *
-        * We save current entry value (idatum) to be able to re-find
-        * our tuple after re-locking
+        * We save current entry value (idatum) to be able to re-find our
+        * tuple after re-locking
         */
        if (icategory == GIN_CAT_NORM_KEY)
            idatum = datumCopy(idatum, attr->attbyval, attr->attlen);
@@ -442,11 +442,11 @@ restartScanEntry:
            Page        page;
 
            /*
-            * We should unlock entry page before touching posting tree
-            * to prevent deadlocks with vacuum processes. Because entry is
-            * never deleted from page and posting tree is never reduced to
-            * the posting list, we can unlock page after getting BlockNumber
-            * of root of posting tree.
+            * We should unlock entry page before touching posting tree to
+            * prevent deadlocks with vacuum processes. Because entry is never
+            * deleted from page and posting tree is never reduced to the
+            * posting list, we can unlock page after getting BlockNumber of
+            * root of posting tree.
             */
            LockBuffer(stackEntry->buffer, GIN_UNLOCK);
            needUnlock = FALSE;
@@ -596,7 +596,7 @@ entryGetNextItem(GinState *ginstate, GinScanEntry entry)
 
            if (!ItemPointerIsValid(&entry->curItem) ||
                ginCompareItemPointers(&entry->curItem,
-                               entry->list + entry->offset - 1) == 0)
+                                      entry->list + entry->offset - 1) == 0)
            {
                /*
                 * First pages are deleted or empty, or we found exact
@@ -656,10 +656,10 @@ entryGetItem(GinState *ginstate, GinScanEntry entry)
            }
 
            /*
-            * Reset counter to the beginning of entry->matchResult.
-            * Note: entry->offset is still greater than
-            * matchResult->ntuples if matchResult is lossy. So, on next
-            * call we will get next result from TIDBitmap.
+            * Reset counter to the beginning of entry->matchResult. Note:
+            * entry->offset is still greater than matchResult->ntuples if
+            * matchResult is lossy. So, on next call we will get next
+            * result from TIDBitmap.
             */
            entry->offset = 0;
        }
@@ -745,10 +745,10 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
    /*
     * Find the minimum of the active entry curItems.
     *
-    * Note: a lossy-page entry is encoded by a ItemPointer with max value
-    * for offset (0xffff), so that it will sort after any exact entries
-    * for the same page. So we'll prefer to return exact pointers not
-    * lossy pointers, which is good.
+    * Note: a lossy-page entry is encoded by a ItemPointer with max value for
+    * offset (0xffff), so that it will sort after any exact entries for the
+    * same page.  So we'll prefer to return exact pointers not lossy
+    * pointers, which is good.
     */
    ItemPointerSetMax(&minItem);
@@ -782,28 +782,27 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
    /*
     * Lossy-page entries pose a problem, since we don't know the correct
-    * entryRes state to pass to the consistentFn, and we also don't know
-    * what its combining logic will be (could be AND, OR, or even NOT).
-    * If the logic is OR then the consistentFn might succeed for all
-    * items in the lossy page even when none of the other entries match.
+    * entryRes state to pass to the consistentFn, and we also don't know what
+    * its combining logic will be (could be AND, OR, or even NOT).  If the
+    * logic is OR then the consistentFn might succeed for all items in the
+    * lossy page even when none of the other entries match.
     *
     * If we have a single lossy-page entry then we check to see if the
-    * consistentFn will succeed with only that entry TRUE. If so,
-    * we return a lossy-page pointer to indicate that the whole heap
-    * page must be checked. (On subsequent calls, we'll do nothing until
-    * minItem is past the page altogether, thus ensuring that we never return
-    * both regular and lossy pointers for the same page.)
+    * consistentFn will succeed with only that entry TRUE.  If so, we return
+    * a lossy-page pointer to indicate that the whole heap page must be
+    * checked.  (On subsequent calls, we'll do nothing until minItem is past
+    * the page altogether, thus ensuring that we never return both regular
+    * and lossy pointers for the same page.)
     *
-    * This idea could be generalized to more than one lossy-page entry,
-    * but ideally lossy-page entries should be infrequent so it would
-    * seldom be the case that we have more than one at once. So it
-    * doesn't seem worth the extra complexity to optimize that case.
-    * If we do find more than one, we just punt and return a lossy-page
-    * pointer always.
+    * This idea could be generalized to more than one lossy-page entry, but
+    * ideally lossy-page entries should be infrequent so it would seldom be
+    * the case that we have more than one at once.  So it doesn't seem worth
+    * the extra complexity to optimize that case.  If we do find more than
+    * one, we just punt and return a lossy-page pointer always.
     *
-    * Note that only lossy-page entries pointing to the current item's
-    * page should trigger this processing; we might have future lossy
-    * pages in the entry array, but they aren't relevant yet.
+    * Note that only lossy-page entries pointing to the current item's page
+    * should trigger this processing; we might have future lossy pages in the
+    * entry array, but they aren't relevant yet.
     */
    ItemPointerSetLossyPage(&curPageLossy,
                            GinItemPointerGetBlockNumber(&key->curItem));
@@ -853,15 +852,14 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
    }
 
    /*
-    * At this point we know that we don't need to return a lossy
-    * whole-page pointer, but we might have matches for individual exact
-    * item pointers, possibly in combination with a lossy pointer. Our
-    * strategy if there's a lossy pointer is to try the consistentFn both
-    * ways and return a hit if it accepts either one (forcing the hit to
-    * be marked lossy so it will be rechecked). An exception is that
-    * we don't need to try it both ways if the lossy pointer is in a
-    * "hidden" entry, because the consistentFn's result can't depend on
-    * that.
+    * At this point we know that we don't need to return a lossy whole-page
+    * pointer, but we might have matches for individual exact item pointers,
+    * possibly in combination with a lossy pointer.  Our strategy if there's
+    * a lossy pointer is to try the consistentFn both ways and return a hit
+    * if it accepts either one (forcing the hit to be marked lossy so it will
+    * be rechecked).  An exception is that we don't need to try it both ways
+    * if the lossy pointer is in a "hidden" entry, because the consistentFn's
+    * result can't depend on that.
     *
     * Prepare entryRes array to be passed to consistentFn.
     */
@@ -960,7 +958,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
        keyGetItem(&so->ginstate, so->tempCtx, key);
 
        if (key->isFinished)
-           return false; /* finished one of keys */
+           return false;       /* finished one of keys */
 
        if (ginCompareItemPointers(&key->curItem, item) < 0)
            *item = key->curItem;
@@ -975,7 +973,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
     * that exact TID, or a lossy reference to the same page.
     *
     * This logic works only if a keyGetItem stream can never contain both
-    * exact and lossy pointers for the same page. Else we could have a
+    * exact and lossy pointers for the same page.  Else we could have a
     * case like
     *
     *      stream 1        stream 2
@@ -1011,8 +1009,8 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
            break;
 
        /*
-        * No hit. Update myAdvancePast to this TID, so that on the next
-        * pass we'll move to the next possible entry.
+        * No hit.  Update myAdvancePast to this TID, so that on the next pass
+        * we'll move to the next possible entry.
         */
        myAdvancePast = *item;
    }
@@ -1118,8 +1116,8 @@ scanGetCandidate(IndexScanDesc scan, pendingPosition *pos)
 
            /*
             * Now pos->firstOffset points to the first tuple of current heap
-            * row, pos->lastOffset points to the first tuple of next heap
-            * row (or to the end of page)
+            * row, pos->lastOffset points to the first tuple of next heap row
+            * (or to the end of page)
             */
            break;
        }
@@ -1181,7 +1179,7 @@ matchPartialInPendingList(GinState *ginstate, Page page,
                                          entry->queryKey,
                                          datum[off - 1],
                                          UInt16GetDatum(entry->strategy),
-                                   PointerGetDatum(entry->extra_data)));
+                                     PointerGetDatum(entry->extra_data)));
        if (cmp == 0)
            return true;
        else if (cmp > 0)
@@ -1227,8 +1225,8 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
    memset(pos->hasMatchKey, FALSE, so->nkeys);
 
    /*
-    * Outer loop iterates over multiple pending-list pages when a single
-    * heap row has entries spanning those pages.
+    * Outer loop iterates over multiple pending-list pages when a single heap
+    * row has entries spanning those pages.
     */
    for (;;)
    {
@@ -1322,11 +1320,11 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
                    if (res == 0)
                    {
                        /*
-                        * Found exact match (there can be only one, except
-                        * in EMPTY_QUERY mode).
+                        * Found exact match (there can be only one, except in
+                        * EMPTY_QUERY mode).
                         *
-                        * If doing partial match, scan forward from
-                        * here to end of page to check for matches.
+                        * If doing partial match, scan forward from here to
+                        * end of page to check for matches.
                         *
                         * See comment above about tuple's ordering.
                         */
@@ -1355,13 +1353,12 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
                    if (StopLow >= StopHigh && entry->isPartialMatch)
                    {
                        /*
-                        * No exact match on this page. If doing partial
-                        * match, scan from the first tuple greater than
-                        * target value to end of page. Note that since we
-                        * don't remember whether the comparePartialFn told us
-                        * to stop early on a previous page, we will uselessly
-                        * apply comparePartialFn to the first tuple on each
-                        * subsequent page.
+                        * No exact match on this page.  If doing partial match,
+                        * scan from the first tuple greater than target value to
+                        * end of page.  Note that since we don't remember whether
+                        * the comparePartialFn told us to stop early on a
+                        * previous page, we will uselessly apply comparePartialFn
+                        * to the first tuple on each subsequent page.
                         */
                        key->entryRes[j] =
                            matchPartialInPendingList(&so->ginstate,
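The keyGetItem() comments reformatted above describe lossy-page pointers: a whole-page reference is encoded as an ItemPointer with the maximum offset (0xffff), so ordinary block/offset ordering places it after every exact pointer for the same page. A sketch of that encoding and its ordering property (simplified types, not the PostgreSQL ItemPointer):

```c
#include <stdint.h>
#include <stdio.h>

typedef struct
{
    uint32_t blkno;
    uint16_t offset;            /* 0xffff means "whole page" */
} ItemPtr;

#define LOSSY_OFFSET 0xffff     /* hypothetical name for the sentinel */

static ItemPtr
make_lossy(uint32_t blkno)
{
    ItemPtr p = {blkno, LOSSY_OFFSET};

    return p;
}

/* block first, offset breaks ties -- same ordering as the GIN code */
static int
item_ptr_cmp(ItemPtr a, ItemPtr b)
{
    if (a.blkno != b.blkno)
        return a.blkno < b.blkno ? -1 : 1;
    if (a.offset != b.offset)
        return a.offset < b.offset ? -1 : 1;
    return 0;
}

int
main(void)
{
    ItemPtr exact = {42, 7};            /* exact TID on block 42 */
    ItemPtr lossy = make_lossy(42);     /* all of block 42 */

    /* exact sorts first, so exact pointers are preferred over lossy ones */
    printf("%d\n", item_ptr_cmp(exact, lossy));     /* -1 */
    return 0;
}
```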
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index af5068906fb..3e32af94a96 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -97,7 +97,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems)
 * Adds array of item pointers to tuple's posting list, or
 * creates posting tree and tuple pointing to tree in case
 * of not enough space.  Max size of tuple is defined in
- * GinFormTuple(). Returns a new, modified index tuple.
+ * GinFormTuple().  Returns a new, modified index tuple.
 * items[] must be in sorted order with no duplicates.
 */
 static IndexTuple
@@ -195,14 +195,14 @@ buildFreshLeafTuple(GinState *ginstate,
        BlockNumber postingRoot;
 
        /*
-        * Build posting-tree-only result tuple. We do this first so as
-        * to fail quickly if the key is too big.
+        * Build posting-tree-only result tuple.  We do this first so as to
+        * fail quickly if the key is too big.
         */
        res = GinFormTuple(ginstate, attnum, key, category, NULL, 0, true);
 
        /*
-        * Initialize posting tree with as many TIDs as will fit on the
-        * first page.
+        * Initialize posting tree with as many TIDs as will fit on the first
+        * page.
         */
        postingRoot = createPostingTree(ginstate->index,
                                        items,
@@ -361,7 +361,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
        ginBeginBAScan(&buildstate->accum);
        while ((list = ginGetBAEntry(&buildstate->accum,
-                           &attnum, &key, &category, &nlist)) != NULL)
+                               &attnum, &key, &category, &nlist)) != NULL)
        {
            /* there could be many entries, so be willing to abort here */
            CHECK_FOR_INTERRUPTS();
diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c
index 25f60e15a0d..37b08c0df62 100644
--- a/src/backend/access/gin/ginscan.c
+++ b/src/backend/access/gin/ginscan.c
@@ -199,7 +199,7 @@ ginFillScanKey(GinScanOpaque so, OffsetNumber attnum,
                break;
            default:
                elog(ERROR, "unexpected searchMode: %d", searchMode);
-               queryCategory = 0; /* keep compiler quiet */
+               queryCategory = 0;      /* keep compiler quiet */
                break;
        }
        isPartialMatch = false;
@@ -294,8 +294,8 @@ ginNewScanKey(IndexScanDesc scan)
        int32       searchMode = GIN_SEARCH_MODE_DEFAULT;
 
        /*
-        * We assume that GIN-indexable operators are strict, so a null
-        * query argument means an unsatisfiable query.
+        * We assume that GIN-indexable operators are strict, so a null query
+        * argument means an unsatisfiable query.
         */
        if (skey->sk_flags & SK_ISNULL)
        {
@@ -315,8 +315,8 @@ ginNewScanKey(IndexScanDesc scan)
                                           PointerGetDatum(&searchMode)));
 
        /*
-        * If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL;
-        * note in particular we don't allow extractQueryFn to select
+        * If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL; note
+        * in particular we don't allow extractQueryFn to select
         * GIN_SEARCH_MODE_EVERYTHING.
         */
        if (searchMode < GIN_SEARCH_MODE_DEFAULT ||
@@ -344,20 +344,20 @@ ginNewScanKey(IndexScanDesc scan)
         * If the extractQueryFn didn't create a nullFlags array, create one,
         * assuming that everything's non-null. Otherwise, run through the
         * array and make sure each value is exactly 0 or 1; this ensures
-        * binary compatibility with the GinNullCategory representation.
-        * While at it, detect whether any null keys are present.
+        * binary compatibility with the GinNullCategory representation. While
+        * at it, detect whether any null keys are present.
         */
        if (nullFlags == NULL)
            nullFlags = (bool *) palloc0(nQueryValues * sizeof(bool));
        else
        {
-           int32 j;
+           int32       j;
 
            for (j = 0; j < nQueryValues; j++)
            {
                if (nullFlags[j])
                {
-                   nullFlags[j] = true; /* not any other nonzero value */
+                   nullFlags[j] = true;        /* not any other nonzero value */
                    hasNullQuery = true;
                }
            }
@@ -387,11 +387,11 @@ ginNewScanKey(IndexScanDesc scan)
    /*
     * If the index is version 0, it may be missing null and placeholder
     * entries, which would render searches for nulls and full-index scans
-    * unreliable. Throw an error if so.
+    * unreliable.  Throw an error if so.
     */
    if (hasNullQuery && !so->isVoidRes)
    {
-       GinStatsData ginStats;
+       GinStatsData ginStats;
 
        ginGetStats(scan->indexRelation, &ginStats);
        if (ginStats.ginVersion < 1)
@@ -410,6 +410,7 @@ ginrescan(PG_FUNCTION_ARGS)
 {
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    ScanKey     scankey = (ScanKey) PG_GETARG_POINTER(1);
+
    /* remaining arguments are ignored */
    GinScanOpaque so = (GinScanOpaque) scan->opaque;
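ginNewScanKey() above (and ginExtractEntries() in the ginutil.c diff below) normalize a caller-supplied nullFlags array so every element is exactly 0 or 1, keeping it binary-compatible with the GinNullCategory codes, and note whether any nulls are present. A hypothetical standalone version of that loop (PostgreSQL's C-level bool is a char, mimicked here):

```c
#include <stdio.h>
#include <stdlib.h>

typedef char pgbool;            /* stand-in for PostgreSQL's C-level bool */

/*
 * Sketch of the nullFlags normalization: with no caller array, allocate
 * an all-zero ("nothing is null") one; otherwise force every nonzero
 * byte to exactly 1 and record whether any nulls were seen.
 * Hypothetical helper; the real logic is inline in ginNewScanKey().
 */
static pgbool *
normalize_null_flags(pgbool *nullFlags, int n, pgbool *hasNullQuery)
{
    int i;

    if (nullFlags == NULL)
        return (pgbool *) calloc(n, sizeof(pgbool));

    for (i = 0; i < n; i++)
    {
        if (nullFlags[i])
        {
            nullFlags[i] = 1;       /* not any other nonzero value */
            *hasNullQuery = 1;
        }
    }
    return nullFlags;
}

int
main(void)
{
    pgbool flags[3] = {0, 7, 0};    /* a sloppy extract function wrote 7 */
    pgbool hasNullQuery = 0;

    normalize_null_flags(flags, 3, &hasNullQuery);
    printf("%d %d %d hasNullQuery=%d\n",
           flags[0], flags[1], flags[2], hasNullQuery);     /* 0 1 0 1 */
    return 0;
}
```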
diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c
index 392c12d47ab..716cf3a7348 100644
--- a/src/backend/access/gin/ginutil.c
+++ b/src/backend/access/gin/ginutil.c
@@ -70,7 +70,7 @@ initGinState(GinState *state, Relation index)
         * However, we may have a collatable storage type for a noncollatable
         * indexed data type (for instance, hstore uses text index entries).
         * If there's no index collation then specify default collation in
-        * case the comparison function needs one. This is harmless if the
+        * case the comparison function needs one.  This is harmless if the
         * comparison function doesn't care about collation, so we just do it
         * unconditionally. (We could alternatively call get_typcollation,
         * but that seems like expensive overkill --- there aren't going to be
@@ -359,9 +359,9 @@ cmpEntries(const void *a, const void *b, void *arg)
                                  aa->datum, bb->datum));
 
    /*
-    * Detect if we have any duplicates. If there are equal keys, qsort
-    * must compare them at some point, else it wouldn't know whether one
-    * should go before or after the other.
+    * Detect if we have any duplicates.  If there are equal keys, qsort must
+    * compare them at some point, else it wouldn't know whether one should go
+    * before or after the other.
     */
    if (res == 0)
        data->haveDups = true;
@@ -422,9 +422,9 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 
    /*
     * If the extractValueFn didn't create a nullFlags array, create one,
-    * assuming that everything's non-null. Otherwise, run through the
-    * array and make sure each value is exactly 0 or 1; this ensures
-    * binary compatibility with the GinNullCategory representation.
+    * assuming that everything's non-null.  Otherwise, run through the array
+    * and make sure each value is exactly 0 or 1; this ensures binary
+    * compatibility with the GinNullCategory representation.
     */
    if (nullFlags == NULL)
        nullFlags = (bool *) palloc0(*nentries * sizeof(bool));
@@ -440,8 +440,8 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
     * If there's more than one key, sort and unique-ify.
     *
     * XXX Using qsort here is notationally painful, and the overhead is
-    * pretty bad too. For small numbers of keys it'd likely be better to
-    * use a simple insertion sort.
+    * pretty bad too.  For small numbers of keys it'd likely be better to use
+    * a simple insertion sort.
     */
    if (*nentries > 1)
    {
@@ -470,7 +470,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
        j = 1;
        for (i = 1; i < *nentries; i++)
        {
-           if (cmpEntries(&keydata[i-1], &keydata[i], &arg) != 0)
+           if (cmpEntries(&keydata[i - 1], &keydata[i], &arg) != 0)
            {
                entries[j] = keydata[i].datum;
                nullFlags[j] = keydata[i].isnull;
@@ -533,9 +533,9 @@ ginoptions(PG_FUNCTION_ARGS)
 void
 ginGetStats(Relation index, GinStatsData *stats)
 {
-   Buffer metabuffer;
-   Page metapage;
-   GinMetaPageData *metadata;
+   Buffer      metabuffer;
+   Page        metapage;
+   GinMetaPageData *metadata;
 
    metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
    LockBuffer(metabuffer, GIN_SHARE);
@@ -560,9 +560,9 @@ ginGetStats(Relation index, GinStatsData *stats)
 void
 ginUpdateStats(Relation index, const GinStatsData *stats)
 {
-   Buffer metabuffer;
-   Page metapage;
-   GinMetaPageData *metadata;
+   Buffer      metabuffer;
+   Page        metapage;
+   GinMetaPageData *metadata;
 
    metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
    LockBuffer(metabuffer, GIN_EXCLUSIVE);
@@ -580,9 +580,9 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
    if (RelationNeedsWAL(index))
    {
-       XLogRecPtr recptr;
-       ginxlogUpdateMeta data;
-       XLogRecData rdata;
+       XLogRecPtr  recptr;
+       ginxlogUpdateMeta data;
+       XLogRecData rdata;
 
        data.node = index->rd_node;
        data.ntuples = 0;
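cmpEntries() in the ginutil.c diff above piggybacks duplicate detection on the sort itself: as its comment argues, if equal keys exist, qsort must compare them at some point, so setting a flag on `res == 0` is a reliable "duplicates exist" signal, and the unique-ify pass can be skipped otherwise. A minimal sketch of the trick (a file-static flag stands in for the pass-through argument of PostgreSQL's qsort_arg):

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool have_dups = false;

static int
cmp_int(const void *a, const void *b)
{
    int ia = *(const int *) a;
    int ib = *(const int *) b;

    if (ia == ib)
    {
        have_dups = true;       /* equal keys: remember we saw a duplicate */
        return 0;
    }
    return (ia > ib) ? 1 : -1;
}

int
main(void)
{
    int keys[] = {5, 3, 5, 1};
    int n = 4, i, j;

    qsort(keys, n, sizeof(int), cmp_int);

    /* unique-ify only when the sort proved duplicates exist */
    if (have_dups)
    {
        for (i = 1, j = 1; i < n; i++)
            if (keys[i] != keys[j - 1])
                keys[j++] = keys[i];
        n = j;
    }
    for (i = 0; i < n; i++)
        printf("%d ", keys[i]);     /* 1 3 5 */
    printf("\n");
    return 0;
}
```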
diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c
index 41ad382df0c..79c54f16b8d 100644
--- a/src/backend/access/gin/ginvacuum.c
+++ b/src/backend/access/gin/ginvacuum.c
@@ -783,7 +783,7 @@ ginvacuumcleanup(PG_FUNCTION_ARGS)
        {
            idxStat.nEntryPages++;
 
-           if ( GinPageIsLeaf(page) )
+           if (GinPageIsLeaf(page))
                idxStat.nEntries += PageGetMaxOffsetNumber(page);
        }
diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c
index e410959b851..c954bcb12fc 100644
--- a/src/backend/access/gin/ginxlog.c
+++ b/src/backend/access/gin/ginxlog.c
@@ -388,7 +388,7 @@ ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record)
    else
    {
        OffsetNumber i,
-                   *tod;
+                  *tod;
        IndexTuple  itup = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogVacuumPage));
 
        tod = (OffsetNumber *) palloc(sizeof(OffsetNumber) * PageGetMaxOffsetNumber(page));
@@ -513,10 +513,10 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
        if (!XLByteLE(lsn, PageGetLSN(page)))
        {
            OffsetNumber l,
-                       off = (PageIsEmpty(page)) ? FirstOffsetNumber :
-                       OffsetNumberNext(PageGetMaxOffsetNumber(page));
+                       off = (PageIsEmpty(page)) ? FirstOffsetNumber :
+           OffsetNumberNext(PageGetMaxOffsetNumber(page));
            int         i,
-                       tupsize;
+                       tupsize;
            IndexTuple  tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta));
 
            for (i = 0; i < data->ntuples; i++)
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 9529413e80e..fae3464600a 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -34,8 +34,8 @@ typedef struct
 /* A List of these is used represent a split-in-progress. */
 typedef struct
 {
-   Buffer buf; /* the split page "half" */
-   IndexTuple downlink; /* downlink for this half. */
+   Buffer      buf;            /* the split page "half" */
+   IndexTuple  downlink;       /* downlink for this half. */
 } GISTPageSplitInfo;
 
 /* non-export function prototypes */
@@ -306,13 +306,13 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
    bool        is_split;
 
    /*
-    * Refuse to modify a page that's incompletely split. This should
-    * not happen because we finish any incomplete splits while we walk
-    * down the tree. However, it's remotely possible that another
-    * concurrent inserter splits a parent page, and errors out before
-    * completing the split. We will just throw an error in that case,
-    * and leave any split we had in progress unfinished too. The next
-    * insert that comes along will clean up the mess.
+    * Refuse to modify a page that's incompletely split. This should not
+    * happen because we finish any incomplete splits while we walk down the
+    * tree. However, it's remotely possible that another concurrent inserter
+    * splits a parent page, and errors out before completing the split. We
+    * will just throw an error in that case, and leave any split we had in
+    * progress unfinished too. The next insert that comes along will clean up
+    * the mess.
     */
    if (GistFollowRight(page))
        elog(ERROR, "concurrent GiST page split was incomplete");
@@ -338,7 +338,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
        SplitedPageLayout *dist = NULL,
                   *ptr;
        BlockNumber oldrlink = InvalidBlockNumber;
-       GistNSN oldnsn = { 0, 0 };
+       GistNSN     oldnsn = {0, 0};
        SplitedPageLayout rootpg;
        BlockNumber blkno = BufferGetBlockNumber(buffer);
        bool        is_rootsplit;
@@ -364,8 +364,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
 
        /*
         * Set up pages to work with. Allocate new buffers for all but the
-        * leftmost page. The original page becomes the new leftmost page,
-        * and is just replaced with the new contents.
+        * leftmost page. The original page becomes the new leftmost page, and
+        * is just replaced with the new contents.
         *
         * For a root-split, allocate new buffers for all child pages, the
         * original page is overwritten with new root page containing
@@ -414,8 +414,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
        if (is_rootsplit)
        {
            IndexTuple *downlinks;
-           int ndownlinks = 0;
-           int i;
+           int         ndownlinks = 0;
+           int         i;
 
            rootpg.buffer = buffer;
            rootpg.page = PageGetTempPageCopySpecial(BufferGetPage(rootpg.buffer));
@@ -443,6 +443,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
        for (ptr = dist; ptr; ptr = ptr->next)
        {
            GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
+
            si->buf = ptr->buffer;
            si->downlink = ptr->itup;
            *splitinfo = lappend(*splitinfo, si);
@@ -455,7 +456,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
         */
        for (ptr = dist; ptr; ptr = ptr->next)
        {
-           char *data = (char *) (ptr->list);
+           char       *data = (char *) (ptr->list);
+
            for (i = 0; i < ptr->block.num; i++)
            {
                if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, false, false) == InvalidOffsetNumber)
@@ -495,8 +497,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
            MarkBufferDirty(leftchildbuf);
 
        /*
-        * The first page in the chain was a temporary working copy meant
-        * to replace the old page. Copy it over the old page.
+        * The first page in the chain was a temporary working copy meant to
+        * replace the old page. Copy it over the old page.
         */
        PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
        dist->page = BufferGetPage(dist->buffer);
@@ -518,8 +520,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
     * Return the new child buffers to the caller.
     *
     * If this was a root split, we've already inserted the downlink
-    * pointers, in the form of a new root page. Therefore we can
-    * release all the new buffers, and keep just the root page locked.
+    * pointers, in the form of a new root page. Therefore we can release
+    * all the new buffers, and keep just the root page locked.
     */
    if (is_rootsplit)
    {
@@ -572,20 +574,20 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
 
    /*
     * If we inserted the downlink for a child page, set NSN and clear
-    * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know
-    * to follow the rightlink if and only if they looked at the parent page
+    * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to
+    * follow the rightlink if and only if they looked at the parent page
     * before we inserted the downlink.
     *
     * Note that we do this *after* writing the WAL record. That means that
-    * the possible full page image in the WAL record does not include
-    * these changes, and they must be replayed even if the page is restored
-    * from the full page image. There's a chicken-and-egg problem: if we
-    * updated the child pages first, we wouldn't know the recptr of the WAL
-    * record we're about to write.
+    * the possible full page image in the WAL record does not include these
+    * changes, and they must be replayed even if the page is restored from
+    * the full page image. There's a chicken-and-egg problem: if we updated
+    * the child pages first, we wouldn't know the recptr of the WAL record
+    * we're about to write.
     */
    if (BufferIsValid(leftchildbuf))
    {
-       Page leftpg = BufferGetPage(leftchildbuf);
+       Page        leftpg = BufferGetPage(leftchildbuf);
 
        GistPageGetOpaque(leftpg)->nsn = recptr;
        GistClearFollowRight(leftpg);
@@ -636,8 +638,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
        stack->buffer = ReadBuffer(state.r, stack->blkno);
 
        /*
-        * Be optimistic and grab shared lock first. Swap it for an
-        * exclusive lock later if we need to update the page.
+        * Be optimistic and grab shared lock first. Swap it for an exclusive
+        * lock later if we need to update the page.
         */
        if (!xlocked)
        {
@@ -650,9 +652,9 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
        Assert(!RelationNeedsWAL(state.r) || !XLogRecPtrIsInvalid(stack->lsn));
 
        /*
-        * If this page was split but the downlink was never inserted to
-        * the parent because the inserting backend crashed before doing
-        * that, fix that now.
+        * If this page was split but the downlink was never inserted to the
+        * parent because the inserting backend crashed before doing that, fix
+        * that now.
         */
        if (GistFollowRight(stack->page))
        {
@@ -680,8 +682,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                /*
                 * Concurrent split detected. There's no guarantee that the
                 * downlink for this page is consistent with the tuple we're
-                * inserting anymore, so go back to parent and rechoose the
-                * best child.
+                * inserting anymore, so go back to parent and rechoose the best
+                * child.
                 */
                UnlockReleaseBuffer(stack->buffer);
                xlocked = false;
@@ -696,7 +698,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
             * Find the child node that has the minimum insertion penalty.
             */
            BlockNumber childblkno;
-           IndexTuple newtup;
+           IndexTuple  newtup;
            GISTInsertStack *item;
 
            stack->childoffnum = gistchoose(state.r, stack->page, itup, giststate);
@@ -722,8 +724,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
            if (newtup)
            {
                /*
-                * Swap shared lock for an exclusive one. Beware, the page
-                * may change while we unlock/lock the page...
+                * Swap shared lock for an exclusive one. Beware, the page may
+                * change while we unlock/lock the page...
                 */
                if (!xlocked)
                {
@@ -738,6 +740,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                        continue;
                    }
                }
+
                /*
                 * Update the tuple.
                 *
@@ -752,8 +755,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                                     stack->childoffnum, InvalidBuffer))
                {
                    /*
-                    * If this was a root split, the root page continues to
-                    * be the parent and the updated tuple went to one of the
+                    * If this was a root split, the root page continues to be
+                    * the parent and the updated tuple went to one of the
                     * child pages, so we just need to retry from the root
                     * page.
                     */
@@ -779,13 +782,13 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
        {
            /*
             * Leaf page. Insert the new key. We've already updated all the
-            * parents on the way down, but we might have to split the page
-            * if it doesn't fit. gistinserthere() will take care of that.
+            * parents on the way down, but we might have to split the page if
+            * it doesn't fit. gistinserthere() will take care of that.
             */
 
            /*
-            * Swap shared lock for an exclusive one. Be careful, the page
-            * may change while we unlock/lock the page...
+            * Swap shared lock for an exclusive one. Be careful, the page may
+            * change while we unlock/lock the page...
             */
            if (!xlocked)
            {
@@ -798,8 +801,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                if (stack->blkno == GIST_ROOT_BLKNO)
                {
                    /*
-                    * the only page that can become inner instead of leaf
-                    * is the root page, so for root we should recheck it
+                    * the only page that can become inner instead of leaf is
+                    * the root page, so for root we should recheck it
                     */
                    if (!GistPageIsLeaf(stack->page))
                    {
@@ -1059,21 +1062,23 @@ static IndexTuple
 gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
                 GISTInsertStack *stack)
 {
-   Page page = BufferGetPage(buf);
+   Page        page = BufferGetPage(buf);
    OffsetNumber maxoff;
    OffsetNumber offset;
-   IndexTuple downlink = NULL;
+   IndexTuple  downlink = NULL;
 
    maxoff = PageGetMaxOffsetNumber(page);
    for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
    {
        IndexTuple  ituple = (IndexTuple)
-           PageGetItem(page, PageGetItemId(page, offset));
+       PageGetItem(page, PageGetItemId(page, offset));
+
        if (downlink == NULL)
            downlink = CopyIndexTuple(ituple);
        else
        {
-           IndexTuple newdownlink;
+           IndexTuple  newdownlink;
+
            newdownlink = gistgetadjusted(rel, downlink, ituple,
                                          giststate);
            if (newdownlink)
@@ -1082,19 +1087,18 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
    }
 
    /*
-    * If the page is completely empty, we can't form a meaningful
-    * downlink for it. But we have to insert a downlink for the page.
-    * Any key will do, as long as its consistent with the downlink of
-    * parent page, so that we can legally insert it to the parent.
-    * A minimal one that matches as few scans as possible would be best,
-    * to keep scans from doing useless work, but we don't know how to
-    * construct that. So we just use the downlink of the original page
-    * that was split - that's as far from optimal as it can get but will
-    * do..
+    * If the page is completely empty, we can't form a meaningful downlink
+    * for it. But we have to insert a downlink for the page. Any key will do,
+    * as long as its consistent with the downlink of parent page, so that we
+    * can legally insert it to the parent. A minimal one that matches as few
+    * scans as possible would be best, to keep scans from doing useless work,
+    * but we don't know how to construct that. So we just use the downlink of
+    * the original page that was split - that's as far from optimal as it can
+    * get but will do..
     */
    if (!downlink)
    {
-       ItemId iid;
+       ItemId      iid;
 
        LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
        gistFindCorrectParent(rel, stack);
@@ -1131,13 +1135,13 @@ gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
    buf = stack->buffer;
 
    /*
-    * Read the chain of split pages, following the rightlinks. Construct
-    * a downlink tuple for each page.
+    * Read the chain of split pages, following the rightlinks. Construct a
+    * downlink tuple for each page.
     */
    for (;;)
    {
        GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
-       IndexTuple downlink;
+       IndexTuple  downlink;
 
        page = BufferGetPage(buf);
 
@@ -1182,8 +1186,8 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
                 IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
                 Buffer leftchild)
 {
-   List *splitinfo;
-   bool is_split;
+   List       *splitinfo;
+   bool        is_split;
 
    is_split = gistplacetopage(state, giststate, stack->buffer,
                               tuples, ntup, oldoffnum,
@@ -1204,21 +1208,21 @@ static void
 gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
                GISTSTATE *giststate, List *splitinfo)
 {
-   ListCell *lc;
-   List *reversed;
+   ListCell   *lc;
+   List       *reversed;
    GISTPageSplitInfo *right;
    GISTPageSplitInfo *left;
-   IndexTuple tuples[2];
+   IndexTuple  tuples[2];
 
    /* A split always contains at least two halves */
    Assert(list_length(splitinfo) >= 2);
 
    /*
-    * We need to insert downlinks for each new page, and update the
-    * downlink for the original (leftmost) page in the split. Begin at
-    * the rightmost page, inserting one downlink at a time until there's
-    * only two pages left. Finally insert the downlink for the last new
-    * page and update the downlink for the original page as one operation.
+    * We need to insert downlinks for each new page, and update the downlink
+    * for the original (leftmost) page in the split. Begin at the rightmost
+    * page, inserting one downlink at a time until there's only two pages
+    * left. Finally insert the downlink for the last new page and update the
+    * downlink for the original page as one operation.
     */
 
    /* for convenience, create a copy of the list in reverse order */
@@ -1231,7 +1235,7 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
    LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
    gistFindCorrectParent(state->r, stack);
 
-   while(list_length(reversed) > 2)
+   while (list_length(reversed) > 2)
    {
        right = (GISTPageSplitInfo *) linitial(reversed);
        left = (GISTPageSplitInfo *) lsecond(reversed);
@@ -1386,7 +1390,7 @@ initGISTstate(GISTSTATE *giststate, Relation index)
        /* opclasses are not required to provide a Distance method */
        if (OidIsValid(index_getprocid(index, i + 1, GIST_DISTANCE_PROC)))
            fmgr_info_copy(&(giststate->distanceFn[i]),
-                          index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC),
+                      index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC),
                           CurrentMemoryContext);
        else
            giststate->distanceFn[i].fn_oid = InvalidOid;
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 8355081553d..e4488a925de 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -32,7 +32,7 @@
 *
 * On success return for a heap tuple, *recheck_p is set to indicate
 * whether recheck is needed. We recheck if any of the consistent() functions
- * request it. recheck is not interesting when examining a non-leaf entry,
+ * request it.  recheck is not interesting when examining a non-leaf entry,
 * since we must visit the lower index page if there's any doubt.
 *
 * If we are doing an ordered scan, so->distances[] is filled with distance
@@ -62,15 +62,15 @@ gistindex_keytest(IndexScanDesc scan,
    *recheck_p = false;
 
    /*
-    * If it's a leftover invalid tuple from pre-9.1, treat it as a match
-    * with minimum possible distances. This means we'll always follow it
-    * to the referenced page.
+    * If it's a leftover invalid tuple from pre-9.1, treat it as a match with
+    * minimum possible distances.  This means we'll always follow it to the
+    * referenced page.
     */
    if (GistTupleIsInvalid(tuple))
    {
-       int i;
+       int         i;
 
-       if (GistPageIsLeaf(page)) /* shouldn't happen */
+       if (GistPageIsLeaf(page))       /* shouldn't happen */
            elog(ERROR, "invalid GIST tuple found on leaf page");
        for (i = 0; i < scan->numberOfOrderBys; i++)
            so->distances[i] = -get_float8_infinity();
@@ -191,8 +191,8 @@ gistindex_keytest(IndexScanDesc scan,
             * always be zero, but might as well pass it for possible future
             * use.)
             *
-            * Note that Distance functions don't get a recheck argument.
-            * We can't tolerate lossy distance calculations on leaf tuples;
+            * Note that Distance functions don't get a recheck argument. We
+            * can't tolerate lossy distance calculations on leaf tuples;
             * there is no opportunity to re-sort the tuples afterwards.
             */
            dist = FunctionCall4(&key->sk_func,
@@ -223,7 +223,7 @@ gistindex_keytest(IndexScanDesc scan,
 * ntids: if not NULL, gistgetbitmap's output tuple counter
 *
 * If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
- * tuples should be reported directly into the bitmap. If they are NULL,
+ * tuples should be reported directly into the bitmap.  If they are NULL,
 * we're doing a plain or ordered indexscan. For a plain indexscan, heap
 * tuple TIDs are returned into so->pageData[]. For an ordered indexscan,
 * heap tuple TIDs are pushed into individual search queue items.
@@ -525,8 +525,8 @@ gistgettuple(PG_FUNCTION_ARGS)
                /*
                 * While scanning a leaf page, ItemPointers of matching heap
                 * tuples are stored in so->pageData. If there are any on
-                * this page, we fall out of the inner "do" and loop around
-                * to return them.
+                * this page, we fall out of the inner "do" and loop around to
+                * return them.
                 */
                gistScanPage(scan, item, so->curTreeItem->distances, NULL, NULL);
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index 86a5d90f955..43c4b1251b1 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -904,7 +904,7 @@ gist_point_compress(PG_FUNCTION_ARGS)
    PG_RETURN_POINTER(entry);
 }
 
-#define point_point_distance(p1,p2) \
+#define point_point_distance(p1,p2) \
    DatumGetFloat8(DirectFunctionCall2(point_distance, \
                                       PointPGetDatum(p1), PointPGetDatum(p2)))
 
@@ -949,8 +949,8 @@ computeDistance(bool isLeaf, BOX *box, Point *point)
    else
    {
        /* closest point will be a vertex */
-       Point p;
-       double subresult;
+       Point       p;
+       double      subresult;
 
        result = point_point_distance(point, &box->low);
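computeDistance() in the gistproc.c hunk above returns zero for a point inside the box, the distance to the nearest edge beside it, and otherwise the distance to the nearest vertex; the hunk shows only the vertex branch. An equivalent clamp-based formulation, under simplified stand-in types (not the PostgreSQL Point/BOX datatypes):

```c
#include <math.h>
#include <stdio.h>

typedef struct { double x, y; } Pt;
typedef struct { Pt low, high; } Box;

/*
 * Clamp the point into the box, then measure to the clamped point.
 * Inside the box this yields 0; beside it, the distance to the nearest
 * edge; otherwise the distance to the nearest vertex -- the same three
 * cases computeDistance() handles explicitly.
 */
static double
point_box_distance(const Box *box, Pt p)
{
    double cx = fmin(fmax(p.x, box->low.x), box->high.x);
    double cy = fmin(fmax(p.y, box->low.y), box->high.y);

    return hypot(p.x - cx, p.y - cy);
}

int
main(void)
{
    Box b = {{0, 0}, {2, 2}};
    Pt inside = {1, 1};
    Pt beside = {5, 1};     /* nearest point is on the right edge */
    Pt corner = {5, 6};     /* nearest point is the (2,2) vertex */

    printf("%.1f %.1f %.1f\n",
           point_box_distance(&b, inside),      /* 0.0 */
           point_box_distance(&b, beside),      /* 3.0 */
           point_box_distance(&b, corner));     /* 5.0 */
    return 0;
}
```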
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index 0a125e772d0..67308ed37e5 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -57,9 +57,9 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg)
 
    /*
     * If new item is heap tuple, it goes to front of chain; otherwise insert
-    * it before the first index-page item, so that index pages are visited
-    * in LIFO order, ensuring depth-first search of index pages. See
-    * comments in gist_private.h.
+    * it before the first index-page item, so that index pages are visited in
+    * LIFO order, ensuring depth-first search of index pages.  See comments
+    * in gist_private.h.
     */
    if (GISTSearchItemIsHeap(*newitem))
    {
@@ -136,6 +136,7 @@ gistrescan(PG_FUNCTION_ARGS)
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    ScanKey     key = (ScanKey) PG_GETARG_POINTER(1);
    ScanKey     orderbys = (ScanKey) PG_GETARG_POINTER(3);
+
    /* nkeys and norderbys arguments are ignored */
    GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
    int         i;
@@ -164,8 +165,8 @@ gistrescan(PG_FUNCTION_ARGS)
               scan->numberOfKeys * sizeof(ScanKeyData));
 
        /*
-        * Modify the scan key so that the Consistent method is called for
-        * all comparisons. The original operator is passed to the Consistent
+        * Modify the scan key so that the Consistent method is called for all
+        * comparisons.  The original operator is passed to the Consistent
         * function in the form of its strategy number, which is available
         * from the sk_strategy field, and its subtype from the sk_subtype
         * field. Also, preserve sk_func.fn_collation which is the input
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index 6736fd166c3..e8bbd564c71 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -503,11 +503,12 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
    }
 
    res = index_form_tuple(giststate->tupdesc, compatt, isnull);
+
    /*
     * The offset number on tuples on internal pages is unused. For historical
     * reasons, it is set 0xffff.
     */
-   ItemPointerSetOffsetNumber( &(res->t_tid), 0xffff);
+   ItemPointerSetOffsetNumber(&(res->t_tid), 0xffff);
    return res;
 }
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index 0f406e16c4e..51354c1c185 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -41,12 +41,12 @@ static void
 gistRedoClearFollowRight(RelFileNode node, XLogRecPtr lsn,
                         BlockNumber leftblkno)
 {
-   Buffer buffer;
+   Buffer      buffer;
 
    buffer = XLogReadBuffer(node, leftblkno, false);
    if (BufferIsValid(buffer))
    {
-       Page page = (Page) BufferGetPage(buffer);
+       Page        page = (Page) BufferGetPage(buffer);
 
        /*
         * Note that we still update the page even if page LSN is equal to the
@@ -103,6 +103,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
    {
        int         i;
        OffsetNumber *todelete = (OffsetNumber *) data;
+
        data += sizeof(OffsetNumber) * xldata->ntodelete;
 
        for (i = 0; i < xldata->ntodelete; i++)
@@ -115,12 +116,14 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
    if (data - begin < record->xl_len)
    {
        OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
-           OffsetNumberNext(PageGetMaxOffsetNumber(page));
+       OffsetNumberNext(PageGetMaxOffsetNumber(page));
+
        while (data - begin < record->xl_len)
        {
-           IndexTuple itup = (IndexTuple) data;
+           IndexTuple  itup = (IndexTuple) data;
            Size        sz = IndexTupleSize(itup);
            OffsetNumber l;
+
            data += sz;
 
            l = PageAddItem(page, (Item) itup, sz, off, false, false);
@@ -418,7 +421,7 @@ gistXLogSplit(RelFileNode node, BlockNumber blkno, bool page_is_leaf,
    SplitedPageLayout *ptr;
    int         npage = 0,
                cur;
-   XLogRecPtr recptr;
+   XLogRecPtr  recptr;
 
    for (ptr = dist; ptr; ptr = ptr->next)
        npage++;
@@ -540,8 +543,8 @@ gistXLogUpdate(RelFileNode node, Buffer buffer,
    }
 
    /*
-    * Include a full page image of the child buf. (only necessary if
-    * a checkpoint happened since the child page was split)
+    * Include a full page image of the child buf. (only necessary if a
+    * checkpoint happened since the child page was split)
     */
    if (BufferIsValid(leftchildbuf))
    {
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index f19e5627f83..4cb29b2bb45 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -413,6 +413,7 @@ hashrescan(PG_FUNCTION_ARGS)
 {
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    ScanKey     scankey = (ScanKey) PG_GETARG_POINTER(1);
+
    /* remaining arguments are ignored */
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    Relation    rel = scan->indexRelation;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 89697f6ff5e..1fbd8b39b4a 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1070,7 +1070,7 @@ relation_close(Relation relation, LOCKMODE lockmode)
 *      This is essentially relation_open plus check that the relation
 *      is not an index nor a composite type.  (The caller should also
 *      check that it's not a view or foreign table before assuming it has
- *     storage.)
+ *     storage.)
 * ----------------
 */
 Relation
@@ -1922,8 +1922,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 
    /*
     * We're about to do the actual insert -- check for conflict at the
-    * relation or buffer level first, to avoid possibly having to roll
-    * back work we've just done.
+    * relation or buffer level first, to avoid possibly having to roll back
+    * work we've just done.
     */
    CheckForSerializableConflictIn(relation, NULL, buffer);
 
@@ -2228,8 +2228,8 @@ l1:
    }
 
    /*
-    * We're about to do the actual delete -- check for conflict first,
-    * to avoid possibly having to roll back work we've just done.
+    * We're about to do the actual delete -- check for conflict first, to
+    * avoid possibly having to roll back work we've just done.
     */
    CheckForSerializableConflictIn(relation, &tp, buffer);
@@ -2587,8 +2587,8 @@ l2:
    }
 
    /*
-    * We're about to do the actual update -- check for conflict first,
-    * to avoid possibly having to roll back work we've just done.
+    * We're about to do the actual update -- check for conflict first, to
+    * avoid possibly having to roll back work we've just done.
     */
    CheckForSerializableConflictIn(relation, &oldtup, buffer);
@@ -2737,8 +2737,8 @@ l2:
    }
 
    /*
-    * We're about to create the new tuple -- check for conflict first,
-    * to avoid possibly having to roll back work we've just done.
+    * We're about to create the new tuple -- check for conflict first, to
+    * avoid possibly having to roll back work we've just done.
     *
     * NOTE: For a tuple insert, we only need to check for table locks, since
     * predicate locking at the index level will cover ranges for anything
@@ -3860,12 +3860,12 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
    }
 
    /*
-    * Ignore tuples inserted by an aborted transaction or
-    * if the tuple was updated/deleted by the inserting transaction.
+    * Ignore tuples inserted by an aborted transaction or if the tuple was
+    * updated/deleted by the inserting transaction.
     *
     * Look for a committed hint bit, or if no xmin bit is set, check clog.
-    * This needs to work on both master and standby, where it is used
-    * to assess btree delete records.
+    * This needs to work on both master and standby, where it is used to
+    * assess btree delete records.
     */
    if ((tuple->t_infomask & HEAP_XMIN_COMMITTED) ||
        (!(tuple->t_infomask & HEAP_XMIN_COMMITTED) &&
@@ -3874,7 +3874,7 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
    {
        if (xmax != xmin &&
            TransactionIdFollows(xmax, *latestRemovedXid))
-               *latestRemovedXid = xmax;
+           *latestRemovedXid = xmax;
    }
 
    /* *latestRemovedXid may still be invalid at end */
@@ -4158,8 +4158,8 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
    recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);
 
    /*
-    * The page may be uninitialized. If so, we can't set the LSN
-    * and TLI because that would corrupt the page.
+    * The page may be uninitialized. If so, we can't set the LSN and TLI
+    * because that would corrupt the page.
     */
    if (!PageIsNew(page))
    {
@@ -4352,8 +4352,8 @@ heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
    memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);
 
    /*
-    * The page may be uninitialized. If so, we can't set the LSN
-    * and TLI because that would corrupt the page.
+    * The page may be uninitialized. If so, we can't set the LSN and TLI
+    * because that would corrupt the page.
*/ if (!PageIsNew(page)) { diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index 28499925281..72a69e52b02 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -150,7 +150,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock, Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, - struct BulkInsertStateData *bistate) + struct BulkInsertStateData * bistate) { bool use_fsm = !(options & HEAP_INSERT_SKIP_FSM); Buffer buffer = InvalidBuffer; diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index c710f1d316e..e56140950af 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -131,7 +131,7 @@ typedef struct RewriteStateData * them */ HTAB *rs_unresolved_tups; /* unmatched A tuples */ HTAB *rs_old_new_tid_map; /* unmatched B tuples */ -} RewriteStateData; +} RewriteStateData; /* * The lookup keys for the hash tables are tuple TID and xmin (we must check @@ -277,7 +277,7 @@ end_heap_rewrite(RewriteState state) } /* - * If the rel is WAL-logged, must fsync before commit. We use heap_sync + * If the rel is WAL-logged, must fsync before commit. We use heap_sync * to ensure that the toast table gets fsync'd too. * * It's obvious that we must do this when not WAL-logging. It's less diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index 88f73e8241e..66af2c37c54 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -872,7 +872,7 @@ index_getprocinfo(Relation irel, procnum, attnum, RelationGetRelationName(irel)); fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt); - fmgr_info_set_collation(irel->rd_indcollation[attnum-1], locinfo); + fmgr_info_set_collation(irel->rd_indcollation[attnum - 1], locinfo); } return locinfo; diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 0dd745f19a4..219f94fd0dd 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -179,8 +179,8 @@ top: * The only conflict predicate locking cares about for indexes is when * an index tuple insert conflicts with an existing lock. Since the * actual location of the insert is hard to predict because of the - * random search used to prevent O(N^2) performance when there are many - * duplicate entries, we can just use the "first valid" page. + * random search used to prevent O(N^2) performance when there are + * many duplicate entries, we can just use the "first valid" page. */ CheckForSerializableConflictIn(rel, NULL, buf); /* do the insertion */ @@ -915,13 +915,13 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, /* * origpage is the original page to be split. leftpage is a temporary * buffer that receives the left-sibling data, which will be copied back - * into origpage on success. rightpage is the new page that receives - * the right-sibling data. If we fail before reaching the critical - * section, origpage hasn't been modified and leftpage is only workspace. - * In principle we shouldn't need to worry about rightpage either, - * because it hasn't been linked into the btree page structure; but to - * avoid leaving possibly-confusing junk behind, we are careful to rewrite - * rightpage as zeroes before throwing any error. + * into origpage on success. rightpage is the new page that receives the + * right-sibling data. 
If we fail before reaching the critical section, + * origpage hasn't been modified and leftpage is only workspace. In + * principle we shouldn't need to worry about rightpage either, because it + * hasn't been linked into the btree page structure; but to avoid leaving + * possibly-confusing junk behind, we are careful to rewrite rightpage as + * zeroes before throwing any error. */ origpage = BufferGetPage(buf); leftpage = PageGetTempPage(origpage); @@ -1118,7 +1118,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright, { memset(rightpage, 0, BufferGetPageSize(rbuf)); elog(ERROR, "right sibling's left-link doesn't match: " - "block %u links to %u instead of expected %u in index \"%s\"", + "block %u links to %u instead of expected %u in index \"%s\"", oopaque->btpo_next, sopaque->btpo_prev, origpagenumber, RelationGetRelationName(rel)); } diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 27964455f7c..2477736281b 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -1268,9 +1268,9 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack) /* * Check that the parent-page index items we're about to delete/overwrite - * contain what we expect. This can fail if the index has become - * corrupt for some reason. We want to throw any error before entering - * the critical section --- otherwise it'd be a PANIC. + * contain what we expect. This can fail if the index has become corrupt + * for some reason. We want to throw any error before entering the + * critical section --- otherwise it'd be a PANIC. * * The test on the target item is just an Assert because _bt_getstackbuf * should have guaranteed it has the expected contents. The test on the diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 7a0e1a9c25e..6a7ddd7db4d 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -220,7 +220,7 @@ btbuildempty(PG_FUNCTION_ARGS) metapage = (Page) palloc(BLCKSZ); _bt_initmetapage(metapage, P_NONE, 0); - /* Write the page. If archiving/streaming, XLOG it. */ + /* Write the page. If archiving/streaming, XLOG it. */ smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE, (char *) metapage, true); if (XLogIsNeeded()) @@ -403,6 +403,7 @@ btrescan(PG_FUNCTION_ARGS) { IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0); ScanKey scankey = (ScanKey) PG_GETARG_POINTER(1); + /* remaining arguments are ignored */ BTScanOpaque so = (BTScanOpaque) scan->opaque; diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index cb78a1bae16..91f8cadea52 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -65,7 +65,7 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey, /* If index is empty and access = BT_READ, no root page is created. */ if (!BufferIsValid(*bufP)) { - PredicateLockRelation(rel); /* Nothing finer to lock exists. */ + PredicateLockRelation(rel); /* Nothing finer to lock exists. */ return (BTStack) NULL; } @@ -1364,7 +1364,7 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost) if (!BufferIsValid(buf)) { /* empty index... */ - PredicateLockRelation(rel); /* Nothing finer to lock exists. */ + PredicateLockRelation(rel); /* Nothing finer to lock exists. */ return InvalidBuffer; } @@ -1444,7 +1444,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) if (!BufferIsValid(buf)) { /* empty index... 
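/*
 * The _bt_split comment above describes building the new left sibling
 * in a temporary page and copying it back over the original only once
 * the split can no longer fail, so an error midway leaves origpage
 * untouched.  A minimal standalone sketch of that copy-on-success
 * discipline, with a plain byte buffer standing in for a buffer page:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 8192

static int
rearrange(char *work)
{
	/* pretend to redistribute items; return 0 on failure */
	memset(work, 'L', 64);
	return 1;
}

static int
split_page(char *origpage)
{
	char	   *leftpage = malloc(PAGE_SIZE);
	int			ok;

	if (leftpage == NULL)
		return 0;
	memcpy(leftpage, origpage, PAGE_SIZE);	/* work on a scratch copy */
	ok = rearrange(leftpage);
	if (ok)
		memcpy(origpage, leftpage, PAGE_SIZE);	/* commit only on success */
	free(leftpage);				/* origpage is unmodified on failure */
	return ok;
}

int
main(void)
{
	char	   *page = calloc(1, PAGE_SIZE);

	printf("split %s\n", split_page(page) ? "succeeded" : "failed");
	free(page);
	return 0;
}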
*/ - PredicateLockRelation(rel); /* Nothing finer to lock exists. */ + PredicateLockRelation(rel); /* Nothing finer to lock exists. */ so->currPos.buf = InvalidBuffer; return false; } diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index fd0e86a6aa3..256a7f9f98f 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -799,7 +799,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) /* * If the index is WAL-logged, we must fsync it down to disk before it's - * safe to commit the transaction. (For a non-WAL-logged index we don't + * safe to commit the transaction. (For a non-WAL-logged index we don't * care since the index will be uninteresting after a crash anyway.) * * It's obvious that we must do this when not WAL-logging the build. It's diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index add932d9428..d448ba6a502 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -70,8 +70,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup) /* * We can use the cached (default) support procs since no cross-type - * comparison can be needed. The cached support proc entries have - * the right collation for the index, too. + * comparison can be needed. The cached support proc entries have the + * right collation for the index, too. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); arg = index_getattr(itup, i + 1, itupdesc, &null); @@ -120,8 +120,8 @@ _bt_mkscankey_nodata(Relation rel) /* * We can use the cached (default) support procs since no cross-type - * comparison can be needed. The cached support proc entries have - * the right collation for the index, too. + * comparison can be needed. The cached support proc entries have the + * right collation for the index, too. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); flags = SK_ISNULL | (indoption[i] << SK_BT_INDOPTION_SHIFT); diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 729c7b72e0f..281268120ef 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -120,7 +120,7 @@ typedef struct GlobalTransactionData TransactionId locking_xid; /* top-level XID of backend working on xact */ bool valid; /* TRUE if fully prepared */ char gid[GIDSIZE]; /* The GID assigned to the prepared xact */ -} GlobalTransactionData; +} GlobalTransactionData; /* * Two Phase Commit shared state. Access to this struct is protected @@ -1029,8 +1029,8 @@ EndPrepare(GlobalTransaction gxact) /* If we crash now, we have prepared: WAL replay will fix things */ /* - * Wake up all walsenders to send WAL up to the PREPARE record - * immediately if replication is enabled + * Wake up all walsenders to send WAL up to the PREPARE record immediately + * if replication is enabled */ if (max_wal_senders > 0) WalSndWakeup(); @@ -2043,8 +2043,8 @@ RecordTransactionCommitPrepared(TransactionId xid, /* * Wait for synchronous replication, if required. * - * Note that at this stage we have marked clog, but still show as - * running in the procarray and continue to hold locks. + * Note that at this stage we have marked clog, but still show as running + * in the procarray and continue to hold locks. */ SyncRepWaitForLSN(recptr); } @@ -2130,8 +2130,8 @@ RecordTransactionAbortPrepared(TransactionId xid, /* * Wait for synchronous replication, if required. 
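/*
 * Both the nbtsort.c fsync comment and the twophase.c hunks above hinge
 * on ordering: data must reach stable storage before the action that
 * depends on it is acknowledged.  A standalone sketch of the basic
 * write-then-fsync step (POSIX only; error handling trimmed to the
 * essentials, and the file name is illustrative):
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	const char *payload = "prepared-state record\n";
	int			fd = open("/tmp/demo_state", O_WRONLY | O_CREAT | O_TRUNC, 0600);

	if (fd < 0)
		return 1;
	if (write(fd, payload, strlen(payload)) != (ssize_t) strlen(payload))
		return 1;
	if (fsync(fd) != 0)			/* durability point: only past this line is */
		return 1;				/* it safe to tell anyone the record exists */
	close(fd);
	puts("record durable; safe to acknowledge");
	return 0;
}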
* - * Note that at this stage we have marked clog, but still show as - * running in the procarray and continue to hold locks. + * Note that at this stage we have marked clog, but still show as running + * in the procarray and continue to hold locks. */ SyncRepWaitForLSN(recptr); } diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index a828b3de48f..500335bd6ff 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -355,9 +355,9 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid) char *oldest_datname; /* - * We can be called when not inside a transaction, for example - * during StartupXLOG(). In such a case we cannot do database - * access, so we must just report the oldest DB's OID. + * We can be called when not inside a transaction, for example during + * StartupXLOG(). In such a case we cannot do database access, so we + * must just report the oldest DB's OID. * * Note: it's also possible that get_database_name fails and returns * NULL, for example because the database just got dropped. We'll diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 55aee879101..8a4c4eccd73 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -420,11 +420,11 @@ AssignTransactionId(TransactionState s) */ if (isSubXact && !TransactionIdIsValid(s->parent->transactionId)) { - TransactionState p = s->parent; - TransactionState *parents; - size_t parentOffset = 0; + TransactionState p = s->parent; + TransactionState *parents; + size_t parentOffset = 0; - parents = palloc(sizeof(TransactionState) * s->nestingLevel); + parents = palloc(sizeof(TransactionState) * s->nestingLevel); while (p != NULL && !TransactionIdIsValid(p->transactionId)) { parents[parentOffset++] = p; @@ -432,8 +432,8 @@ AssignTransactionId(TransactionState s) } /* - * This is technically a recursive call, but the recursion will - * never be more than one layer deep. + * This is technically a recursive call, but the recursion will never + * be more than one layer deep. */ while (parentOffset != 0) AssignTransactionId(parents[--parentOffset]); @@ -1037,16 +1037,17 @@ RecordTransactionCommit(void) /* * Check if we want to commit asynchronously. We can allow the XLOG flush * to happen asynchronously if synchronous_commit=off, or if the current - * transaction has not performed any WAL-logged operation. The latter case - * can arise if the current transaction wrote only to temporary and/or - * unlogged tables. In case of a crash, the loss of such a transaction - * will be irrelevant since temp tables will be lost anyway, and unlogged - * tables will be truncated. (Given the foregoing, you might think that it - * would be unnecessary to emit the XLOG record at all in this case, but we - * don't currently try to do that. It would certainly cause problems at - * least in Hot Standby mode, where the KnownAssignedXids machinery - * requires tracking every XID assignment. It might be OK to skip it only - * when wal_level < hot_standby, but for now we don't.) + * transaction has not performed any WAL-logged operation. The latter + * case can arise if the current transaction wrote only to temporary + * and/or unlogged tables. In case of a crash, the loss of such a + * transaction will be irrelevant since temp tables will be lost anyway, + * and unlogged tables will be truncated. 
(Given the foregoing, you might + * think that it would be unnecessary to emit the XLOG record at all in + * this case, but we don't currently try to do that. It would certainly + * cause problems at least in Hot Standby mode, where the + * KnownAssignedXids machinery requires tracking every XID assignment. It + * might be OK to skip it only when wal_level < hot_standby, but for now + * we don't.) * * However, if we're doing cleanup of any non-temp rels or committing any * command that wanted to force sync commit, then we must flush XLOG @@ -1130,8 +1131,8 @@ RecordTransactionCommit(void) /* * Wait for synchronous replication, if required. * - * Note that at this stage we have marked clog, but still show as - * running in the procarray and continue to hold locks. + * Note that at this stage we have marked clog, but still show as running + * in the procarray and continue to hold locks. */ SyncRepWaitForLSN(XactLastRecEnd); @@ -1785,10 +1786,10 @@ CommitTransaction(void) } /* - * The remaining actions cannot call any user-defined code, so it's - * safe to start shutting down within-transaction services. But note - * that most of this stuff could still throw an error, which would - * switch us into the transaction-abort path. + * The remaining actions cannot call any user-defined code, so it's safe + * to start shutting down within-transaction services. But note that most + * of this stuff could still throw an error, which would switch us into + * the transaction-abort path. */ /* Shut down the deferred-trigger manager */ @@ -1805,8 +1806,8 @@ CommitTransaction(void) /* * Mark serializable transaction as complete for predicate locking - * purposes. This should be done as late as we can put it and still - * allow errors to be raised for failure patterns found at commit. + * purposes. This should be done as late as we can put it and still allow + * errors to be raised for failure patterns found at commit. */ PreCommit_CheckForSerializationFailure(); @@ -1988,10 +1989,10 @@ PrepareTransaction(void) } /* - * The remaining actions cannot call any user-defined code, so it's - * safe to start shutting down within-transaction services. But note - * that most of this stuff could still throw an error, which would - * switch us into the transaction-abort path. + * The remaining actions cannot call any user-defined code, so it's safe + * to start shutting down within-transaction services. But note that most + * of this stuff could still throw an error, which would switch us into + * the transaction-abort path. */ /* Shut down the deferred-trigger manager */ @@ -2008,8 +2009,8 @@ PrepareTransaction(void) /* * Mark serializable transaction as complete for predicate locking - * purposes. This should be done as late as we can put it and still - * allow errors to be raised for failure patterns found at commit. + * purposes. This should be done as late as we can put it and still allow + * errors to be raised for failure patterns found at commit. 
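/*
 * The RecordTransactionCommit comment above boils down to a decision:
 * flush WAL synchronously only when the transaction actually wrote WAL
 * and either synchronous commit is in effect or something forced a
 * sync commit.  A simplified standalone model of that decision; the
 * names here are illustrative, not the backend's, and the real test
 * has more inputs:
 */
#include <stdbool.h>
#include <stdio.h>

static bool
must_flush_xlog(bool wrote_wal, bool synchronous_commit, bool forced_sync)
{
	if (!wrote_wal)
		return false;			/* temp/unlogged-only work: loss is harmless */
	return synchronous_commit || forced_sync;
}

int
main(void)
{
	printf("temp-table-only txn: %s\n",
		   must_flush_xlog(false, true, false) ? "flush" : "async");
	printf("normal txn, sync on: %s\n",
		   must_flush_xlog(true, true, false) ? "flush" : "async");
	printf("sync off but forced: %s\n",
		   must_flush_xlog(true, false, true) ? "flush" : "async");
	return 0;
}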
*/ PreCommit_CheckForSerializationFailure(); diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index b31c79ebbdc..9c45759661c 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -64,7 +64,7 @@ /* File path names (all relative to $PGDATA) */ #define RECOVERY_COMMAND_FILE "recovery.conf" #define RECOVERY_COMMAND_DONE "recovery.done" -#define PROMOTE_SIGNAL_FILE "promote" +#define PROMOTE_SIGNAL_FILE "promote" /* User-settable parameters */ @@ -160,6 +160,7 @@ static XLogRecPtr LastRec; * known, need to check the shared state". */ static bool LocalRecoveryInProgress = true; + /* * Local copy of SharedHotStandbyActive variable. False actually means "not * known, need to check the shared state". @@ -355,10 +356,9 @@ typedef struct XLogCtlInsert /* * exclusiveBackup is true if a backup started with pg_start_backup() is * in progress, and nonExclusiveBackups is a counter indicating the number - * of streaming base backups currently in progress. forcePageWrites is - * set to true when either of these is non-zero. lastBackupStart is the - * latest checkpoint redo location used as a starting point for an online - * backup. + * of streaming base backups currently in progress. forcePageWrites is set + * to true when either of these is non-zero. lastBackupStart is the latest + * checkpoint redo location used as a starting point for an online backup. */ bool exclusiveBackup; int nonExclusiveBackups; @@ -388,7 +388,7 @@ typedef struct XLogCtlData XLogwrtResult LogwrtResult; uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */ TransactionId ckptXid; - XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */ + XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */ uint32 lastRemovedLog; /* latest removed/recycled XLOG segment */ uint32 lastRemovedSeg; @@ -425,9 +425,9 @@ typedef struct XLogCtlData bool SharedHotStandbyActive; /* - * recoveryWakeupLatch is used to wake up the startup process to - * continue WAL replay, if it is waiting for WAL to arrive or failover - * trigger file to appear. + * recoveryWakeupLatch is used to wake up the startup process to continue + * WAL replay, if it is waiting for WAL to arrive or failover trigger file + * to appear. */ Latch recoveryWakeupLatch; @@ -576,7 +576,7 @@ typedef struct xl_parameter_change /* logs restore point */ typedef struct xl_restore_point { - TimestampTz rp_time; + TimestampTz rp_time; char rp_name[MAXFNAMELEN]; } xl_restore_point; @@ -4272,27 +4272,29 @@ existsTimeLineHistory(TimeLineID probeTLI) static bool rescanLatestTimeLine(void) { - TimeLineID newtarget; + TimeLineID newtarget; + newtarget = findNewestTimeLine(recoveryTargetTLI); if (newtarget != recoveryTargetTLI) { /* * Determine the list of expected TLIs for the new TLI */ - List *newExpectedTLIs; + List *newExpectedTLIs; + newExpectedTLIs = readTimeLineHistory(newtarget); /* - * If the current timeline is not part of the history of the - * new timeline, we cannot proceed to it. + * If the current timeline is not part of the history of the new + * timeline, we cannot proceed to it. * * XXX This isn't foolproof: The new timeline might have forked from * the current one, but before the current recovery location. In that * case we will still switch to the new timeline and proceed replaying * from it even though the history doesn't match what we already * replayed. That's not good. 
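/*
 * rescanLatestTimeLine above refuses to switch timelines unless the
 * timeline currently being replayed appears in the new target's
 * history.  A standalone sketch of that membership test over a plain
 * array (the backend uses a List built by readTimeLineHistory and
 * checks it with list_member_int):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TimeLineID;

static bool
tli_in_history(TimeLineID tli, const TimeLineID *history, int n)
{
	for (int i = 0; i < n; i++)
		if (history[i] == tli)
			return true;
	return false;
}

int
main(void)
{
	/* history of timeline 4: it forked from 3, which forked from 1 */
	TimeLineID	history[] = {1, 3, 4};
	TimeLineID	current = 2;

	if (!tli_in_history(current, history, 3))
		printf("timeline 2 is not an ancestor of 4: cannot switch\n");
	return 0;
}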
We will likely notice at the next online - * checkpoint, as the TLI won't match what we expected, but it's - * not guaranteed. The admin needs to make sure that doesn't happen. + * checkpoint, as the TLI won't match what we expected, but it's not + * guaranteed. The admin needs to make sure that doesn't happen. */ if (!list_member_int(newExpectedTLIs, (int) recoveryTargetTLI)) @@ -4480,7 +4482,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, timestamptz_to_str(recoveryStopTime)); else if (recoveryTarget == RECOVERY_TARGET_NAME) snprintf(buffer, sizeof(buffer), - "%s%u\t%s\tat restore point \"%s\"\n", + "%s%u\t%s\tat restore point \"%s\"\n", (srcfd < 0) ? "" : "\n", parentTLI, xlogfname, @@ -4921,7 +4923,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source) { /* * If we haven't yet changed the boot_val default of -1, just let it - * be. We'll fix it when XLOGShmemSize is called. + * be. We'll fix it when XLOGShmemSize is called. */ if (XLOGbuffers == -1) return true; @@ -4954,8 +4956,8 @@ XLOGShmemSize(void) /* * If the value of wal_buffers is -1, use the preferred auto-tune value. * This isn't an amazingly clean place to do this, but we must wait till - * NBuffers has received its final value, and must do it before using - * the value of XLOGbuffers to do anything important. + * NBuffers has received its final value, and must do it before using the + * value of XLOGbuffers to do anything important. */ if (XLOGbuffers == -1) { @@ -5086,9 +5088,9 @@ BootStrapXLOG(void) /* * Set up information for the initial checkpoint record * - * The initial checkpoint record is written to the beginning of the - * WAL segment with logid=0 logseg=1. The very first WAL segment, 0/0, is - * not used, so that we can use 0/0 to mean "before any valid WAL segment". + * The initial checkpoint record is written to the beginning of the WAL + * segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not + * used, so that we can use 0/0 to mean "before any valid WAL segment". */ checkPoint.redo.xlogid = 0; checkPoint.redo.xrecoff = XLogSegSize + SizeOfXLogLongPHD; @@ -5219,8 +5221,8 @@ readRecoveryCommandFile(void) TimeLineID rtli = 0; bool rtliGiven = false; ConfigVariable *item, - *head = NULL, - *tail = NULL; + *head = NULL, + *tail = NULL; fd = AllocateFile(RECOVERY_COMMAND_FILE, "r"); if (fd == NULL) @@ -5236,7 +5238,7 @@ readRecoveryCommandFile(void) /* * Since we're asking ParseConfigFp() to error out at FATAL, there's no * need to check the return value. 
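/*
 * The check_wal_buffers and XLOGShmemSize hunks above implement a
 * common settings idiom: -1 means "auto-tune later, once the inputs
 * are final".  A standalone sketch of resolving such a setting from
 * another one; the ratio and clamp below are illustrative, not the
 * server's exact formula:
 */
#include <stdio.h>

static int
resolve_wal_buffers(int xlog_buffers, int n_buffers)
{
	int			autoval;

	if (xlog_buffers != -1)
		return xlog_buffers;	/* explicitly set: honor it */
	/* auto-tune: a fraction of shared buffers, clamped to a minimum */
	autoval = n_buffers / 32;
	return autoval < 8 ? 8 : autoval;
}

int
main(void)
{
	printf("explicit 64 -> %d\n", resolve_wal_buffers(64, 16384));
	printf("auto, 16384 shared buffers -> %d\n",
		   resolve_wal_buffers(-1, 16384));
	return 0;
}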
- */ + */ ParseConfigFp(fd, RECOVERY_COMMAND_FILE, 0, FATAL, &head, &tail); for (item = head; item; item = item->next) @@ -5312,7 +5314,7 @@ readRecoveryCommandFile(void) * this overrides recovery_target_time */ if (recoveryTarget == RECOVERY_TARGET_XID || - recoveryTarget == RECOVERY_TARGET_NAME) + recoveryTarget == RECOVERY_TARGET_NAME) continue; recoveryTarget = RECOVERY_TARGET_TIME; @@ -5321,7 +5323,7 @@ readRecoveryCommandFile(void) */ recoveryTargetTime = DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in, - CStringGetDatum(item->value), + CStringGetDatum(item->value), ObjectIdGetDatum(InvalidOid), Int32GetDatum(-1))); ereport(DEBUG2, @@ -5610,8 +5612,8 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) if (recoveryTarget == RECOVERY_TARGET_UNSET) { /* - * Save timestamp of latest transaction commit/abort if this is - * a transaction record + * Save timestamp of latest transaction commit/abort if this is a + * transaction record */ if (record->xl_rmid == RM_XACT_ID) SetLatestXTime(recordXtime); @@ -5636,8 +5638,8 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) else if (recoveryTarget == RECOVERY_TARGET_NAME) { /* - * There can be many restore points that share the same name, so we stop - * at the first one + * There can be many restore points that share the same name, so we + * stop at the first one */ stopsHere = (strcmp(recordRPName, recoveryTargetName) == 0); @@ -5699,14 +5701,14 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) strncpy(recoveryStopName, recordRPName, MAXFNAMELEN); ereport(LOG, - (errmsg("recovery stopping at restore point \"%s\", time %s", - recoveryStopName, - timestamptz_to_str(recoveryStopTime)))); + (errmsg("recovery stopping at restore point \"%s\", time %s", + recoveryStopName, + timestamptz_to_str(recoveryStopTime)))); } /* - * Note that if we use a RECOVERY_TARGET_TIME then we can stop - * at a restore point since they are timestamped, though the latest + * Note that if we use a RECOVERY_TARGET_TIME then we can stop at a + * restore point since they are timestamped, though the latest * transaction time is not updated. 
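/*
 * recoveryStopsHere above compares each replayed record against the
 * configured recovery target.  A trimmed standalone model of the
 * dispatch; the enum names mirror the constants in the hunks, but the
 * logic is a simplification (it ignores inclusive/exclusive stop
 * handling and wraparound-aware XID comparison):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef enum
{
	RECOVERY_TARGET_UNSET,
	RECOVERY_TARGET_XID,
	RECOVERY_TARGET_TIME,
	RECOVERY_TARGET_NAME
}	RecoveryTargetType;

static bool
stops_here(RecoveryTargetType target,
		   uint32_t rec_xid, int64_t rec_time, const char *rec_rpname,
		   uint32_t tgt_xid, int64_t tgt_time, const char *tgt_name)
{
	switch (target)
	{
		case RECOVERY_TARGET_XID:
			return rec_xid >= tgt_xid;
		case RECOVERY_TARGET_TIME:
			return rec_time >= tgt_time;
		case RECOVERY_TARGET_NAME:
			/* many restore points may share a name; stop at the first */
			return rec_rpname != NULL && strcmp(rec_rpname, tgt_name) == 0;
		default:
			return false;		/* unset: replay to the end of WAL */
	}
}

int
main(void)
{
	printf("%d\n", stops_here(RECOVERY_TARGET_NAME, 0, 0, "before_upgrade",
							  0, 0, "before_upgrade"));	/* prints 1 */
	return 0;
}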
*/ if (record->xl_rmid == RM_XACT_ID && recoveryStopAfter) @@ -5732,7 +5734,7 @@ recoveryPausesHere(void) while (RecoveryIsPaused()) { - pg_usleep(1000000L); /* 1000 ms */ + pg_usleep(1000000L); /* 1000 ms */ HandleStartupProcInterrupts(); } } @@ -5742,7 +5744,7 @@ RecoveryIsPaused(void) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; - bool recoveryPause; + bool recoveryPause; SpinLockAcquire(&xlogctl->info_lck); recoveryPause = xlogctl->recoveryPause; @@ -5771,7 +5773,7 @@ pg_xlog_replay_pause(PG_FUNCTION_ARGS) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to control recovery")))); + (errmsg("must be superuser to control recovery")))); if (!RecoveryInProgress()) ereport(ERROR, @@ -5793,7 +5795,7 @@ pg_xlog_replay_resume(PG_FUNCTION_ARGS) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to control recovery")))); + (errmsg("must be superuser to control recovery")))); if (!RecoveryInProgress()) ereport(ERROR, @@ -5815,7 +5817,7 @@ pg_is_xlog_replay_paused(PG_FUNCTION_ARGS) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to control recovery")))); + (errmsg("must be superuser to control recovery")))); if (!RecoveryInProgress()) ereport(ERROR, @@ -5870,7 +5872,7 @@ GetLatestXTime(void) Datum pg_last_xact_replay_timestamp(PG_FUNCTION_ARGS) { - TimestampTz xtime; + TimestampTz xtime; xtime = GetLatestXTime(); if (xtime == 0) @@ -6132,10 +6134,10 @@ StartupXLOG(void) InRecovery = true; /* force recovery even if SHUTDOWNED */ /* - * Make sure that REDO location exists. This may not be - * the case if there was a crash during an online backup, - * which left a backup_label around that references a WAL - * segment that's already been archived. + * Make sure that REDO location exists. This may not be the case + * if there was a crash during an online backup, which left a + * backup_label around that references a WAL segment that's + * already been archived. */ if (XLByteLT(checkPoint.redo, checkPointLoc)) { @@ -6150,7 +6152,7 @@ StartupXLOG(void) ereport(FATAL, (errmsg("could not locate required checkpoint record"), errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir))); - wasShutdown = false; /* keep compiler quiet */ + wasShutdown = false; /* keep compiler quiet */ } /* set flag to delete it later */ haveBackupLabel = true; @@ -6330,9 +6332,9 @@ StartupXLOG(void) /* * We're in recovery, so unlogged relations may be trashed - * and must be reset. This should be done BEFORE allowing Hot - * Standby connections, so that read-only backends don't try to - * read whatever garbage is left over from before. + * and must be reset. This should be done BEFORE allowing Hot Standby + * connections, so that read-only backends don't try to read whatever + * garbage is left over from before. */ ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP); @@ -6517,7 +6519,8 @@ StartupXLOG(void) if (recoveryStopsHere(record, &recoveryApply)) { /* - * Pause only if users can connect to send a resume message + * Pause only if users can connect to send a resume + * message */ if (recoveryPauseAtTarget && standbyState == STANDBY_SNAPSHOT_READY) { @@ -7003,8 +7006,8 @@ HotStandbyActive(void) { /* * We check shared state each time only until Hot Standby is active. 
We - * can't de-activate Hot Standby, so there's no need to keep checking after - * the shared variable has once been seen true. + * can't de-activate Hot Standby, so there's no need to keep checking + * after the shared variable has once been seen true. */ if (LocalHotStandbyActive) return true; @@ -7429,14 +7432,14 @@ LogCheckpointEnd(bool restartpoint) */ longest_secs = (long) (CheckpointStats.ckpt_longest_sync / 1000000); longest_usecs = CheckpointStats.ckpt_longest_sync - - (uint64) longest_secs * 1000000; + (uint64) longest_secs *1000000; average_sync_time = 0; - if (CheckpointStats.ckpt_sync_rels > 0) + if (CheckpointStats.ckpt_sync_rels > 0) average_sync_time = CheckpointStats.ckpt_agg_sync_time / CheckpointStats.ckpt_sync_rels; average_secs = (long) (average_sync_time / 1000000); - average_usecs = average_sync_time - (uint64) average_secs * 1000000; + average_usecs = average_sync_time - (uint64) average_secs *1000000; if (restartpoint) elog(LOG, "restartpoint complete: wrote %d buffers (%.1f%%); " @@ -8241,9 +8244,9 @@ RequestXLogSwitch(void) XLogRecPtr XLogRestorePoint(const char *rpName) { - XLogRecPtr RecPtr; - XLogRecData rdata; - xl_restore_point xlrec; + XLogRecPtr RecPtr; + XLogRecData rdata; + xl_restore_point xlrec; xlrec.rp_time = GetCurrentTimestamp(); strncpy(xlrec.rp_name, rpName, MAXFNAMELEN); @@ -8257,7 +8260,7 @@ XLogRestorePoint(const char *rpName) ereport(LOG, (errmsg("restore point \"%s\" created at %X/%X", - rpName, RecPtr.xlogid, RecPtr.xrecoff))); + rpName, RecPtr.xlogid, RecPtr.xrecoff))); return RecPtr; } @@ -8643,7 +8646,7 @@ get_sync_bit(int method) /* * Optimize writes by bypassing kernel cache with O_DIRECT when using - * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are + * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are * disabled, otherwise the archive command or walsender process will read * the WAL soon after writing it, which is guaranteed to cause a physical * read if we bypassed the kernel cache. We also skip the @@ -8775,7 +8778,7 @@ pg_start_backup(PG_FUNCTION_ARGS) text *backupid = PG_GETARG_TEXT_P(0); bool fast = PG_GETARG_BOOL(1); char *backupidstr; - XLogRecPtr startpoint; + XLogRecPtr startpoint; char startxlogstr[MAXFNAMELEN]; backupidstr = text_to_cstring(backupid); @@ -8791,7 +8794,7 @@ pg_start_backup(PG_FUNCTION_ARGS) * do_pg_start_backup is the workhorse of the user-visible pg_start_backup() * function. It creates the necessary starting checkpoint and constructs the * backup label file. - * + * * There are two kinds of backups: exclusive and non-exclusive. An exclusive * backup is started with pg_start_backup(), and there can be only one active * at a time. The backup label file of an exclusive backup is written to @@ -8826,7 +8829,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) if (!superuser() && !is_authenticated_user_replication_role()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser or replication role to run a backup"))); + errmsg("must be superuser or replication role to run a backup"))); if (RecoveryInProgress()) ereport(ERROR, @@ -8897,25 +8900,27 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) /* Ensure we release forcePageWrites if fail below */ PG_ENSURE_ERROR_CLEANUP(pg_start_backup_callback, (Datum) BoolGetDatum(exclusive)); { - bool gotUniqueStartpoint = false; + bool gotUniqueStartpoint = false; + do { /* * Force a CHECKPOINT. 
Aside from being necessary to prevent torn - page problems, this guarantees that two successive backup runs will - have different checkpoint positions and hence different history - file names, even if nothing happened in between. + page problems, this guarantees that two successive backup runs + will have different checkpoint positions and hence different + history file names, even if nothing happened in between. * - * We use CHECKPOINT_IMMEDIATE only if requested by user (via passing - * fast = true). Otherwise this can take a while. + * We use CHECKPOINT_IMMEDIATE only if requested by user (via + * passing fast = true). Otherwise this can take a while. */ RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT | (fast ? CHECKPOINT_IMMEDIATE : 0)); /* - * Now we need to fetch the checkpoint record location, and also its - * REDO pointer. The oldest point in WAL that would be needed to - * restore starting from the checkpoint is precisely the REDO pointer. + * Now we need to fetch the checkpoint record location, and also + * its REDO pointer. The oldest point in WAL that would be needed + * to restore starting from the checkpoint is precisely the REDO + * pointer. */ LWLockAcquire(ControlFileLock, LW_SHARED); checkpointloc = ControlFile->checkPoint; @@ -8923,16 +8928,15 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) LWLockRelease(ControlFileLock); /* - * If two base backups are started at the same time (in WAL - * sender processes), we need to make sure that they use - * different checkpoints as starting locations, because we use - * the starting WAL location as a unique identifier for the base - * backup in the end-of-backup WAL record and when we write the - * backup history file. Perhaps it would be better to generate a - * separate unique ID for each backup instead of forcing another - * checkpoint, but taking a checkpoint right after another is - * not that expensive either because only a few buffers have been - * dirtied yet. + * If two base backups are started at the same time (in WAL sender + * processes), we need to make sure that they use different + * checkpoints as starting locations, because we use the starting + * WAL location as a unique identifier for the base backup in the + * end-of-backup WAL record and when we write the backup history + * file. Perhaps it would be better to generate a separate unique ID + * for each backup instead of forcing another checkpoint, but + * taking a checkpoint right after another is not that expensive + * either because only a few buffers have been dirtied yet. */ LWLockAcquire(WALInsertLock, LW_SHARED); if (XLByteLT(XLogCtl->Insert.lastBackupStart, startpoint)) @@ -8941,13 +8945,13 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) gotUniqueStartpoint = true; } LWLockRelease(WALInsertLock); - } while(!gotUniqueStartpoint); + } while (!gotUniqueStartpoint); XLByteToSeg(startpoint, _logId, _logSeg); XLogFileName(xlogfilename, ThisTimeLineID, _logId, _logSeg); /* - * Construct backup label file + * Construct backup label file */ initStringInfo(&labelfbuf); @@ -8970,8 +8974,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) { /* * Check for existing backup label --- implies a backup is already - * running. (XXX given that we checked exclusiveBackup above, maybe - * it would be OK to just unlink any such label file?) + * running. (XXX given that we checked exclusiveBackup above, + * maybe it would be OK to just unlink any such label file?) 
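/*
 * The do/while loop above forces checkpoints until the backup's start
 * location differs from the previous backup's, so the starting WAL
 * position can serve as a unique backup identifier.  A standalone
 * sketch of that retry-until-distinct pattern; the "checkpoint" here
 * is a fake counter, not the real machinery:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t last_backup_start = 0;

static uint64_t
request_checkpoint(void)
{
	static uint64_t fake_redo = 100;

	return fake_redo += 10;		/* each call yields a new location */
}

int
main(void)
{
	uint64_t	startpoint;
	int			got_unique = 0;

	do
	{
		startpoint = request_checkpoint();
		if (last_backup_start < startpoint)
		{
			last_backup_start = startpoint;		/* claim this location */
			got_unique = 1;
		}
	} while (!got_unique);

	printf("backup start location: %llu\n",
		   (unsigned long long) startpoint);
	return 0;
}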
*/ if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0) { @@ -9018,7 +9022,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile) static void pg_start_backup_callback(int code, Datum arg) { - bool exclusive = DatumGetBool(arg); + bool exclusive = DatumGetBool(arg); /* Update backup counters and forcePageWrites on failure */ LWLockAcquire(WALInsertLock, LW_EXCLUSIVE); @@ -9101,7 +9105,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) if (!superuser() && !is_authenticated_user_replication_role()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser or replication role to run a backup")))); + (errmsg("must be superuser or replication role to run a backup")))); if (RecoveryInProgress()) ereport(ERROR, @@ -9145,8 +9149,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) /* * Read the existing label file into memory. */ - struct stat statbuf; - int r; + struct stat statbuf; + int r; if (stat(BACKUP_LABEL_FILE, &statbuf)) { @@ -9197,7 +9201,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE))); - remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */ + remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */ /* * Write the backup-end xlog record @@ -9388,8 +9392,8 @@ pg_switch_xlog(PG_FUNCTION_ARGS) Datum pg_create_restore_point(PG_FUNCTION_ARGS) { - text *restore_name = PG_GETARG_TEXT_P(0); - char *restore_name_str; + text *restore_name = PG_GETARG_TEXT_P(0); + char *restore_name_str; XLogRecPtr restorepoint; char location[MAXFNAMELEN]; @@ -9407,7 +9411,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS) if (!XLogIsNeeded()) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("WAL level not sufficient for creating a restore point"), + errmsg("WAL level not sufficient for creating a restore point"), errhint("wal_level must be set to \"archive\" or \"hot_standby\" at server start."))); restore_name_str = text_to_cstring(restore_name); @@ -9423,7 +9427,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS) * As a convenience, return the WAL location of the restore point record */ snprintf(location, sizeof(location), "%X/%X", - restorepoint.xlogid, restorepoint.xrecoff); + restorepoint.xlogid, restorepoint.xrecoff); PG_RETURN_TEXT_P(cstring_to_text(location)); } @@ -10177,8 +10181,8 @@ retry: } /* - * If it hasn't been long since last attempt, sleep - * to avoid busy-waiting. + * If it hasn't been long since last attempt, sleep to + * avoid busy-waiting. */ now = (pg_time_t) time(NULL); if ((now - last_fail_time) < 5) @@ -10404,7 +10408,7 @@ static bool CheckForStandbyTrigger(void) { struct stat stat_buf; - static bool triggered = false; + static bool triggered = false; if (triggered) return true; @@ -10446,8 +10450,8 @@ CheckPromoteSignal(void) if (stat(PROMOTE_SIGNAL_FILE, &stat_buf) == 0) { /* - * Since we are in a signal handler, it's not safe - * to elog. We silently ignore any error from unlink. + * Since we are in a signal handler, it's not safe to elog. We + * silently ignore any error from unlink. */ unlink(PROMOTE_SIGNAL_FILE); return true;
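/*
 * CheckPromoteSignal above sticks to stat() and unlink(), both
 * async-signal-safe, because it can run inside a signal handler where
 * elog() is off-limits.  A standalone sketch of the check-and-consume
 * trigger-file idiom; the file name is illustrative:
 */
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

#define TRIGGER_FILE "/tmp/demo.promote"

/* safe to call from a signal handler: no allocation, no stdio */
static bool
check_trigger_file(void)
{
	struct stat st;

	if (stat(TRIGGER_FILE, &st) == 0)
	{
		unlink(TRIGGER_FILE);	/* consume it; ignore unlink errors */
		return true;
	}
	return false;
}

int
main(void)
{
	printf("trigger %s\n", check_trigger_file() ? "seen" : "absent");
	return 0;
}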