Diffstat (limited to 'src/backend/access/nbtree')
-rw-r--r--   src/backend/access/nbtree/nbtinsert.c   | 55
-rw-r--r--   src/backend/access/nbtree/nbtpage.c     |  8
-rw-r--r--   src/backend/access/nbtree/nbtree.c      | 22
-rw-r--r--   src/backend/access/nbtree/nbtsort.c     | 20
-rw-r--r--   src/backend/access/nbtree/nbtutils.c    | 37
5 files changed, 72 insertions, 70 deletions
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index ecf4e53502a..3d5936f186e 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -132,31 +132,31 @@ _bt_doinsert(Relation rel, IndexTuple itup,
 	 * rightmost leaf, has enough free space to accommodate a new entry and
 	 * the insertion key is strictly greater than the first key in this page,
 	 * then we can safely conclude that the new key will be inserted in the
-	 * cached block. So we simply search within the cached block and insert the
-	 * key at the appropriate location. We call it a fastpath.
+	 * cached block. So we simply search within the cached block and insert
+	 * the key at the appropriate location. We call it a fastpath.
 	 *
 	 * Testing has revealed, though, that the fastpath can result in increased
 	 * contention on the exclusive-lock on the rightmost leaf page. So we
-	 * conditionally check if the lock is available. If it's not available then
-	 * we simply abandon the fastpath and take the regular path. This makes
-	 * sense because unavailability of the lock also signals that some other
-	 * backend might be concurrently inserting into the page, thus reducing our
-	 * chances to finding an insertion place in this page.
+	 * conditionally check if the lock is available. If it's not available
+	 * then we simply abandon the fastpath and take the regular path. This
+	 * makes sense because unavailability of the lock also signals that some
+	 * other backend might be concurrently inserting into the page, thus
+	 * reducing our chances to finding an insertion place in this page.
 	 */
 top:
 	fastpath = false;
 	offset = InvalidOffsetNumber;
 	if (RelationGetTargetBlock(rel) != InvalidBlockNumber)
 	{
-		Size		itemsz;
-		Page		page;
-		BTPageOpaque lpageop;
+		Size		itemsz;
+		Page		page;
+		BTPageOpaque lpageop;
 
 		/*
 		 * Conditionally acquire exclusive lock on the buffer before doing any
 		 * checks. If we don't get the lock, we simply follow slowpath. If we
-		 * do get the lock, this ensures that the index state cannot change, as
-		 * far as the rightmost part of the index is concerned.
+		 * do get the lock, this ensures that the index state cannot change,
+		 * as far as the rightmost part of the index is concerned.
 		 */
 		buf = ReadBuffer(rel, RelationGetTargetBlock(rel));
 
@@ -173,8 +173,8 @@ top:
 
 			/*
 			 * Check if the page is still the rightmost leaf page, has enough
-			 * free space to accommodate the new tuple, and the insertion
-			 * scan key is strictly greater than the first key on the page.
+			 * free space to accommodate the new tuple, and the insertion scan
+			 * key is strictly greater than the first key on the page.
 			 */
 			if (P_ISLEAF(lpageop) && P_RIGHTMOST(lpageop) &&
 				!P_IGNORE(lpageop) &&
@@ -207,8 +207,8 @@ top:
 			ReleaseBuffer(buf);
 
 			/*
-			 * If someone's holding a lock, it's likely to change anyway,
-			 * so don't try again until we get an updated rightmost leaf.
+			 * If someone's holding a lock, it's likely to change anyway, so
+			 * don't try again until we get an updated rightmost leaf.
 			 */
 			RelationSetTargetBlock(rel, InvalidBlockNumber);
 		}
@@ -882,22 +882,22 @@ _bt_insertonpg(Relation rel,
 		Buffer		rbuf;
 
 		/*
-		 * If we're here then a pagesplit is needed. We should never reach here
-		 * if we're using the fastpath since we should have checked for all the
-		 * required conditions, including the fact that this page has enough
-		 * freespace. Note that this routine can in theory deal with the
-		 * situation where a NULL stack pointer is passed (that's what would
-		 * happen if the fastpath is taken), like it does during crash
+		 * If we're here then a pagesplit is needed. We should never reach
+		 * here if we're using the fastpath since we should have checked for
+		 * all the required conditions, including the fact that this page has
+		 * enough freespace. Note that this routine can in theory deal with
+		 * the situation where a NULL stack pointer is passed (that's what
+		 * would happen if the fastpath is taken), like it does during crash
 		 * recovery. But that path is much slower, defeating the very purpose
-		 * of the optimization. The following assertion should protect us from
-		 * any future code changes that invalidate those assumptions.
+		 * of the optimization. The following assertion should protect us
+		 * from any future code changes that invalidate those assumptions.
 		 *
 		 * Note that whenever we fail to take the fastpath, we clear the
 		 * cached block. Checking for a valid cached block at this point is
 		 * enough to decide whether we're in a fastpath or not.
 		 */
 		Assert(!(P_ISLEAF(lpageop) &&
-				 BlockNumberIsValid(RelationGetTargetBlock(rel))));
+			   BlockNumberIsValid(RelationGetTargetBlock(rel))));
 
 		/* Choose the split point */
 		firstright = _bt_findsplitloc(rel, page,
@@ -936,7 +936,7 @@ _bt_insertonpg(Relation rel,
 		BTMetaPageData *metad = NULL;
 		OffsetNumber itup_off;
 		BlockNumber itup_blkno;
-		BlockNumber cachedBlock = InvalidBlockNumber;
+		BlockNumber cachedBlock = InvalidBlockNumber;
 
 		itup_off = newitemoff;
 		itup_blkno = BufferGetBlockNumber(buf);
@@ -1093,7 +1093,8 @@ _bt_insertonpg(Relation rel,
 		 * We do this after dropping locks on all buffers. So the information
 		 * about whether the insertion block is still the rightmost block or
 		 * not may have changed in between. But we will deal with that during
-		 * next insert operation. No special care is required while setting it.
+		 * next insert operation. No special care is required while setting
+		 * it.
 		 */
 		if (BlockNumberIsValid(cachedBlock) &&
 			_bt_getrootheight(rel) >= BTREE_FASTPATH_MIN_LEVEL)
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 3be229db1f0..3bcc56e9d27 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -155,11 +155,11 @@ void
 _bt_update_meta_cleanup_info(Relation rel, TransactionId oldestBtpoXact,
 							 float8 numHeapTuples)
 {
-	Buffer		metabuf;
-	Page		metapg;
+	Buffer		metabuf;
+	Page		metapg;
 	BTMetaPageData *metad;
-	bool		needsRewrite = false;
-	XLogRecPtr	recptr;
+	bool		needsRewrite = false;
+	XLogRecPtr	recptr;
 
 	/* read the metapage and check if it needs rewrite */
 	metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index d97f5249deb..e5dce00876e 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -785,10 +785,10 @@ _bt_parallel_advance_array_keys(IndexScanDesc scan)
 static bool
 _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 {
-	Buffer		metabuf;
-	Page		metapg;
+	Buffer		metabuf;
+	Page		metapg;
 	BTMetaPageData *metad;
-	bool		result = false;
+	bool		result = false;
 
 	metabuf = _bt_getbuf(info->index, BTREE_METAPAGE, BT_READ);
 	metapg = BufferGetPage(metabuf);
@@ -814,8 +814,8 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 	}
 	else
 	{
-		StdRdOptions *relopts;
-		float8 cleanup_scale_factor;
+		StdRdOptions *relopts;
+		float8		cleanup_scale_factor;
 
 		/*
 		 * If table receives large enough amount of insertions and no cleanup
@@ -825,14 +825,14 @@ _bt_vacuum_needs_cleanup(IndexVacuumInfo *info)
 		 */
 		relopts = (StdRdOptions *) info->index->rd_options;
 		cleanup_scale_factor = (relopts &&
-							relopts->vacuum_cleanup_index_scale_factor >= 0)
-			? relopts->vacuum_cleanup_index_scale_factor
-			: vacuum_cleanup_index_scale_factor;
+								relopts->vacuum_cleanup_index_scale_factor >= 0)
+			? relopts->vacuum_cleanup_index_scale_factor
+			: vacuum_cleanup_index_scale_factor;
 
 		if (cleanup_scale_factor < 0 ||
 			metad->btm_last_cleanup_num_heap_tuples < 0 ||
 			info->num_heap_tuples > (1.0 + cleanup_scale_factor) *
-			metad->btm_last_cleanup_num_heap_tuples)
+			metad->btm_last_cleanup_num_heap_tuples)
 			result = true;
 	}
 
@@ -862,7 +862,7 @@ btbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
 	/* The ENSURE stuff ensures we clean up shared memory on failure */
 	PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel));
 	{
-		TransactionId oldestBtpoXact;
+		TransactionId oldestBtpoXact;
 
 		cycleid = _bt_start_vacuum(rel);
 
@@ -907,7 +907,7 @@ btvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 	 */
 	if (stats == NULL)
 	{
-		TransactionId oldestBtpoXact;
+		TransactionId oldestBtpoXact;
 
 		/* Check if we need a cleanup */
 		if (!_bt_vacuum_needs_cleanup(info))
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 7deda9acac9..0587e425731 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -897,10 +897,10 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
 			/*
 			 * Truncate any non-key attributes from high key on leaf level
 			 * (i.e. truncate on leaf level if we're building an INCLUDE
-			 * index). This is only done at the leaf level because
-			 * downlinks in internal pages are either negative infinity
-			 * items, or get their contents from copying from one level
-			 * down. See also: _bt_split().
+			 * index). This is only done at the leaf level because downlinks
+			 * in internal pages are either negative infinity items, or get
+			 * their contents from copying from one level down. See also:
+			 * _bt_split().
 			 *
 			 * Since the truncated tuple is probably smaller than the
 			 * original, it cannot just be copied in place (besides, we want
@@ -908,11 +908,11 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
 			 * original high key, and add our own truncated high key at the
 			 * same offset.
 			 *
-			 * Note that the page layout won't be changed very much. oitup
-			 * is already located at the physical beginning of tuple space,
-			 * so we only shift the line pointer array back and forth, and
-			 * overwrite the latter portion of the space occupied by the
-			 * original tuple. This is fairly cheap.
+			 * Note that the page layout won't be changed very much. oitup is
+			 * already located at the physical beginning of tuple space, so we
+			 * only shift the line pointer array back and forth, and overwrite
+			 * the latter portion of the space occupied by the original tuple.
+			 * This is fairly cheap.
 			 */
 			truncated = _bt_nonkey_truncate(wstate->index, oitup);
 			truncsz = IndexTupleSize(truncated);
@@ -978,7 +978,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup)
 		 */
 		if (last_off == P_HIKEY)
 		{
-			BTPageOpaque npageop;
+			BTPageOpaque npageop;
 
 			Assert(state->btps_minkey == NULL);
 
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 0cecbf8e389..acb944357a3 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -2101,12 +2101,12 @@ btproperty(Oid index_oid, int attno,
 IndexTuple
 _bt_nonkey_truncate(Relation rel, IndexTuple itup)
 {
-	int			nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel);
-	IndexTuple	truncated;
+	int			nkeyattrs = IndexRelationGetNumberOfKeyAttributes(rel);
+	IndexTuple	truncated;
 
 	/*
-	 * We should only ever truncate leaf index tuples, which must have both key
-	 * and non-key attributes. It's never okay to truncate a second time.
+	 * We should only ever truncate leaf index tuples, which must have both
+	 * key and non-key attributes. It's never okay to truncate a second time.
 	 */
 	Assert(BTreeTupleGetNAtts(itup, rel) ==
 		   IndexRelationGetNumberOfAttributes(rel));
@@ -2133,10 +2133,10 @@ _bt_nonkey_truncate(Relation rel, IndexTuple itup)
 bool
 _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
 {
-	int16		natts = IndexRelationGetNumberOfAttributes(rel);
-	int16		nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
-	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
-	IndexTuple	itup;
+	int16		natts = IndexRelationGetNumberOfAttributes(rel);
+	int16		nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
+	BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+	IndexTuple	itup;
 
 	/*
 	 * We cannot reliably test a deleted or half-deleted page, since they have
@@ -2147,6 +2147,7 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
 
 	Assert(offnum >= FirstOffsetNumber &&
 		   offnum <= PageGetMaxOffsetNumber(page));
+
 	/*
 	 * Mask allocated for number of keys in index tuple must be able to fit
 	 * maximum possible number of index attributes
@@ -2178,29 +2179,29 @@ _bt_check_natts(Relation rel, Page page, OffsetNumber offnum)
 			return BTreeTupleGetNAtts(itup, rel) == nkeyatts;
 		}
 	}
-	else /* !P_ISLEAF(opaque) */
+	else						/* !P_ISLEAF(opaque) */
 	{
 		if (offnum == P_FIRSTDATAKEY(opaque))
 		{
 			/*
 			 * The first tuple on any internal page (possibly the first after
-			 * its high key) is its negative infinity tuple. Negative infinity
-			 * tuples are always truncated to zero attributes. They are a
-			 * particular kind of pivot tuple.
+			 * its high key) is its negative infinity tuple. Negative
+			 * infinity tuples are always truncated to zero attributes. They
+			 * are a particular kind of pivot tuple.
 			 *
 			 * The number of attributes won't be explicitly represented if the
 			 * negative infinity tuple was generated during a page split that
-			 * occurred with a version of Postgres before v11. There must be a
-			 * problem when there is an explicit representation that is
+			 * occurred with a version of Postgres before v11. There must be
+			 * a problem when there is an explicit representation that is
 			 * non-zero, or when there is no explicit representation and the
 			 * tuple is evidently not a pre-pg_upgrade tuple.
 			 *
-			 * Prior to v11, downlinks always had P_HIKEY as their offset. Use
-			 * that to decide if the tuple is a pre-v11 tuple.
+			 * Prior to v11, downlinks always had P_HIKEY as their offset.
+			 * Use that to decide if the tuple is a pre-v11 tuple.
 			 */
 			return BTreeTupleGetNAtts(itup, rel) == 0 ||
-				((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
-				 ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
+				   ((itup->t_info & INDEX_ALT_TID_MASK) == 0 &&
+					ItemPointerGetOffsetNumber(&(itup->t_tid)) == P_HIKEY);
 		}
 		else
 		{
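
The _bt_vacuum_needs_cleanup() hunks above only re-wrap an existing expression, but it is the one that decides whether a cleanup-only VACUUM pass has to scan the index at all. Below is a minimal standalone C sketch of that decision; it is not PostgreSQL source, needs_cleanup() and its parameter names are illustrative stand-ins, and only the threshold comparison mirrors the code shown in the diff.

#include <stdbool.h>

/*
 * Sketch of the cleanup-threshold test from _bt_vacuum_needs_cleanup():
 * a cleanup pass is forced when no usable scale factor or previous
 * heap-tuple count is available, or when the heap has grown by more than
 * the configured fraction since the cleanup recorded in the metapage.
 */
static bool
needs_cleanup(double cleanup_scale_factor,			/* reloption, else GUC */
			  double last_cleanup_num_heap_tuples,	/* from the btree metapage */
			  double num_heap_tuples)				/* current heap size estimate */
{
	if (cleanup_scale_factor < 0 ||
		last_cleanup_num_heap_tuples < 0 ||
		num_heap_tuples > (1.0 + cleanup_scale_factor) *
		last_cleanup_num_heap_tuples)
		return true;

	return false;
}

For example, with a scale factor of 0.1, an index whose table held 1,000,000 tuples at the last recorded cleanup is scanned again only once the current estimate exceeds 1,100,000 tuples.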