author     Bruce Momjian <bruce@momjian.us>  2012-06-10 15:20:04 -0400
committer  Bruce Momjian <bruce@momjian.us>  2012-06-10 15:20:04 -0400
commit     927d61eeff78363ea3938c818d07e511ebaf75cf (patch)
tree       2f0bcecf53327f76272a8ce690fa62505520fab9 /src/backend/executor/nodeIndexonlyscan.c
parent     60801944fa105252b48ea5688d47dfc05c695042 (diff)
Run pgindent on 9.2 source tree in preparation for first 9.3
commit-fest.
Diffstat (limited to 'src/backend/executor/nodeIndexonlyscan.c')
-rw-r--r--  src/backend/executor/nodeIndexonlyscan.c  20
1 file changed, 10 insertions, 10 deletions
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index af31671b3eb..38078763f57 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -86,7 +86,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
* Note on Memory Ordering Effects: visibilitymap_test does not lock
* the visibility map buffer, and therefore the result we read here
* could be slightly stale. However, it can't be stale enough to
- * matter. It suffices to show that (1) there is a read barrier
+ * matter. It suffices to show that (1) there is a read barrier
* between the time we read the index TID and the time we test the
* visibility map; and (2) there is a write barrier between the time
* some other concurrent process clears the visibility map bit and the
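For readers unfamiliar with the barrier argument in this comment, here is a minimal, self-contained sketch of the same read-barrier/write-barrier pairing using C11 atomics. All names below are illustrative stand-ins, not PostgreSQL code; the backend itself uses the pg_read_barrier()/pg_write_barrier() primitives from storage/barrier.h.

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Stand-ins for the visibility-map bit and a published index TID. */
    static atomic_bool page_all_visible = true;
    static atomic_long published_tid = 0;

    /* Writer: clear the VM bit, then (after a write barrier) publish the TID. */
    static void
    writer_insert_tuple(long new_tid)
    {
        atomic_store_explicit(&page_all_visible, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* write barrier */
        atomic_store_explicit(&published_tid, new_tid, memory_order_relaxed);
    }

    /*
     * Reader: fetch the TID, then (after a read barrier) test the VM bit.
     * If the reader observes the newly published TID, the fences guarantee it
     * also observes the cleared bit, so it falls back to a heap fetch instead
     * of trusting the index entry alone.
     */
    static bool
    reader_can_skip_heap_fetch(long *tid)
    {
        *tid = atomic_load_explicit(&published_tid, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);      /* read barrier */
        return atomic_load_explicit(&page_all_visible, memory_order_relaxed);
    }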
@@ -106,12 +106,12 @@ IndexOnlyNext(IndexOnlyScanState *node)
node->ioss_HeapFetches++;
tuple = index_fetch_heap(scandesc);
if (tuple == NULL)
- continue; /* no visible tuple, try next index entry */
+ continue; /* no visible tuple, try next index entry */
/*
* Only MVCC snapshots are supported here, so there should be no
* need to keep following the HOT chain once a visible entry has
- * been found. If we did want to allow that, we'd need to keep
+ * been found. If we did want to allow that, we'd need to keep
* more state to remember not to call index_getnext_tid next time.
*/
if (scandesc->xs_continue_hot)
@@ -120,7 +120,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
/*
* Note: at this point we are holding a pin on the heap page, as
* recorded in scandesc->xs_cbuf. We could release that pin now,
- * but it's not clear whether it's a win to do so. The next index
+ * but it's not clear whether it's a win to do so. The next index
* entry might require a visit to the same heap page.
*/
}
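For illustration only, releasing that pin eagerly would amount to something like the following hypothetical snippet; the code deliberately does not do this, since the next index entry may point at the same heap page and the pin would just have to be re-acquired.

    /* Hypothetical early release of the heap-page pin (not what the code does). */
    if (BufferIsValid(scandesc->xs_cbuf))
    {
        ReleaseBuffer(scandesc->xs_cbuf);
        scandesc->xs_cbuf = InvalidBuffer;
    }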
@@ -176,8 +176,8 @@ StoreIndexTuple(TupleTableSlot *slot, IndexTuple itup, TupleDesc itupdesc)
* Note: we must use the tupdesc supplied by the AM in index_getattr, not
* the slot's tupdesc, in case the latter has different datatypes (this
* happens for btree name_ops in particular). They'd better have the same
- * number of columns though, as well as being datatype-compatible which
- * is something we can't so easily check.
+ * number of columns though, as well as being datatype-compatible which is
+ * something we can't so easily check.
*/
Assert(slot->tts_tupleDescriptor->natts == nindexatts);
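The hunk does not show the body of StoreIndexTuple, but the pattern this Assert guards looks roughly like the sketch below: each column is decoded with index_getattr against the AM-supplied itupdesc and stored into the slot positionally, which is why the two descriptors must agree on attribute count even though their datatypes may differ. Names are taken from the function's signature; the loop itself is a simplified illustration.

    /* Sketch: decode index-tuple columns with the AM's descriptor. */
    ExecClearTuple(slot);
    for (i = 0; i < nindexatts; i++)
        slot->tts_values[i] = index_getattr(itup, i + 1, itupdesc,
                                            &slot->tts_isnull[i]);
    ExecStoreVirtualTuple(slot);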
@@ -494,10 +494,10 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
* Initialize scan descriptor.
*/
indexstate->ioss_ScanDesc = index_beginscan(currentRelation,
- indexstate->ioss_RelationDesc,
- estate->es_snapshot,
- indexstate->ioss_NumScanKeys,
- indexstate->ioss_NumOrderByKeys);
+ indexstate->ioss_RelationDesc,
+ estate->es_snapshot,
+ indexstate->ioss_NumScanKeys,
+ indexstate->ioss_NumOrderByKeys);
/* Set it up for index-only scan */
indexstate->ioss_ScanDesc->xs_want_itup = true;
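Setting xs_want_itup = true before the scan starts is what tells the access method to hand back index tuples in xs_itup. A heavily simplified sketch of the access pattern the executor then drives is shown below; heapRel, vmbuf, and the scan-key arrays are illustrative placeholders, and the real IndexOnlyNext loop does considerably more bookkeeping.

    /* Simplified index-only-scan driver loop (illustrative, not IndexOnlyNext). */
    Buffer      vmbuf = InvalidBuffer;
    ItemPointer tid;

    index_rescan(scandesc, scankeys, numscankeys, orderbykeys, numorderbys);
    while ((tid = index_getnext_tid(scandesc, ForwardScanDirection)) != NULL)
    {
        if (!visibilitymap_test(heapRel, ItemPointerGetBlockNumber(tid), &vmbuf))
        {
            /* Page not all-visible: verify the tuple against our snapshot. */
            if (index_fetch_heap(scandesc) == NULL)
                continue;       /* no visible version, try the next entry */
        }
        /* ... build the result row from scandesc->xs_itup ... */
    }
    if (BufferIsValid(vmbuf))
        ReleaseBuffer(vmbuf);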