Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/nodeHash.c      14
-rw-r--r--  src/backend/executor/nodeHashjoin.c  10
-rw-r--r--  src/backend/executor/nodeLimit.c      4
3 files changed, 14 insertions(+), 14 deletions(-)
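
Every hunk in this diff rewrites a deliberate fall-through comment inside a switch statement. Compilers such as GCC can warn when control falls from one case into the next (-Wimplicit-fallthrough, which -Wextra enables), and at most warning levels a comment immediately before the next case label silences the warning if it matches one of the recognized spellings; exactly which spellings are accepted depends on the compiler and warning level, which is why the precise wording of these comments matters at all. A minimal, self-contained sketch of the idiom (not PostgreSQL code; the phases are invented):

    /*
     * Deliberate fall-through, annotated so that building with
     * "gcc -Wextra" does not warn.  Remove either comment and
     * -Wimplicit-fallthrough should complain.
     */
    #include <stdio.h>

    static void
    run_phases(int start_phase)
    {
        switch (start_phase)
        {
            case 0:
                printf("phase 0: elect a worker\n");
                /* FALLTHROUGH */
            case 1:
                printf("phase 1: allocate\n");
                /* Fall through. */
            case 2:
                printf("phase 2: finish\n");
                break;
            default:
                break;
        }
    }

    int
    main(void)
    {
        run_phases(0);          /* runs all three phases */
        run_phases(2);          /* a late starter runs only the last */
        return 0;
    }
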
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 4516c6346bd..5da13ada726 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -256,7 +256,7 @@ MultiExecParallelHash(HashState *node)
* way, wait for everyone to arrive here so we can proceed.
*/
BarrierArriveAndWait(build_barrier, WAIT_EVENT_HASH_BUILD_ALLOCATING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_BUILD_HASHING_INNER:
@@ -1181,13 +1181,13 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
/* All other participants just flush their tuples to disk. */
ExecParallelHashCloseBatchAccessors(hashtable);
}
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BATCHES_ALLOCATING:
/* Wait for the above to be finished. */
BarrierArriveAndWait(&pstate->grow_batches_barrier,
WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BATCHES_REPARTITIONING:
/* Make sure that we have the current dimensions and buckets. */
@@ -1200,7 +1200,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
/* Wait for the above to be finished. */
BarrierArriveAndWait(&pstate->grow_batches_barrier,
WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BATCHES_DECIDING:
@@ -1255,7 +1255,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
dsa_free(hashtable->area, pstate->old_batches);
pstate->old_batches = InvalidDsaPointer;
}
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BATCHES_FINISHING:
/* Wait for the above to complete. */
@@ -1533,13 +1533,13 @@ ExecParallelHashIncreaseNumBuckets(HashJoinTable hashtable)
/* Clear the flag. */
pstate->growth = PHJ_GROWTH_OK;
}
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BUCKETS_ALLOCATING:
/* Wait for the above to complete. */
BarrierArriveAndWait(&pstate->grow_buckets_barrier,
WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_GROW_BUCKETS_REINSERTING:
/* Reinsert all tuples into the hash table. */
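
For context on the nodeHash.c hunks (a sketch, not the real code): the PHJ_GROW_* cases form a phased barrier state machine. Each participant switches on the barrier's current phase, and the deliberate fall-throughs mean that a participant arriving early runs every remaining phase in order, while one that attaches late simply starts at whatever phase the group has already reached. The helper below is a stand-in for PostgreSQL's BarrierArriveAndWait(), and the phase names and printed work are simplified for illustration:

    #include <stdio.h>

    enum grow_phase
    {
        GROW_BATCHES_ELECTING,
        GROW_BATCHES_ALLOCATING,
        GROW_BATCHES_REPARTITIONING,
        GROW_BATCHES_FINISHING
    };

    /* Stand-in for BarrierArriveAndWait(); here it only reports the wait. */
    static void
    arrive_and_wait(const char *wait_event)
    {
        printf("wait at barrier: %s\n", wait_event);
    }

    static void
    grow_batches(enum grow_phase phase)
    {
        switch (phase)
        {
            case GROW_BATCHES_ELECTING:
                printf("one participant allocates the new batch array\n");
                /* Fall through. */
            case GROW_BATCHES_ALLOCATING:
                arrive_and_wait("allocating");
                /* Fall through. */
            case GROW_BATCHES_REPARTITIONING:
                printf("every participant repartitions its tuples\n");
                arrive_and_wait("repartitioning");
                /* Fall through. */
            case GROW_BATCHES_FINISHING:
                printf("clean up and adopt the new batches\n");
                break;
        }
    }

    int
    main(void)
    {
        grow_batches(GROW_BATCHES_ELECTING);        /* early: all phases */
        grow_batches(GROW_BATCHES_REPARTITIONING);  /* late: catches up here */
        return 0;
    }
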
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 6159a6957f4..cc8edacdd01 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -340,7 +340,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
else
node->hj_JoinState = HJ_NEED_NEW_OUTER;
- /* FALLTHROUGH */
+ /* FALL THRU */
case HJ_NEED_NEW_OUTER:
@@ -413,7 +413,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
/* OK, let's scan the bucket for matches */
node->hj_JoinState = HJ_SCAN_BUCKET;
- /* FALLTHROUGH */
+ /* FALL THRU */
case HJ_SCAN_BUCKET:
@@ -1137,13 +1137,13 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
if (BarrierArriveAndWait(batch_barrier,
WAIT_EVENT_HASH_BATCH_ELECTING))
ExecParallelHashTableAlloc(hashtable, batchno);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_BATCH_ALLOCATING:
/* Wait for allocation to complete. */
BarrierArriveAndWait(batch_barrier,
WAIT_EVENT_HASH_BATCH_ALLOCATING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_BATCH_LOADING:
/* Start (or join in) loading tuples. */
@@ -1163,7 +1163,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
sts_end_parallel_scan(inner_tuples);
BarrierArriveAndWait(batch_barrier,
WAIT_EVENT_HASH_BATCH_LOADING);
- /* FALLTHROUGH */
+ /* Fall through. */
case PHJ_BATCH_PROBING:
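
One detail in the nodeHashjoin.c hunk above: BarrierArriveAndWait()'s return value elects a single participant to run ExecParallelHashTableAlloc() for the batch, after which everyone falls through into the allocation-wait and loading phases together. A sketch of that election-then-fall-through shape, with a stand-in barrier (the participant numbers and the "participant 0 wins" rule are invented here):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Stand-in for BarrierArriveAndWait(): pretend participant 0 is the
     * one the barrier elects.  In the real code the barrier itself picks
     * one of the participants that waited.
     */
    static bool
    arrive_and_wait(int participant, const char *wait_event)
    {
        printf("participant %d waits: %s\n", participant, wait_event);
        return participant == 0;
    }

    static void
    join_batch(int participant, int phase)
    {
        switch (phase)
        {
            case 0:                 /* electing */
                if (arrive_and_wait(participant, "electing"))
                    printf("participant %d allocates the batch's hash table\n",
                           participant);
                /* Fall through. */
            case 1:                 /* allocating */
                arrive_and_wait(participant, "allocating");
                /* Fall through. */
            case 2:                 /* loading, then probing */
                printf("participant %d loads tuples, then probes\n",
                       participant);
                break;
        }
    }

    int
    main(void)
    {
        join_batch(0, 0);           /* elected: does the serial allocation */
        join_batch(1, 0);           /* not elected: waits, then loads */
        return 0;
    }
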
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index 371b15c14a7..d85cf7d93e8 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -69,7 +69,7 @@ ExecLimit(PlanState *pstate)
*/
recompute_limits(node);
- /* FALLTHROUGH */
+ /* FALL THRU */
case LIMIT_RESCAN:
@@ -216,7 +216,7 @@ ExecLimit(PlanState *pstate)
}
Assert(node->lstate == LIMIT_WINDOWEND_TIES);
- /* FALLTHROUGH */
+ /* FALL THRU */
case LIMIT_WINDOWEND_TIES:
if (ScanDirectionIsForward(direction))
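
The HJ_* and LIMIT_* switches changed above sit inside an outer loop rather than under a barrier: a case records the node's next state and then deliberately falls through into that state's case, so the current call keeps making progress instead of taking another trip around the loop. A simplified, self-contained sketch of that shape (the state names echo nodeLimit.c, but the row-counting logic is invented):

    #include <stdio.h>

    enum limit_state
    {
        LIMIT_INITIAL,
        LIMIT_RESCAN,
        LIMIT_DONE
    };

    static void
    exec_limit(int rows_available, int limit)
    {
        enum limit_state state = LIMIT_INITIAL;
        int     returned = 0;

        for (;;)
        {
            switch (state)
            {
                case LIMIT_INITIAL:
                    printf("compute the limit/offset values\n");
                    state = LIMIT_RESCAN;
                    /* FALL THRU */
                case LIMIT_RESCAN:
                    if (returned >= limit || returned >= rows_available)
                    {
                        state = LIMIT_DONE;
                        break;
                    }
                    printf("return row %d\n", ++returned);
                    break;
                case LIMIT_DONE:
                    printf("limit reached\n");
                    return;
            }
        }
    }

    int
    main(void)
    {
        exec_limit(10, 3);          /* returns three rows, then stops */
        return 0;
    }
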