about | summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  src/backend/executor/nodeIncrementalSort.c |  4
-rw-r--r--  src/backend/executor/nodeSort.c            |  2
-rw-r--r--  src/backend/utils/sort/tuplesort.c         | 20
-rw-r--r--  src/include/utils/tuplesort.h              |  3
4 files changed, 24 insertions(+), 5 deletions(-)
diff --git a/src/backend/executor/nodeIncrementalSort.c b/src/backend/executor/nodeIncrementalSort.c
index 4f50bc845da..d1b97d46bc0 100644
--- a/src/backend/executor/nodeIncrementalSort.c
+++ b/src/backend/executor/nodeIncrementalSort.c
@@ -315,7 +315,7 @@ switchToPresortedPrefixMode(PlanState *pstate)
&(plannode->sort.nullsFirst[nPresortedCols]),
work_mem,
NULL,
- TUPLESORT_NONE);
+ node->bounded ? TUPLESORT_ALLOWBOUNDED : TUPLESORT_NONE);
node->prefixsort_state = prefixsort_state;
}
else
@@ -616,6 +616,8 @@ ExecIncrementalSort(PlanState *pstate)
plannode->sort.nullsFirst,
work_mem,
NULL,
+ node->bounded ?
+ TUPLESORT_ALLOWBOUNDED :
TUPLESORT_NONE);
node->fullsort_state = fullsort_state;
}
diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c
index a113d737955..3c28d60c3ef 100644
--- a/src/backend/executor/nodeSort.c
+++ b/src/backend/executor/nodeSort.c
@@ -99,6 +99,8 @@ ExecSort(PlanState *pstate)
if (node->randomAccess)
tuplesortopts |= TUPLESORT_RANDOMACCESS;
+ if (node->bounded)
+ tuplesortopts |= TUPLESORT_ALLOWBOUNDED;
if (node->datumSort)
tuplesortstate = tuplesort_begin_datum(TupleDescAttr(tupDesc, 0)->atttypid,
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index a8a5cc52047..571fb955327 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -935,11 +935,21 @@ tuplesort_begin_batch(Tuplesortstate *state)
* eases memory management. Resetting at key points reduces
* fragmentation. Note that the memtuples array of SortTuples is allocated
* in the parent context, not this context, because there is no need to
- * free memtuples early.
+ * free memtuples early. For bounded sorts, tuples may be pfreed in any
+ * order, so we use a regular aset.c context so that it can make use of
+ * free'd memory. When the sort is not bounded, we make use of a
+ * generation.c context as this keeps allocations more compact with less
+ * wastage. Allocations are also slightly more CPU efficient.
*/
- state->tuplecontext = AllocSetContextCreate(state->sortcontext,
- "Caller tuples",
- ALLOCSET_DEFAULT_SIZES);
+ if (state->sortopt & TUPLESORT_ALLOWBOUNDED)
+ state->tuplecontext = AllocSetContextCreate(state->sortcontext,
+ "Caller tuples",
+ ALLOCSET_DEFAULT_SIZES);
+ else
+ state->tuplecontext = GenerationContextCreate(state->sortcontext,
+ "Caller tuples",
+ ALLOCSET_DEFAULT_SIZES);
+
state->status = TSS_INITIAL;
state->bounded = false;
@@ -1444,6 +1454,8 @@ tuplesort_set_bound(Tuplesortstate *state, int64 bound)
{
/* Assert we're called before loading any tuples */
Assert(state->status == TSS_INITIAL && state->memtupcount == 0);
+ /* Assert we allow bounded sorts */
+ Assert(state->sortopt & TUPLESORT_ALLOWBOUNDED);
/* Can't set the bound twice, either */
Assert(!state->bounded);
/* Also, this shouldn't be called in a parallel worker */
diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h
index 345f4ce8024..364cf132fcb 100644
--- a/src/include/utils/tuplesort.h
+++ b/src/include/utils/tuplesort.h
@@ -92,6 +92,9 @@ typedef enum
/* specifies whether non-sequential access to the sort result is required */
#define TUPLESORT_RANDOMACCESS (1 << 0)
+/* specifies if the tuplesort is able to support bounded sorts */
+#define TUPLESORT_ALLOWBOUNDED (1 << 1)
+
typedef struct TuplesortInstrumentation
{
TuplesortMethod sortMethod; /* sort algorithm used */