Diffstat (limited to 'src/backend/executor/nodeAgg.c')
-rw-r--r--  src/backend/executor/nodeAgg.c  323
1 file changed, 162 insertions, 161 deletions
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 9f4229de600..8553db0dd07 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -317,11 +317,11 @@
*/
typedef struct HashTapeInfo
{
- LogicalTapeSet *tapeset;
- int ntapes;
- int *freetapes;
- int nfreetapes;
- int freetapes_alloc;
+    LogicalTapeSet *tapeset;
+    int         ntapes;
+    int        *freetapes;
+    int         nfreetapes;
+    int         freetapes_alloc;
} HashTapeInfo;
/*
@@ -336,11 +336,11 @@ typedef struct HashTapeInfo
typedef struct HashAggSpill
{
LogicalTapeSet *tapeset; /* borrowed reference to tape set */
- int npartitions; /* number of partitions */
- int *partitions; /* spill partition tape numbers */
- int64 *ntuples; /* number of tuples in each partition */
- uint32 mask; /* mask to find partition from hash value */
- int shift; /* after masking, shift by this amount */
+    int         npartitions;    /* number of partitions */
+    int        *partitions;     /* spill partition tape numbers */
+    int64      *ntuples;        /* number of tuples in each partition */
+    uint32      mask;           /* mask to find partition from hash value */
+    int         shift;          /* after masking, shift by this amount */
} HashAggSpill;
/*
@@ -354,11 +354,11 @@ typedef struct HashAggSpill
*/
typedef struct HashAggBatch
{
- int setno; /* grouping set */
- int used_bits; /* number of bits of hash already used */
- LogicalTapeSet *tapeset; /* borrowed reference to tape set */
- int input_tapenum; /* input partition tape */
- int64 input_tuples; /* number of tuples in this batch */
+    int         setno;          /* grouping set */
+    int         used_bits;      /* number of bits of hash already used */
+    LogicalTapeSet *tapeset;    /* borrowed reference to tape set */
+    int         input_tapenum;  /* input partition tape */
+    int64       input_tuples;   /* number of tuples in this batch */
} HashAggBatch;
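Aside (not part of this commit): the mask/shift fields above are what
hashagg_spill_init() sets up and hashagg_spill_tuple() consumes when routing
a tuple to a spill partition. A minimal standalone sketch of that
arithmetic; the helper names are invented for illustration:

    #include <stdint.h>

    /* Derive mask/shift for (1 << partition_bits) partitions, skipping
     * the used_bits highest hash bits already consumed by earlier spill
     * generations. */
    static void
    sketch_spill_setup(uint32_t *mask, int *shift,
                       int used_bits, int partition_bits)
    {
        *shift = 32 - used_bits - partition_bits;
        *mask = ((1u << partition_bits) - 1) << *shift;
    }

    /* Select a spill partition from a hash value. */
    static int
    sketch_spill_partition(uint32_t hash, uint32_t mask, int shift)
    {
        return (int) ((hash & mask) >> shift);
    }

Because each batch records used_bits, a batch that spills again partitions
on the next lower group of bits of the same hash value, never reusing bits.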
static void select_current_set(AggState *aggstate, int setno, bool is_hash);
@@ -402,10 +402,10 @@ static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
static long hash_choose_num_buckets(double hashentrysize,
long estimated_nbuckets,
Size memory);
-static int hash_choose_num_partitions(uint64 input_groups,
- double hashentrysize,
- int used_bits,
- int *log2_npartittions);
+static int  hash_choose_num_partitions(uint64 input_groups,
+                                       double hashentrysize,
+                                       int used_bits,
+                                       int *log2_npartittions);
static AggStatePerGroup lookup_hash_entry(AggState *aggstate, uint32 hash,
bool *in_hash_table);
static void lookup_hash_entries(AggState *aggstate);
@@ -786,14 +786,14 @@ advance_transition_function(AggState *aggstate,
* pointer to a R/W expanded object that is already a child of the
* aggcontext, assume we can adopt that value without copying it.
*
- * It's safe to compare newVal with pergroup->transValue without
- * regard for either being NULL, because ExecAggTransReparent()
- * takes care to set transValue to 0 when NULL. Otherwise we could
- * end up accidentally not reparenting, when the transValue has
- * the same numerical value as newValue, despite being NULL. This
- * is a somewhat hot path, making it undesirable to instead solve
- * this with another branch for the common case of the transition
- * function returning its (modified) input argument.
+ * It's safe to compare newVal with pergroup->transValue without regard
+ * for either being NULL, because ExecAggTransReparent() takes care to set
+ * transValue to 0 when NULL. Otherwise we could end up accidentally not
+ * reparenting, when the transValue has the same numerical value as
+ * newValue, despite being NULL. This is a somewhat hot path, making it
+ * undesirable to instead solve this with another branch for the common
+ * case of the transition function returning its (modified) input
+ * argument.
*/
if (!pertrans->transtypeByVal &&
DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
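Aside (not part of this commit): the guard above depends on the zeroing
invariant the comment describes. A compact illustration with a stand-in
Datum type; the names here are invented for the sketch:

    #include <stdint.h>
    #include <stdbool.h>

    typedef uintptr_t Datum;    /* stand-in for PostgreSQL's Datum */

    /* Store a transition value, forcing NULLs to a literal zero Datum so
     * that a later bare pointer comparison against a new by-ref result
     * can never match accidentally and skip reparenting. */
    static Datum
    store_trans_value(Datum newVal, bool isnull)
    {
        return isnull ? (Datum) 0 : newVal;
    }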
@@ -1206,7 +1206,7 @@ prepare_hash_slot(AggState *aggstate)
TupleTableSlot *inputslot = aggstate->tmpcontext->ecxt_outertuple;
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
- int i;
+    int         i;
/* transfer just the needed columns into hashslot */
slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
@@ -1438,13 +1438,13 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
static void
build_hash_tables(AggState *aggstate)
{
- int setno;
+    int         setno;
for (setno = 0; setno < aggstate->num_hashes; ++setno)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- long nbuckets;
- Size memory;
+        long        nbuckets;
+        Size        memory;
if (perhash->hashtable != NULL)
{
@@ -1457,8 +1457,9 @@ build_hash_tables(AggState *aggstate)
memory = aggstate->hash_mem_limit / aggstate->num_hashes;
/* choose reasonable number of buckets per hashtable */
- nbuckets = hash_choose_num_buckets(
- aggstate->hashentrysize, perhash->aggnode->numGroups, memory);
+        nbuckets = hash_choose_num_buckets(aggstate->hashentrysize,
+                                           perhash->aggnode->numGroups,
+                                           memory);
build_hash_table(aggstate, setno, nbuckets);
}
@@ -1473,10 +1474,10 @@ static void
build_hash_table(AggState *aggstate, int setno, long nbuckets)
{
AggStatePerHash perhash = &aggstate->perhash[setno];
- MemoryContext metacxt = aggstate->hash_metacxt;
- MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
- MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
- Size additionalsize;
+    MemoryContext metacxt = aggstate->hash_metacxt;
+    MemoryContext hashcxt = aggstate->hashcontext->ecxt_per_tuple_memory;
+    MemoryContext tmpcxt = aggstate->tmpcontext->ecxt_per_tuple_memory;
+    Size        additionalsize;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
@@ -1489,20 +1490,19 @@ build_hash_table(AggState *aggstate, int setno, long nbuckets)
*/
additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
- perhash->hashtable = BuildTupleHashTableExt(
- &aggstate->ss.ps,
- perhash->hashslot->tts_tupleDescriptor,
- perhash->numCols,
- perhash->hashGrpColIdxHash,
- perhash->eqfuncoids,
- perhash->hashfunctions,
- perhash->aggnode->grpCollations,
- nbuckets,
- additionalsize,
- metacxt,
- hashcxt,
- tmpcxt,
- DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
+    perhash->hashtable = BuildTupleHashTableExt(&aggstate->ss.ps,
+                                                perhash->hashslot->tts_tupleDescriptor,
+                                                perhash->numCols,
+                                                perhash->hashGrpColIdxHash,
+                                                perhash->eqfuncoids,
+                                                perhash->hashfunctions,
+                                                perhash->aggnode->grpCollations,
+                                                nbuckets,
+                                                additionalsize,
+                                                metacxt,
+                                                hashcxt,
+                                                tmpcxt,
+                                                DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
}
/*
@@ -1648,12 +1648,12 @@ find_hash_columns(AggState *aggstate)
Size
hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
{
- Size tupleChunkSize;
- Size pergroupChunkSize;
- Size transitionChunkSize;
- Size tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
- tupleWidth);
- Size pergroupSize = numTrans * sizeof(AggStatePerGroupData);
+    Size        tupleChunkSize;
+    Size        pergroupChunkSize;
+    Size        transitionChunkSize;
+    Size        tupleSize = (MAXALIGN(SizeofMinimalTupleHeader) +
+                             tupleWidth);
+    Size        pergroupSize = numTrans * sizeof(AggStatePerGroupData);
tupleChunkSize = CHUNKHDRSZ + tupleSize;
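Aside (not part of this commit): the rest of this function adds
CHUNKHDRSZ-prefixed chunks for the per-group states and, when nonzero, the
transition space, then returns the sum plus the hash-table entry overhead.
A rough worked example under assumed 64-bit sizes (CHUNKHDRSZ = 16,
MAXALIGN'd minimal-tuple header = 16, sizeof(AggStatePerGroupData) = 16):
for one transition state, tupleWidth = 16 and transitionSpace = 0,

    tupleSize           = 16 + 16 = 32
    tupleChunkSize      = 16 + 32 = 48
    pergroupChunkSize   = 16 + 16 = 32
    transitionChunkSize = 0

giving roughly 80 bytes per group, plus sizeof(TupleHashEntryData).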
@@ -1695,24 +1695,24 @@ hash_agg_entry_size(int numTrans, Size tupleWidth, Size transitionSpace)
static void
hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
{
- AggStatePerPhase phase;
- int i = minslot ? 1 : 0;
- int j = nullcheck ? 1 : 0;
+    AggStatePerPhase phase;
+    int         i = minslot ? 1 : 0;
+    int         j = nullcheck ? 1 : 0;
Assert(aggstate->aggstrategy == AGG_HASHED ||
aggstate->aggstrategy == AGG_MIXED);
if (aggstate->aggstrategy == AGG_HASHED)
phase = &aggstate->phases[0];
- else /* AGG_MIXED */
+    else                        /* AGG_MIXED */
phase = &aggstate->phases[1];
if (phase->evaltrans_cache[i][j] == NULL)
{
- const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
- bool outerfixed = aggstate->ss.ps.outeropsfixed;
- bool dohash = true;
- bool dosort;
+        const TupleTableSlotOps *outerops = aggstate->ss.ps.outerops;
+        bool        outerfixed = aggstate->ss.ps.outeropsfixed;
+        bool        dohash = true;
+        bool        dosort;
dosort = aggstate->aggstrategy == AGG_MIXED ? true : false;
@@ -1723,8 +1723,9 @@ hashagg_recompile_expressions(AggState *aggstate, bool minslot, bool nullcheck)
aggstate->ss.ps.outeropsfixed = true;
}
- phase->evaltrans_cache[i][j] = ExecBuildAggTrans(
- aggstate, phase, dosort, dohash, nullcheck);
+        phase->evaltrans_cache[i][j] = ExecBuildAggTrans(aggstate, phase,
+                                                         dosort, dohash,
+                                                         nullcheck);
/* change back */
aggstate->ss.ps.outerops = outerops;
@@ -1747,8 +1748,8 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
Size *mem_limit, uint64 *ngroups_limit,
int *num_partitions)
{
- int npartitions;
- Size partition_mem;
+    int         npartitions;
+    Size        partition_mem;
/* if not expected to spill, use all of work_mem */
if (input_groups * hashentrysize < work_mem * 1024L)
@@ -1762,9 +1763,8 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
/*
* Calculate expected memory requirements for spilling, which is the size
- * of the buffers needed for all the tapes that need to be open at
- * once. Then, subtract that from the memory available for holding hash
- * tables.
+ * of the buffers needed for all the tapes that need to be open at once.
+ * Then, subtract that from the memory available for holding hash tables.
*/
npartitions = hash_choose_num_partitions(input_groups,
hashentrysize,
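Aside (not part of this commit): with both tape buffer sizes defined as
BLCKSZ (8192 bytes) in this file, the budget works out as follows for an
assumed work_mem of 4MB and 32 spill partitions:

    partition_mem = HASHAGG_READ_BUFFER_SIZE + HASHAGG_WRITE_BUFFER_SIZE * 32
                  = 8192 + 262144 = 270336 bytes (264kB)

    mem_limit     = 4096kB - 264kB = 3832kB

so the hash table gets whatever work_mem leaves over after the tape
buffers, subject to a floor of about three quarters of work_mem when many
partitions would otherwise eat the budget.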
@@ -1803,11 +1803,11 @@ hash_agg_set_limits(double hashentrysize, uint64 input_groups, int used_bits,
static void
hash_agg_check_limits(AggState *aggstate)
{
- uint64 ngroups = aggstate->hash_ngroups_current;
- Size meta_mem = MemoryContextMemAllocated(
- aggstate->hash_metacxt, true);
- Size hash_mem = MemoryContextMemAllocated(
- aggstate->hashcontext->ecxt_per_tuple_memory, true);
+    uint64      ngroups = aggstate->hash_ngroups_current;
+    Size        meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
+                                                     true);
+    Size        hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
+                                                     true);
/*
* Don't spill unless there's at least one group in the hash table so we
@@ -1841,13 +1841,12 @@ hash_agg_enter_spill_mode(AggState *aggstate)
hashagg_tapeinfo_init(aggstate);
- aggstate->hash_spills = palloc(
- sizeof(HashAggSpill) * aggstate->num_hashes);
+ aggstate->hash_spills = palloc(sizeof(HashAggSpill) * aggstate->num_hashes);
for (int setno = 0; setno < aggstate->num_hashes; setno++)
{
- AggStatePerHash perhash = &aggstate->perhash[setno];
- HashAggSpill *spill = &aggstate->hash_spills[setno];
+        AggStatePerHash perhash = &aggstate->perhash[setno];
+        HashAggSpill *spill = &aggstate->hash_spills[setno];
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
perhash->aggnode->numGroups,
@@ -1865,10 +1864,10 @@ hash_agg_enter_spill_mode(AggState *aggstate)
static void
hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
- Size meta_mem;
- Size hash_mem;
- Size buffer_mem;
- Size total_mem;
+    Size        meta_mem;
+    Size        hash_mem;
+    Size        buffer_mem;
+    Size        total_mem;
if (aggstate->aggstrategy != AGG_MIXED &&
aggstate->aggstrategy != AGG_HASHED)
@@ -1878,8 +1877,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
/* memory for the group keys and transition states */
- hash_mem = MemoryContextMemAllocated(
- aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
/* memory for read/write tape buffers, if spilled */
buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
@@ -1894,8 +1892,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
/* update disk usage */
if (aggstate->hash_tapeinfo != NULL)
{
- uint64 disk_used = LogicalTapeSetBlocks(
- aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
+        uint64      disk_used = LogicalTapeSetBlocks(aggstate->hash_tapeinfo->tapeset) * (BLCKSZ / 1024);
if (aggstate->hash_disk_used < disk_used)
aggstate->hash_disk_used = disk_used;
@@ -1906,7 +1903,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
aggstate->hashentrysize =
sizeof(TupleHashEntryData) +
- (hash_mem / (double)aggstate->hash_ngroups_current);
+ (hash_mem / (double) aggstate->hash_ngroups_current);
}
}
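Aside (not part of this commit): the division above turns observed memory
use into a per-group running estimate. For example, if hash_mem is 1MB
when 8192 groups are present, each group is charged 128 bytes plus the
fixed sizeof(TupleHashEntryData) overhead; later spills use this refined
figure instead of the planner's estimate when choosing partition counts,
as seen in the hashagg_spill_init() call sites elsewhere in this diff.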
@@ -1916,8 +1913,8 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
static long
hash_choose_num_buckets(double hashentrysize, long ngroups, Size memory)
{
- long max_nbuckets;
- long nbuckets = ngroups;
+    long        max_nbuckets;
+    long        nbuckets = ngroups;
max_nbuckets = memory / hashentrysize;
@@ -1943,10 +1940,10 @@ static int
hash_choose_num_partitions(uint64 input_groups, double hashentrysize,
int used_bits, int *log2_npartitions)
{
- Size mem_wanted;
- int partition_limit;
- int npartitions;
- int partition_bits;
+    Size        mem_wanted;
+    int         partition_limit;
+    int         npartitions;
+    int         partition_bits;
/*
* Avoid creating so many partitions that the memory requirements of the
@@ -2005,8 +2002,8 @@ lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table)
AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
TupleTableSlot *hashslot = perhash->hashslot;
TupleHashEntryData *entry;
- bool isnew = false;
- bool *p_isnew;
+    bool        isnew = false;
+    bool       *p_isnew;
/* if hash table already spilled, don't create new entries */
p_isnew = aggstate->hash_spill_mode ? NULL : &isnew;
@@ -2025,8 +2022,8 @@ lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table)
if (isnew)
{
- AggStatePerGroup pergroup;
- int transno;
+        AggStatePerGroup pergroup;
+        int         transno;
aggstate->hash_ngroups_current++;
hash_agg_check_limits(aggstate);
@@ -2083,9 +2080,9 @@ lookup_hash_entries(AggState *aggstate)
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
- AggStatePerHash perhash = &aggstate->perhash[setno];
- uint32 hash;
- bool in_hash_table;
+        AggStatePerHash perhash = &aggstate->perhash[setno];
+        uint32      hash;
+        bool        in_hash_table;
select_current_set(aggstate, setno, true);
prepare_hash_slot(aggstate);
@@ -2095,8 +2092,8 @@ lookup_hash_entries(AggState *aggstate)
/* check to see if we need to spill the tuple for this grouping set */
if (!in_hash_table)
{
- HashAggSpill *spill = &aggstate->hash_spills[setno];
- TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
+            HashAggSpill *spill = &aggstate->hash_spills[setno];
+            TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
if (spill->partitions == NULL)
hashagg_spill_init(spill, aggstate->hash_tapeinfo, 0,
@@ -2560,11 +2557,11 @@ agg_fill_hash_table(AggState *aggstate)
static bool
agg_refill_hash_table(AggState *aggstate)
{
- HashAggBatch *batch;
- HashAggSpill spill;
- HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
- uint64 ngroups_estimate;
- bool spill_initialized = false;
+    HashAggBatch *batch;
+    HashAggSpill spill;
+    HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+    uint64      ngroups_estimate;
+    bool        spill_initialized = false;
if (aggstate->hash_batches == NIL)
return false;
@@ -2623,11 +2620,12 @@ agg_refill_hash_table(AggState *aggstate)
LogicalTapeRewindForRead(tapeinfo->tapeset, batch->input_tapenum,
HASHAGG_READ_BUFFER_SIZE);
- for (;;) {
- TupleTableSlot *slot = aggstate->hash_spill_slot;
- MinimalTuple tuple;
- uint32 hash;
- bool in_hash_table;
+    for (;;)
+    {
+        TupleTableSlot *slot = aggstate->hash_spill_slot;
+        MinimalTuple tuple;
+        uint32      hash;
+        bool        in_hash_table;
CHECK_FOR_INTERRUPTS();
@@ -2639,8 +2637,8 @@ agg_refill_hash_table(AggState *aggstate)
aggstate->tmpcontext->ecxt_outertuple = slot;
prepare_hash_slot(aggstate);
- aggstate->hash_pergroup[batch->setno] = lookup_hash_entry(
- aggstate, hash, &in_hash_table);
+        aggstate->hash_pergroup[batch->setno] =
+            lookup_hash_entry(aggstate, hash, &in_hash_table);
if (in_hash_table)
{
@@ -2657,7 +2655,7 @@ agg_refill_hash_table(AggState *aggstate)
*/
spill_initialized = true;
hashagg_spill_init(&spill, tapeinfo, batch->used_bits,
- ngroups_estimate, aggstate->hashentrysize);
+ ngroups_estimate, aggstate->hashentrysize);
}
/* no memory for a new group, spill */
hashagg_spill_tuple(&spill, slot, hash);
@@ -2851,8 +2849,8 @@ agg_retrieve_hash_table_in_memory(AggState *aggstate)
static void
hashagg_tapeinfo_init(AggState *aggstate)
{
- HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
- int init_tapes = 16; /* expanded dynamically */
+    HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
+    int         init_tapes = 16;    /* expanded dynamically */
tapeinfo->tapeset = LogicalTapeSetCreate(init_tapes, NULL, NULL, -1);
tapeinfo->ntapes = init_tapes;
@@ -2873,7 +2871,7 @@ static void
hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *partitions,
int npartitions)
{
- int partidx = 0;
+    int         partidx = 0;
/* use free tapes if available */
while (partidx < npartitions && tapeinfo->nfreetapes > 0)
@@ -2899,8 +2897,8 @@ hashagg_tapeinfo_release(HashTapeInfo *tapeinfo, int tapenum)
if (tapeinfo->freetapes_alloc == tapeinfo->nfreetapes)
{
tapeinfo->freetapes_alloc <<= 1;
- tapeinfo->freetapes = repalloc(
- tapeinfo->freetapes, tapeinfo->freetapes_alloc * sizeof(int));
+        tapeinfo->freetapes = repalloc(tapeinfo->freetapes,
+                                       tapeinfo->freetapes_alloc * sizeof(int));
}
tapeinfo->freetapes[tapeinfo->nfreetapes++] = tapenum;
}
@@ -2915,11 +2913,11 @@ static void
hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
uint64 input_groups, double hashentrysize)
{
- int npartitions;
- int partition_bits;
+    int         npartitions;
+    int         partition_bits;
- npartitions = hash_choose_num_partitions(
- input_groups, hashentrysize, used_bits, &partition_bits);
+    npartitions = hash_choose_num_partitions(input_groups, hashentrysize,
+                                             used_bits, &partition_bits);
spill->partitions = palloc0(sizeof(int) * npartitions);
spill->ntuples = palloc0(sizeof(int64) * npartitions);
@@ -2941,12 +2939,12 @@ hashagg_spill_init(HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
static Size
hashagg_spill_tuple(HashAggSpill *spill, TupleTableSlot *slot, uint32 hash)
{
- LogicalTapeSet *tapeset = spill->tapeset;
- int partition;
- MinimalTuple tuple;
- int tapenum;
- int total_written = 0;
- bool shouldFree;
+    LogicalTapeSet *tapeset = spill->tapeset;
+    int         partition;
+    MinimalTuple tuple;
+    int         tapenum;
+    int         total_written = 0;
+    bool        shouldFree;
Assert(spill->partitions != NULL);
@@ -2999,11 +2997,11 @@ static MinimalTuple
hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
{
LogicalTapeSet *tapeset = batch->tapeset;
- int tapenum = batch->input_tapenum;
- MinimalTuple tuple;
- uint32 t_len;
- size_t nread;
- uint32 hash;
+    int         tapenum = batch->input_tapenum;
+    MinimalTuple tuple;
+    uint32      t_len;
+    size_t      nread;
+    uint32      hash;
nread = LogicalTapeRead(tapeset, tapenum, &hash, sizeof(uint32));
if (nread == 0)
@@ -3027,7 +3025,7 @@ hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
tuple->t_len = t_len;
nread = LogicalTapeRead(tapeset, tapenum,
- (void *)((char *)tuple + sizeof(uint32)),
+ (void *) ((char *) tuple + sizeof(uint32)),
t_len - sizeof(uint32));
if (nread != t_len - sizeof(uint32))
ereport(ERROR,
@@ -3048,14 +3046,15 @@ hashagg_batch_read(HashAggBatch *batch, uint32 *hashp)
static void
hashagg_finish_initial_spills(AggState *aggstate)
{
- int setno;
- int total_npartitions = 0;
+    int         setno;
+    int         total_npartitions = 0;
if (aggstate->hash_spills != NULL)
{
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
+
total_npartitions += spill->npartitions;
hashagg_spill_finish(aggstate, spill, setno);
}
@@ -3081,16 +3080,16 @@ hashagg_finish_initial_spills(AggState *aggstate)
static void
hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
{
- int i;
- int used_bits = 32 - spill->shift;
+    int         i;
+    int         used_bits = 32 - spill->shift;
if (spill->npartitions == 0)
- return; /* didn't spill */
+        return;                 /* didn't spill */
for (i = 0; i < spill->npartitions; i++)
{
- int tapenum = spill->partitions[i];
- HashAggBatch *new_batch;
+        int         tapenum = spill->partitions[i];
+        HashAggBatch *new_batch;
/* if the partition is empty, don't create a new batch of work */
if (spill->ntuples[i] == 0)
@@ -3113,16 +3112,17 @@ hashagg_spill_finish(AggState *aggstate, HashAggSpill *spill, int setno)
static void
hashagg_reset_spill_state(AggState *aggstate)
{
- ListCell *lc;
+    ListCell   *lc;
/* free spills from initial pass */
if (aggstate->hash_spills != NULL)
{
- int setno;
+        int         setno;
for (setno = 0; setno < aggstate->num_hashes; setno++)
{
HashAggSpill *spill = &aggstate->hash_spills[setno];
+
pfree(spill->ntuples);
pfree(spill->partitions);
}
@@ -3133,7 +3133,8 @@ hashagg_reset_spill_state(AggState *aggstate)
/* free batches */
foreach(lc, aggstate->hash_batches)
{
- HashAggBatch *batch = (HashAggBatch*) lfirst(lc);
+ HashAggBatch *batch = (HashAggBatch *) lfirst(lc);
+
pfree(batch);
}
list_free(aggstate->hash_batches);
@@ -3142,7 +3143,7 @@ hashagg_reset_spill_state(AggState *aggstate)
/* close tape set */
if (aggstate->hash_tapeinfo != NULL)
{
- HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
+ HashTapeInfo *tapeinfo = aggstate->hash_tapeinfo;
LogicalTapeSetClose(tapeinfo->tapeset);
pfree(tapeinfo->freetapes);
@@ -3558,22 +3559,22 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
*/
if (use_hashing)
{
- Plan *outerplan = outerPlan(node);
- uint64 totalGroups = 0;
- int i;
+        Plan       *outerplan = outerPlan(node);
+        uint64      totalGroups = 0;
+        int         i;
- aggstate->hash_metacxt = AllocSetContextCreate(
- aggstate->ss.ps.state->es_query_cxt,
- "HashAgg meta context",
- ALLOCSET_DEFAULT_SIZES);
- aggstate->hash_spill_slot = ExecInitExtraTupleSlot(
- estate, scanDesc, &TTSOpsMinimalTuple);
+        aggstate->hash_metacxt = AllocSetContextCreate(aggstate->ss.ps.state->es_query_cxt,
+                                                       "HashAgg meta context",
+                                                       ALLOCSET_DEFAULT_SIZES);
+        aggstate->hash_spill_slot = ExecInitExtraTupleSlot(estate, scanDesc,
+                                                           &TTSOpsMinimalTuple);
/* this is an array of pointers, not structures */
aggstate->hash_pergroup = pergroups;
- aggstate->hashentrysize = hash_agg_entry_size(
- aggstate->numtrans, outerplan->plan_width, node->transitionSpace);
+        aggstate->hashentrysize = hash_agg_entry_size(aggstate->numtrans,
+                                                      outerplan->plan_width,
+                                                      node->transitionSpace);
/*
* Consider all of the grouping sets together when setting the limits