author     Peter Geoghegan <pg@bowt.ie>    2020-07-28 17:59:14 -0700
committer  Peter Geoghegan <pg@bowt.ie>    2020-07-28 17:59:14 -0700
commit     cdd7bd695bed552936e86b70ff1d234360bc5bea (patch)
tree       c356490c350a91b11e923461f3f44579e5db3b69 /src/backend/executor
parent     b6c15e71f33fe9aa7f38cc7bde26d420fbaaef5b (diff)
Rename another "hash_mem" local variable.

Missed by my commit 564ce621.

Backpatch: 13-, where disk-based hash aggregation was introduced.
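The rename clarifies that the local variable measures memory already allocated for group keys and transition states, as opposed to the "hash_mem" budget concept (hash_mem_limit, derived from the hash_mem_multiplier GUC introduced by 564ce621). Below is a minimal standalone sketch of the spill decision shown in the first hunk; it is not PostgreSQL source, and the names HashAggUsage and hashagg_should_spill are made up for illustration, with the budget parameter standing in for aggstate->hash_mem_limit.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative types only -- not the real AggState fields. */
    typedef struct HashAggUsage
    {
        size_t   meta_mem;      /* hash table metadata (bucket array, etc.) */
        size_t   hashkey_mem;   /* group keys + transition states           */
        uint64_t ngroups;       /* groups currently held in the hash table  */
    } HashAggUsage;

    /*
     * Decide whether the hash aggregate should enter spill mode.  Mirrors
     * the logic in the diff: never spill before at least one group exists
     * (so progress is guaranteed), then spill once metadata plus per-group
     * memory exceeds the budget, or the group-count limit is reached.
     */
    static bool
    hashagg_should_spill(const HashAggUsage *u, size_t hash_mem_limit,
                         uint64_t ngroups_limit)
    {
        if (u->ngroups == 0)
            return false;
        return (u->meta_mem + u->hashkey_mem > hash_mem_limit ||
                u->ngroups > ngroups_limit);
    }

    int
    main(void)
    {
        HashAggUsage u = {.meta_mem = 1 << 20, .hashkey_mem = 6 << 20,
                          .ngroups = 50000};

        /* 4 MB budget: 1 MB metadata + 6 MB keys/states exceeds it, so spill. */
        printf("spill: %d\n", hashagg_should_spill(&u, 4 << 20, 100000));
        return 0;
    }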
Diffstat (limited to 'src/backend/executor')
-rw-r--r--  src/backend/executor/nodeAgg.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 5ca1751f956..586509c50b2 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1839,15 +1839,15 @@ hash_agg_check_limits(AggState *aggstate)
uint64 ngroups = aggstate->hash_ngroups_current;
Size meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt,
true);
- Size hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
- true);
+ Size hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory,
+ true);
/*
* Don't spill unless there's at least one group in the hash table so we
* can be sure to make progress even in edge cases.
*/
if (aggstate->hash_ngroups_current > 0 &&
- (meta_mem + hash_mem > aggstate->hash_mem_limit ||
+ (meta_mem + hashkey_mem > aggstate->hash_mem_limit ||
ngroups > aggstate->hash_ngroups_limit))
{
hash_agg_enter_spill_mode(aggstate);
@@ -1898,7 +1898,7 @@ static void
hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
Size meta_mem;
- Size hash_mem;
+ Size hashkey_mem;
Size buffer_mem;
Size total_mem;
@@ -1910,7 +1910,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
meta_mem = MemoryContextMemAllocated(aggstate->hash_metacxt, true);
/* memory for the group keys and transition states */
- hash_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
+ hashkey_mem = MemoryContextMemAllocated(aggstate->hashcontext->ecxt_per_tuple_memory, true);
/* memory for read/write tape buffers, if spilled */
buffer_mem = npartitions * HASHAGG_WRITE_BUFFER_SIZE;
@@ -1918,7 +1918,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
buffer_mem += HASHAGG_READ_BUFFER_SIZE;
/* update peak mem */
- total_mem = meta_mem + hash_mem + buffer_mem;
+ total_mem = meta_mem + hashkey_mem + buffer_mem;
if (total_mem > aggstate->hash_mem_peak)
aggstate->hash_mem_peak = total_mem;
@@ -1936,7 +1936,7 @@ hash_agg_update_metrics(AggState *aggstate, bool from_tape, int npartitions)
{
aggstate->hashentrysize =
sizeof(TupleHashEntryData) +
- (hash_mem / (double) aggstate->hash_ngroups_current);
+ (hashkey_mem / (double) aggstate->hash_ngroups_current);
}
}
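The last hunk updates hashentrysize, the executor's running estimate of memory per group: a fixed per-entry overhead plus the observed key and transition-state memory divided by the current group count. A small standalone sketch of that arithmetic follows, using made-up numbers and a 64-byte stand-in for sizeof(TupleHashEntryData); the real value depends on the platform and struct layout.

    #include <stdio.h>

    int
    main(void)
    {
        const double entry_overhead = 64.0;             /* stand-in for sizeof(TupleHashEntryData) */
        const double hashkey_mem = 8.0 * 1024 * 1024;   /* bytes, as reported by MemoryContextMemAllocated */
        const double ngroups = 100000.0;

        /* Average bytes per group; used later to predict how many groups fit in the budget. */
        double hashentrysize = entry_overhead + hashkey_mem / ngroups;

        printf("estimated bytes per group: %.1f\n", hashentrysize);  /* ~147.9 */
        return 0;
    }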