Diffstat (limited to 'src/backend/optimizer')
-rw-r--r--  src/backend/optimizer/path/costsize.c   | 7
-rw-r--r--  src/backend/optimizer/plan/planner.c    | 6
-rw-r--r--  src/backend/optimizer/plan/subselect.c  | 6
-rw-r--r--  src/backend/optimizer/prep/prepunion.c  | 8
-rw-r--r--  src/backend/optimizer/util/pathnode.c   | 3
5 files changed, 11 insertions, 19 deletions
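
Note: this diff is the optimizer side of retiring get_hash_mem(), which reported the hash working-memory budget as an int count of kilobytes that every caller then scaled by 1024L; get_hash_memory_limit() instead hands back the limit directly in bytes. The helper's definition lives outside src/backend/optimizer and is not shown here, so the sketch below is only a plausible shape for it, assuming it derives the limit from the work_mem and hash_mem_multiplier settings and clamps the result to the size_t range.

#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the real GUC variables; the values here are illustrative. */
static int		work_mem = 4096;			/* kilobytes */
static double	hash_mem_multiplier = 1.0;	/* scales work_mem for hash nodes */

/*
 * Plausible sketch of a byte-based limit helper (not the committed code):
 * do the arithmetic in double so a large work_mem times a large multiplier
 * cannot overflow integer math, then clamp to what fits in a size_t.
 */
static size_t
get_hash_memory_limit(void)
{
	double		mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;

	if (mem_limit >= (double) SIZE_MAX)
		return SIZE_MAX;
	return (size_t) mem_limit;
}
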
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index b54cf34a8e1..30c8595f761 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -2438,7 +2438,7 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
Cost total_cost;
/* available cache space */
- hash_mem_bytes = get_hash_mem() * 1024L;
+ hash_mem_bytes = get_hash_memory_limit();
/*
* Set the number of bytes each cache entry should consume in the cache.
@@ -3860,7 +3860,6 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
Cost run_cost = workspace->run_cost;
int numbuckets = workspace->numbuckets;
int numbatches = workspace->numbatches;
- int hash_mem;
Cost cpu_per_tuple;
QualCost hash_qual_cost;
QualCost qp_qual_cost;
@@ -3986,10 +3985,8 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
* that way, so it will be unable to drive the batch size below hash_mem
* when this is true.)
*/
- hash_mem = get_hash_mem();
if (relation_byte_size(clamp_row_est(inner_path_rows * innermcvfreq),
- inner_path->pathtarget->width) >
- (hash_mem * 1024L))
+ inner_path->pathtarget->width) > get_hash_memory_limit())
startup_cost += disable_cost;
/*
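
Note: in costsize.c the limit is now consumed in bytes at both call sites. cost_memoize_rescan() treats it as the available cache space, and final_cost_hashjoin() compares it against the estimated footprint of the inner side's most-common-value rows before charging disable_cost. Since the estimate is a double and the limit a size_t, the limit is promoted to double in the comparison, and the old int-times-1024L step (which could overflow on platforms with a 32-bit long) is gone. The estimate comes from relation_byte_size(), which is defined elsewhere in costsize.c and not shown in this diff; the sketch below approximates it under that assumption.

#include "postgres.h"
#include "access/htup_details.h"	/* SizeofHeapTupleHeader */

/*
 * Approximate sketch (not from this diff) of the estimator feeding the
 * comparison in final_cost_hashjoin(): rows times the maxaligned data
 * width plus a maxaligned heap tuple header.
 */
static double
relation_byte_size(double tuples, int width)
{
	return tuples * (MAXALIGN(width) + MAXALIGN(SizeofHeapTupleHeader));
}
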
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 1868c4eff47..86816ffe19d 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -3668,7 +3668,7 @@ consider_groupingsets_paths(PlannerInfo *root,
double dNumGroups)
{
Query *parse = root->parse;
- int hash_mem = get_hash_mem();
+ Size hash_mem_limit = get_hash_memory_limit();
/*
* If we're not being offered sorted input, then only consider plans that
@@ -3734,7 +3734,7 @@ consider_groupingsets_paths(PlannerInfo *root,
* with. Override hash_mem in that case; otherwise, we'll rely on the
* sorted-input case to generate usable mixed paths.
*/
- if (hashsize > hash_mem * 1024L && gd->rollups)
+ if (hashsize > hash_mem_limit && gd->rollups)
return; /* nope, won't fit */
/*
@@ -3853,7 +3853,7 @@ consider_groupingsets_paths(PlannerInfo *root,
{
List *rollups = NIL;
List *hash_sets = list_copy(gd->unsortable_sets);
- double availspace = (hash_mem * 1024.0);
+ double availspace = hash_mem_limit;
ListCell *lc;
/*
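
Note: taken together, the three planner.c hunks move consider_groupingsets_paths() to byte units throughout: the hashed-grouping-sets plan is rejected when the estimated hashtable exceeds the byte limit, and availspace for the later choice of which rollups to hash is simply that same limit converted to double. The wrapper below is mine, for illustration only; it mirrors the first of those checks under the assumption that get_hash_memory_limit() is declared alongside the executor's hash code.

#include "postgres.h"

extern size_t get_hash_memory_limit(void);	/* from the executor's hash code */

/*
 * Illustration only (not planner.c's code): reject the hashed plan when
 * the estimated hashtable would not fit within the byte limit, unless
 * hashing is the only option because there are no rollups to fall back on.
 */
static bool
hashed_groupingsets_viable(double hashsize_estimate, bool have_rollups)
{
	Size		hash_mem_limit = get_hash_memory_limit();

	if (hashsize_estimate > hash_mem_limit && have_rollups)
		return false;			/* nope, won't fit */

	return true;
}
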
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index b5a61f39335..c9f7a09d102 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -724,7 +724,6 @@ static bool
subplan_is_hashable(Plan *plan)
{
double subquery_size;
- int hash_mem = get_hash_mem();
/*
* The estimated size of the subquery result must fit in hash_mem. (Note:
@@ -734,7 +733,7 @@ subplan_is_hashable(Plan *plan)
*/
subquery_size = plan->plan_rows *
(MAXALIGN(plan->plan_width) + MAXALIGN(SizeofHeapTupleHeader));
- if (subquery_size > hash_mem * 1024L)
+ if (subquery_size > get_hash_memory_limit())
return false;
return true;
@@ -749,7 +748,6 @@ static bool
subpath_is_hashable(Path *path)
{
double subquery_size;
- int hash_mem = get_hash_mem();
/*
* The estimated size of the subquery result must fit in hash_mem. (Note:
@@ -759,7 +757,7 @@ subpath_is_hashable(Path *path)
*/
subquery_size = path->rows *
(MAXALIGN(path->pathtarget->width) + MAXALIGN(SizeofHeapTupleHeader));
- if (subquery_size > hash_mem * 1024L)
+ if (subquery_size > get_hash_memory_limit())
return false;
return true;
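
Note: both hashability tests in subselect.c now reduce to the same byte comparison. Assembled from the hunks above (with the comment the diff elides paraphrased), subplan_is_hashable() reads roughly as the sketch below; get_hash_memory_limit() is assumed to be declared with the executor's hash code.

#include "postgres.h"
#include "access/htup_details.h"	/* SizeofHeapTupleHeader */
#include "nodes/plannodes.h"		/* Plan */

extern size_t get_hash_memory_limit(void);	/* from the executor's hash code */

static bool
subplan_is_hashable(Plan *plan)
{
	double		subquery_size;

	/*
	 * The estimated size of the subquery result must fit in hash_mem:
	 * per row, count the maxaligned data width plus a maxaligned heap
	 * tuple header, then compare the total against the byte limit.
	 */
	subquery_size = plan->plan_rows *
		(MAXALIGN(plan->plan_width) + MAXALIGN(SizeofHeapTupleHeader));
	if (subquery_size > get_hash_memory_limit())
		return false;

	return true;
}

subpath_is_hashable() is the same test phrased against a Path's rows and pathtarget->width.
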
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 037dfaacfd4..e9256a2d4d2 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -1019,7 +1019,7 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
const char *construct)
{
int numGroupCols = list_length(groupClauses);
- int hash_mem = get_hash_mem();
+ Size hash_mem_limit = get_hash_memory_limit();
bool can_sort;
bool can_hash;
Size hashentrysize;
@@ -1055,13 +1055,11 @@ choose_hashed_setop(PlannerInfo *root, List *groupClauses,
*/
hashentrysize = MAXALIGN(input_path->pathtarget->width) + MAXALIGN(SizeofMinimalTupleHeader);
- if (hashentrysize * dNumGroups > hash_mem * 1024L)
+ if (hashentrysize * dNumGroups > hash_mem_limit)
return false;
/*
- * See if the estimated cost is no more than doing it the other way. We
- * deliberately give the hash case more memory when hash_mem exceeds
- * standard work mem (i.e. when hash_mem_multiplier exceeds 1.0).
+ * See if the estimated cost is no more than doing it the other way.
*
* We need to consider input_plan + hashagg versus input_plan + sort +
* group. Note that the actual result plan might involve a SetOp or
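
Note: the prepunion.c hunk makes choose_hashed_setop() compare its hashtable estimate directly against the byte limit, and it also drops a sentence from the cost-comparison comment. Assembled from the hunk, the memory gate looks like the sketch below; the wrapper function name is mine, for illustration only.

#include "postgres.h"
#include "access/htup_details.h"	/* SizeofMinimalTupleHeader */
#include "nodes/pathnodes.h"		/* Path, PathTarget */

extern size_t get_hash_memory_limit(void);	/* from the executor's hash code */

/*
 * Illustration only: the memory gate in choose_hashed_setop() after the
 * patch, wrapped as a standalone predicate.  Returning false corresponds
 * to choose_hashed_setop() rejecting the hashed implementation, so the
 * caller builds the sort-based plan instead.
 */
static bool
setop_hashtable_fits(Path *input_path, double dNumGroups)
{
	Size		hash_mem_limit = get_hash_memory_limit();
	Size		hashentrysize;

	/* per-entry footprint: maxaligned data width plus a minimal tuple header */
	hashentrysize = MAXALIGN(input_path->pathtarget->width) +
		MAXALIGN(SizeofMinimalTupleHeader);

	/* reject hashing if the estimated hashtable exceeds the byte limit */
	return hashentrysize * dNumGroups <= hash_mem_limit;
}
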
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 0c94cbe767a..41cbf328c46 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1794,9 +1794,8 @@ create_unique_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
* planner.c).
*/
int hashentrysize = subpath->pathtarget->width + 64;
- int hash_mem = get_hash_mem();
- if (hashentrysize * pathnode->path.rows > hash_mem * 1024L)
+ if (hashentrysize * pathnode->path.rows > get_hash_memory_limit())
{
/*
* We should not try to hash. Hack the SpecialJoinInfo to