Diffstat (limited to 'src/backend/optimizer/path/costsize.c')
-rw-r--r--  src/backend/optimizer/path/costsize.c | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+), 0 deletions(-)
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 7812a8628fc..e480797ca8e 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -110,6 +110,7 @@ Cost disable_cost = 1.0e10;
bool enable_seqscan = true;
bool enable_indexscan = true;
+bool enable_indexonlyscan = true;
bool enable_bitmapscan = true;
bool enable_tidscan = true;
bool enable_sort = true;
@@ -119,6 +120,9 @@ bool enable_material = true;
bool enable_mergejoin = true;
bool enable_hashjoin = true;
+/* Possibly this should become a GUC too */
+static double visibility_fraction = 0.9;
+
typedef struct
{
PlannerInfo *root;
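The comment above leaves visibility_fraction as a hard-wired constant. For experimentation outside core, a loadable module could expose a similar knob through the custom-GUC API; the sketch below is hypothetical (the GUC name, and keeping a module-local copy of the variable, are assumptions, since the patch's static is private to costsize.c):

    #include "postgres.h"
    #include "fmgr.h"
    #include "utils/guc.h"

    PG_MODULE_MAGIC;

    /*
     * Hypothetical module-local knob; the patch's copy is a static in
     * costsize.c and is not reachable from an extension.
     */
    static double visibility_fraction = 0.9;

    void _PG_init(void);

    void
    _PG_init(void)
    {
        /* register a settable real-valued GUC with the patch's 0.9 default */
        DefineCustomRealVariable("costsize.visibility_fraction",
                                 "Assumed fraction of heap pages an "
                                 "index-only scan must still fetch.",
                                 NULL,
                                 &visibility_fraction,
                                 0.9,   /* boot value, matching the patch */
                                 0.0,   /* min */
                                 1.0,   /* max */
                                 PGC_USERSET,
                                 0,     /* flags */
                                 NULL, NULL, NULL);
    }

A core GUC would instead be added to the tables in guc.c, next to the existing enable_* booleans.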
@@ -210,6 +214,7 @@ cost_seqscan(Path *path, PlannerInfo *root,
* 'index' is the index to be used
* 'indexQuals' is the list of applicable qual clauses (implicit AND semantics)
* 'indexOrderBys' is the list of ORDER BY operators for amcanorderbyop indexes
+ * 'indexonly' is true if it's an index-only scan
* 'outer_rel' is the outer relation when we are considering using the index
* scan as the inside of a nestloop join (hence, some of the indexQuals
* are join clauses, and we should expect repeated scans of the index);
@@ -232,6 +237,7 @@ cost_index(IndexPath *path, PlannerInfo *root,
IndexOptInfo *index,
List *indexQuals,
List *indexOrderBys,
+ bool indexonly,
RelOptInfo *outer_rel)
{
RelOptInfo *baserel = index->rel;
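With the widened signature, every caller of cost_index() has to pass the new flag. A hedged sketch of an adjusted call site (variable names are illustrative; the real callers live in the planner's path-construction code, which this diff does not show):

    /* illustrative fragment: names mirror the parameter list documented above */
    cost_index(path, root, index,
               indexQuals, indexOrderBys,
               true,            /* indexonly: costing an index-only scan */
               outer_rel);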
@@ -314,6 +320,12 @@ cost_index(IndexPath *path, PlannerInfo *root,
* For partially-correlated indexes, we ought to charge somewhere between
* these two estimates. We currently interpolate linearly between the
* estimates based on the correlation squared (XXX is that appropriate?).
+ *
+ * If it's an index-only scan, then we will not need to fetch any heap
+ * pages for which the visibility map shows all tuples are visible.
+ * Unfortunately, we have no stats as to how much of the heap is
+ * all-visible, and that's likely to be a rather unstable number anyway.
+ * We use an arbitrary constant visibility_fraction to estimate this.
*----------
*/
if (outer_rel != NULL && outer_rel->rows > 1)
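The linear interpolation the comment describes happens a little further down in cost_index(), outside this hunk's context; in the existing code it is essentially:

    /* interpolate between the bounds using the correlation squared */
    csquared = indexCorrelation * indexCorrelation;
    run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);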
@@ -333,6 +345,8 @@ cost_index(IndexPath *path, PlannerInfo *root,
(double) index->pages,
root);
+ pages_fetched = ceil(pages_fetched * visibility_fraction);
+
max_IO_cost = (pages_fetched * spc_random_page_cost) / num_scans;
/*
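To get a feel for the adjustment: with visibility_fraction = 0.9, an estimate of 1234 fetched pages becomes ceil(1110.6) = 1111. A minimal standalone sketch of the patched nestloop-case arithmetic, with made-up inputs:

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        double visibility_fraction = 0.9;   /* constant from the patch */
        double pages_fetched = 1234.0;      /* hypothetical result of
                                             * index_pages_fetched() */
        double spc_random_page_cost = 4.0;  /* default random_page_cost */
        double num_scans = 10.0;            /* hypothetical repeat count */

        /* the same scaling the patch inserts before costing the fetches */
        pages_fetched = ceil(pages_fetched * visibility_fraction);

        /* amortize over the expected number of scans, as above */
        printf("pages fetched: %.0f, max_IO_cost: %.2f\n",
               pages_fetched,
               pages_fetched * spc_random_page_cost / num_scans);
        return 0;
    }

This prints 1111 pages and a max_IO_cost of 444.40.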
@@ -352,6 +366,8 @@ cost_index(IndexPath *path, PlannerInfo *root,
(double) index->pages,
root);
+ pages_fetched = ceil(pages_fetched * visibility_fraction);
+
min_IO_cost = (pages_fetched * spc_random_page_cost) / num_scans;
}
else
@@ -365,11 +381,16 @@ cost_index(IndexPath *path, PlannerInfo *root,
(double) index->pages,
root);
+ pages_fetched = ceil(pages_fetched * visibility_fraction);
+
/* max_IO_cost is for the perfectly uncorrelated case (csquared=0) */
max_IO_cost = pages_fetched * spc_random_page_cost;
/* min_IO_cost is for the perfectly correlated case (csquared=1) */
pages_fetched = ceil(indexSelectivity * (double) baserel->pages);
+
+ pages_fetched = ceil(pages_fetched * visibility_fraction);
+
min_IO_cost = spc_random_page_cost;
if (pages_fetched > 1)
min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;
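The correlated-case bound can be checked by hand: with the defaults random_page_cost = 4.0 and seq_page_cost = 1.0, a selectivity of 0.01 over a 10000-page heap gives 100 pages, reduced to 90 by visibility_fraction, so min_IO_cost = 4.0 + 89 * 1.0 = 93.0. A standalone sketch with those illustrative inputs:

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        double visibility_fraction = 0.9;   /* constant from the patch */
        double spc_random_page_cost = 4.0;  /* default random_page_cost */
        double spc_seq_page_cost = 1.0;     /* default seq_page_cost */
        double indexSelectivity = 0.01;     /* hypothetical */
        double rel_pages = 10000.0;         /* hypothetical baserel->pages */
        double pages_fetched;
        double min_IO_cost;

        /*
         * Perfectly correlated case: a selectivity-sized slice of the heap,
         * visited in physical order.
         */
        pages_fetched = ceil(indexSelectivity * rel_pages);
        pages_fetched = ceil(pages_fetched * visibility_fraction);

        /* first page at random cost, the rest sequential, as in the patch */
        min_IO_cost = spc_random_page_cost;
        if (pages_fetched > 1)
            min_IO_cost += (pages_fetched - 1) * spc_seq_page_cost;

        printf("pages: %.0f, min_IO_cost: %.2f\n", pages_fetched, min_IO_cost);
        return 0;
    }

Output: pages: 90, min_IO_cost: 93.00.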