Diffstat (limited to 'src/backend/optimizer/path/allpaths.c')
-rw-r--r--  src/backend/optimizer/path/allpaths.c  45
1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index a1e1a87c293..343b35aa326 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -111,6 +111,8 @@ static void set_tablefunc_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
+static void set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte);
static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
@@ -396,6 +398,9 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
else
set_cte_pathlist(root, rel, rte);
break;
+ case RTE_NAMEDTUPLESTORE:
+ set_namedtuplestore_pathlist(root, rel, rte);
+ break;
default:
elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
break;
@@ -464,6 +469,9 @@ set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
case RTE_CTE:
/* CTE reference --- fully handled during set_rel_size */
break;
+ case RTE_NAMEDTUPLESTORE:
+ /* tuplestore reference --- fully handled during set_rel_size */
+ break;
default:
elog(ERROR, "unexpected rtekind: %d", (int) rel->rtekind);
break;
@@ -639,6 +647,13 @@ set_rel_consider_parallel(PlannerInfo *root, RelOptInfo *rel,
* executed only once.
*/
return;
+
+ case RTE_NAMEDTUPLESTORE:
+ /*
+ * tuplestore cannot be shared, at least without more
+ * infrastructure to support that.
+ */
+ return;
}

/*
@@ -2090,6 +2105,36 @@ set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
}

/*
+ * set_namedtuplestore_pathlist
+ * Build the (single) access path for a named tuplestore RTE
+ *
+ * There's no need for a separate set_namedtuplestore_size phase, since we
+ * don't support join-qual-parameterized paths for tuplestores.
+ */
+static void
+set_namedtuplestore_pathlist(PlannerInfo *root, RelOptInfo *rel,
+ RangeTblEntry *rte)
+{
+ Relids required_outer;
+
+ /* Mark rel with estimated output rows, width, etc */
+ set_namedtuplestore_size_estimates(root, rel);
+
+ /*
+ * We don't support pushing join clauses into the quals of a tuplestore
+ * scan, but it could still have required parameterization due to LATERAL
+ * refs in its tlist.
+ */
+ required_outer = rel->lateral_relids;
+
+ /* Generate appropriate path */
+ add_path(rel, create_namedtuplestorescan_path(root, rel, required_outer));
+
+ /* Select cheapest path (pretty easy in this case...) */
+ set_cheapest(rel);
+}
+
+/*
* set_worktable_pathlist
* Build the (single) access path for a self-reference CTE RTE
*
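
The path produced by the add_path() call above comes from create_namedtuplestorescan_path(), which belongs to the same change but lives in src/backend/optimizer/util/pathnode.c rather than in this file. As a rough, hedged sketch of the Path-construction boilerplate such a helper follows (the body below, including the cost_namedtuplestorescan() call and the specific field assignments, is an illustration modeled on the other scan-path constructors, not the committed pathnode.c code):

/*
 * Illustrative sketch only: the committed helper lives in
 * src/backend/optimizer/util/pathnode.c and its exact body may differ.
 */
#include "postgres.h"

#include "optimizer/cost.h"
#include "optimizer/pathnode.h"

Path *
create_namedtuplestorescan_path(PlannerInfo *root, RelOptInfo *rel,
                                Relids required_outer)
{
    Path       *pathnode = makeNode(Path);

    pathnode->pathtype = T_NamedTuplestoreScan;
    pathnode->parent = rel;
    pathnode->pathtarget = rel->reltarget;
    pathnode->param_info = get_baserel_parampathinfo(root, rel,
                                                     required_outer);
    pathnode->parallel_aware = false;
    pathnode->parallel_safe = rel->consider_parallel;   /* stays false; see
                                                         * set_rel_consider_parallel */
    pathnode->parallel_workers = 0;
    pathnode->pathkeys = NIL;   /* a tuplestore scan is unordered */

    /* assumed companion costing routine in costsize.c */
    cost_namedtuplestorescan(pathnode, root, rel, pathnode->param_info);

    return pathnode;
}

Leaving pathkeys at NIL reflects that a tuplestore scan produces rows in no particular order, and parallel_safe simply inherits rel->consider_parallel, which the RTE_NAMEDTUPLESTORE case in set_rel_consider_parallel above leaves false.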
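
Similarly, set_namedtuplestore_size_estimates(), called at the top of the new pathlist function, is the costsize.c side of the change. Here is a hedged sketch of what such an estimator plausibly looks like: seed rel->tuples from a row-count hint attached to the ephemeral named relation's RTE, then let the generic baserel machinery derive output rows and width. The enrtuples field and the 1000-row fallback are assumptions of this sketch, not taken from the diff above.

/*
 * Hedged sketch of the costsize.c companion; the committed version may
 * differ (in particular, enrtuples and the 1000-row fallback are
 * assumptions here).
 */
#include "postgres.h"

#include "optimizer/cost.h"

void
set_namedtuplestore_size_estimates(PlannerInfo *root, RelOptInfo *rel)
{
    RangeTblEntry *rte;

    /* Should only be applied to base relations that are tuplestore references */
    Assert(rel->relid > 0);
    rte = root->simple_rte_array[rel->relid];
    Assert(rte->rtekind == RTE_NAMEDTUPLESTORE);

    /*
     * Trust whatever row-count estimate the code that registered the named
     * tuplestore supplied with the RTE; otherwise fall back to a default.
     */
    rel->tuples = rte->enrtuples;
    if (rel->tuples < 0)
        rel->tuples = 1000;

    /* Let the generic machinery fill in rel->rows, width, etc. */
    set_baserel_size_estimates(root, rel);
}

Falling through to set_baserel_size_estimates() keeps restriction-clause selectivity and output-width handling consistent with every other base relation.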