-rw-r--r--  src/backend/executor/nodeMemoize.c    | 15
-rw-r--r--  src/test/regress/expected/memoize.out | 31
-rw-r--r--  src/test/regress/sql/memoize.sql      | 23
3 files changed, 63 insertions(+), 6 deletions(-)
diff --git a/src/backend/executor/nodeMemoize.c b/src/backend/executor/nodeMemoize.c
index 262f79784e7..3c54ca54214 100644
--- a/src/backend/executor/nodeMemoize.c
+++ b/src/backend/executor/nodeMemoize.c
@@ -13,7 +13,7 @@
* Memoize nodes are intended to sit above parameterized nodes in the plan
* tree in order to cache results from them. The intention here is that a
* repeat scan with a parameter value that has already been seen by the node
- * can fetch tuples from the cache rather than having to re-scan the outer
+ * can fetch tuples from the cache rather than having to re-scan the inner
* node all over again. The query planner may choose to make use of one of
* these when it thinks rescans for previously seen values are likely enough
* to warrant adding the additional node.
@@ -207,7 +207,6 @@ MemoizeHash_hash(struct memoize_hash *tb, const MemoizeKey *key)
}
}
- ResetExprContext(econtext);
MemoryContextSwitchTo(oldcontext);
return murmurhash32(hashkey);
}
@@ -265,7 +264,6 @@ MemoizeHash_equal(struct memoize_hash *tb, const MemoizeKey *key1,
}
}
- ResetExprContext(econtext);
MemoryContextSwitchTo(oldcontext);
return match;
}
@@ -273,7 +271,7 @@ MemoizeHash_equal(struct memoize_hash *tb, const MemoizeKey *key1,
{
econtext->ecxt_innertuple = tslot;
econtext->ecxt_outertuple = pslot;
- return ExecQualAndReset(mstate->cache_eq_expr, econtext);
+ return ExecQual(mstate->cache_eq_expr, econtext);
}
}
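
The two ResetExprContext() calls removed above, and the switch from ExecQualAndReset() to ExecQual(), all address the same hazard: the probe slot's cache key values may have been evaluated into the per-tuple expression context, so resetting that context from inside the hash or equality callbacks frees memory the caller still references, which matters for by-reference (varlena) key types such as the numeric values in the new regression test. What follows is a minimal standalone C sketch of that hazard, not PostgreSQL code; tuple_arena, eval_cache_key() and hash_cache_key() are invented stand-ins for the per-tuple memory context and the Memoize callbacks.

#include <stdio.h>
#include <string.h>

/* toy stand-in for the executor's per-tuple memory context */
typedef struct
{
	char	buf[256];
	size_t	used;
} tuple_arena;

static void *
arena_alloc(tuple_arena *a, size_t n)
{
	void   *p = a->buf + a->used;

	a->used += n;
	return p;
}

static void
arena_reset(tuple_arena *a)
{
	/* "freed" memory: any pointer into the arena now reads an empty string */
	memset(a->buf, 0, sizeof(a->buf));
	a->used = 0;
}

/* "evaluate" a cache key expression into per-tuple memory */
static const char *
eval_cache_key(tuple_arena *a, const char *param)
{
	char   *key = arena_alloc(a, strlen(param) + 1);

	strcpy(key, param);
	return key;
}

/* hash callback; resetting the arena here mirrors the removed ResetExprContext() */
static unsigned int
hash_cache_key(tuple_arena *a, const char *key, int reset_inside)
{
	unsigned int h = 5381;

	for (; *key != '\0'; key++)
		h = h * 33 + (unsigned char) *key;
	if (reset_inside)
		arena_reset(a);
	return h;
}

int
main(void)
{
	tuple_arena a = {{0}, 0};
	const char *key = eval_cache_key(&a, "3.1415927");

	/* buggy ordering: the callback resets the arena, the caller's key is wiped */
	hash_cache_key(&a, key, 1);
	printf("key after in-callback reset: \"%s\"\n", key);	/* prints "" */

	/* fixed ordering: no reset inside the callback, the key stays usable */
	key = eval_cache_key(&a, "3.1415927");
	hash_cache_key(&a, key, 0);
	printf("key without in-callback reset: \"%s\"\n", key);	/* prints the value */

	arena_reset(&a);	/* reset once per cycle, as the ExecMemoize() hunk below does */
	return 0;
}
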
@@ -694,9 +692,18 @@ static TupleTableSlot *
ExecMemoize(PlanState *pstate)
{
MemoizeState *node = castNode(MemoizeState, pstate);
+ ExprContext *econtext = node->ss.ps.ps_ExprContext;
PlanState *outerNode;
TupleTableSlot *slot;
+ CHECK_FOR_INTERRUPTS();
+
+ /*
+ * Reset per-tuple memory context to free any expression evaluation
+ * storage allocated in the previous tuple cycle.
+ */
+ ResetExprContext(econtext);
+
switch (node->mstatus)
{
case MEMO_CACHE_LOOKUP:
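
The hunk above moves the reset to the other side of that boundary: ExecMemoize() clears the per-tuple context once at the start of each call, before any new cache keys are evaluated, so storage from the previous tuple cycle is released only once nothing can still be pointing into it. A rough standalone sketch of that shape, again with an invented arena rather than real executor memory contexts:

#include <stdio.h>
#include <string.h>

/* toy per-tuple arena, as in the earlier sketch */
static char arena_buf[256];
static size_t arena_used;

static void *
arena_alloc(size_t n)
{
	void   *p = arena_buf + arena_used;

	arena_used += n;
	return p;
}

static void
arena_reset(void)
{
	memset(arena_buf, 0, sizeof(arena_buf));
	arena_used = 0;
}

/*
 * One "tuple cycle": free leftovers from the previous call first, then
 * evaluate the cache key and use it for as long as this cycle needs it.
 */
static void
exec_cycle(const char *param)
{
	char	   *key;

	arena_reset();				/* like ResetExprContext() at the top of ExecMemoize() */

	key = arena_alloc(strlen(param) + 1);
	strcpy(key, param);			/* "evaluate" the cache key into per-cycle memory */

	/* hash lookups, equality checks and cache storage can all use key here */
	printf("cycle key: \"%s\"\n", key);
}

int
main(void)
{
	exec_cycle("3.1415927");
	exec_cycle("0.6366198");
	return 0;
}
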
diff --git a/src/test/regress/expected/memoize.out b/src/test/regress/expected/memoize.out
index f5202430f80..5be2cc99303 100644
--- a/src/test/regress/expected/memoize.out
+++ b/src/test/regress/expected/memoize.out
@@ -92,10 +92,39 @@ WHERE t1.unique1 < 1000;
1000 | 9.5000000000000000
(1 row)
+SET enable_mergejoin TO off;
+-- Test for varlena datatype with expr evaluation
+CREATE TABLE expr_key (x numeric, t text);
+INSERT INTO expr_key (x, t)
+SELECT d1::numeric, d1::text FROM (
+ SELECT round((d / pi())::numeric, 7) AS d1 FROM generate_series(1, 20) AS d
+) t;
+-- duplicate rows so we get some cache hits
+INSERT INTO expr_key SELECT * FROM expr_key;
+CREATE INDEX expr_key_idx_x_t ON expr_key (x, t);
+VACUUM ANALYZE expr_key;
+-- Ensure we get a cache miss and hit for each of the 20 distinct values
+SELECT explain_memoize('
+SELECT * FROM expr_key t1 INNER JOIN expr_key t2
+ON t1.x = t2.t::numeric AND t1.t::numeric = t2.x;', false);
+ explain_memoize
+-------------------------------------------------------------------------------------------
+ Nested Loop (actual rows=80 loops=N)
+ -> Seq Scan on expr_key t1 (actual rows=40 loops=N)
+ -> Memoize (actual rows=2 loops=N)
+ Cache Key: t1.x, (t1.t)::numeric
+ Cache Mode: logical
+ Hits: 20 Misses: 20 Evictions: Zero Overflows: 0 Memory Usage: NkB
+ -> Index Only Scan using expr_key_idx_x_t on expr_key t2 (actual rows=2 loops=N)
+ Index Cond: (x = (t1.t)::numeric)
+ Filter: (t1.x = (t)::numeric)
+ Heap Fetches: N
+(10 rows)
+
+DROP TABLE expr_key;
-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
SET work_mem TO '64kB';
SET hash_mem_multiplier TO 1.0;
-SET enable_mergejoin TO off;
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
diff --git a/src/test/regress/sql/memoize.sql b/src/test/regress/sql/memoize.sql
index 29ab1ea62d3..09a0ba1b3a6 100644
--- a/src/test/regress/sql/memoize.sql
+++ b/src/test/regress/sql/memoize.sql
@@ -57,10 +57,31 @@ LATERAL (SELECT t2.unique1 FROM tenk1 t2
WHERE t1.twenty = t2.unique1 OFFSET 0) t2
WHERE t1.unique1 < 1000;
+SET enable_mergejoin TO off;
+
+-- Test for varlena datatype with expr evaluation
+CREATE TABLE expr_key (x numeric, t text);
+INSERT INTO expr_key (x, t)
+SELECT d1::numeric, d1::text FROM (
+ SELECT round((d / pi())::numeric, 7) AS d1 FROM generate_series(1, 20) AS d
+) t;
+
+-- duplicate rows so we get some cache hits
+INSERT INTO expr_key SELECT * FROM expr_key;
+
+CREATE INDEX expr_key_idx_x_t ON expr_key (x, t);
+VACUUM ANALYZE expr_key;
+
+-- Ensure we get a cache miss and hit for each of the 20 distinct values
+SELECT explain_memoize('
+SELECT * FROM expr_key t1 INNER JOIN expr_key t2
+ON t1.x = t2.t::numeric AND t1.t::numeric = t2.x;', false);
+
+DROP TABLE expr_key;
+
-- Reduce work_mem and hash_mem_multiplier so that we see some cache evictions
SET work_mem TO '64kB';
SET hash_mem_multiplier TO 1.0;
-SET enable_mergejoin TO off;
-- Ensure we get some evictions. We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.