-- src/test/regress/expected/memoize.out
-- Perform tests on the Memoize node.
-- The cache hits/misses/evictions from the Memoize node can vary between
-- machines, so replace those counts with 'N'.  To still allow validation
-- when a measure was zero, a zero value is replaced with 'Zero' rather
-- than 'N'.
create function explain_memoize(query text, hide_hitmiss bool) returns setof text
language plpgsql as
$$
declare
    ln text;
begin
    for ln in
        execute format('explain (analyze, costs off, summary off, timing off) %s',
            query)
    loop
        if hide_hitmiss then
            ln := regexp_replace(ln, 'Hits: 0', 'Hits: Zero');
            ln := regexp_replace(ln, 'Hits: \d+', 'Hits: N');
            ln := regexp_replace(ln, 'Misses: 0', 'Misses: Zero');
            ln := regexp_replace(ln, 'Misses: \d+', 'Misses: N');
        end if;
        ln := regexp_replace(ln, 'Evictions: 0', 'Evictions: Zero');
        ln := regexp_replace(ln, 'Evictions: \d+', 'Evictions: N');
        ln := regexp_replace(ln, 'Memory Usage: \d+', 'Memory Usage: N');
        ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
        ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
        return next ln;
    end loop;
end;
$$;
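-- A quick sanity check of the masking regexes on a made-up sample line (an
-- explanatory addition, not part of the upstream test): non-zero counts
-- become 'N' while zero counts become 'Zero'.
SELECT regexp_replace(regexp_replace('Hits: 980  Evictions: 0',
                                     'Hits: \d+', 'Hits: N'),
                      'Evictions: 0', 'Evictions: Zero') AS masked;
          masked          
--------------------------
 Hits: N  Evictions: Zero
(1 row)
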
-- Ensure we get a memoize node on the inner side of the nested loop.
-- t2.twenty has only 20 distinct values, so of the 1000 lookups we expect
-- 20 cache misses (one per distinct key) and 980 hits.
SET enable_hashjoin TO off;
SET enable_bitmapscan TO off;
SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;', false);
                                      explain_memoize                                      
-------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=N)
   ->  Nested Loop (actual rows=1000 loops=N)
         ->  Seq Scan on tenk1 t2 (actual rows=1000 loops=N)
               Filter: (unique1 < 1000)
               Rows Removed by Filter: 9000
         ->  Memoize (actual rows=1 loops=N)
               Cache Key: t2.twenty
               Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
               ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
                     Index Cond: (unique1 = t2.twenty)
                     Heap Fetches: N
(11 rows)

-- And check we get the expected results.
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.twenty
WHERE t2.unique1 < 1000;
 count |        avg         
-------+--------------------
  1000 | 9.5000000000000000
(1 row)
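
-- As an extra cross-check (an explanatory addition): every matched t1.unique1
-- equals t2.twenty, and twenty holds unique1 mod 20, taking each value 0..19
-- exactly 50 times among the 1000 qualifying rows, so the mean must be
-- (0 + 1 + ... + 19) / 20 = 9.5.
SELECT AVG(twenty) FROM tenk1 WHERE unique1 < 1000;
        avg         
--------------------
 9.5000000000000000
(1 row)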

-- Try the same test with a LATERAL join.
SELECT explain_memoize('
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;', false);
                                      explain_memoize                                      
-------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=N)
   ->  Nested Loop (actual rows=1000 loops=N)
         ->  Seq Scan on tenk1 t1 (actual rows=1000 loops=N)
               Filter: (unique1 < 1000)
               Rows Removed by Filter: 9000
         ->  Memoize (actual rows=1 loops=N)
               Cache Key: t1.twenty
               Hits: 980  Misses: 20  Evictions: Zero  Overflows: 0  Memory Usage: NkB
               ->  Index Only Scan using tenk1_unique1 on tenk1 t2 (actual rows=1 loops=N)
                     Index Cond: (unique1 = t1.twenty)
                     Heap Fetches: N
(11 rows)

-- And check we get the expected results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;
 count |        avg         
-------+--------------------
  1000 | 9.5000000000000000
(1 row)

-- Reduce work_mem so that we see some cache evictions.
SET work_mem TO '64kB';
SET enable_mergejoin TO off;
-- Ensure we get some evictions.  We're unable to validate the hits and misses
-- here as the number of entries that fit in the cache at once will vary
-- between different machines.
SELECT explain_memoize('
SELECT COUNT(*),AVG(t1.unique1) FROM tenk1 t1
INNER JOIN tenk1 t2 ON t1.unique1 = t2.thousand
WHERE t2.unique1 < 1200;', true);
                                      explain_memoize                                      
-------------------------------------------------------------------------------------------
 Aggregate (actual rows=1 loops=N)
   ->  Nested Loop (actual rows=1200 loops=N)
         ->  Seq Scan on tenk1 t2 (actual rows=1200 loops=N)
               Filter: (unique1 < 1200)
               Rows Removed by Filter: 8800
         ->  Memoize (actual rows=1 loops=N)
               Cache Key: t2.thousand
               Hits: N  Misses: N  Evictions: N  Overflows: 0  Memory Usage: NkB
               ->  Index Only Scan using tenk1_unique1 on tenk1 t1 (actual rows=1 loops=N)
                     Index Cond: (unique1 = t2.thousand)
                     Heap Fetches: N
(11 rows)
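
-- The evictions above follow from the key count: the 1200 qualifying rows
-- cover 1000 distinct values of t2.thousand, far more entries than a 64kB
-- cache can hold at once (an explanatory cross-check, not part of the
-- upstream test).
SELECT COUNT(DISTINCT thousand) FROM tenk1 WHERE unique1 < 1200;
 count 
-------
  1000
(1 row)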

RESET enable_mergejoin;
RESET work_mem;
RESET enable_bitmapscan;
RESET enable_hashjoin;
-- Test parallel plans with Memoize.  Zero the parallel cost parameters and
-- the table size threshold so that even small scans of tenk1 can produce a
-- parallel plan.
SET min_parallel_table_scan_size TO 0;
SET parallel_setup_cost TO 0;
SET parallel_tuple_cost TO 0;
SET max_parallel_workers_per_gather TO 2;
-- Ensure we get a parallel plan.
EXPLAIN (COSTS OFF)
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;
                                  QUERY PLAN                                   
-------------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Partial Aggregate
               ->  Nested Loop
                     ->  Parallel Bitmap Heap Scan on tenk1 t1
                           Recheck Cond: (unique1 < 1000)
                           ->  Bitmap Index Scan on tenk1_unique1
                                 Index Cond: (unique1 < 1000)
                     ->  Memoize
                           Cache Key: t1.twenty
                           ->  Index Only Scan using tenk1_unique1 on tenk1 t2
                                 Index Cond: (unique1 = t1.twenty)
(13 rows)
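
-- Note (an explanatory addition): each parallel worker executes its own copy
-- of the Memoize node and builds its own private cache; the caches are not
-- shared between workers.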

-- And ensure the parallel plan gives us the correct results.
SELECT COUNT(*),AVG(t2.unique1) FROM tenk1 t1,
LATERAL (SELECT t2.unique1 FROM tenk1 t2 WHERE t1.twenty = t2.unique1) t2
WHERE t1.unique1 < 1000;
 count |        avg         
-------+--------------------
  1000 | 9.5000000000000000
(1 row)

RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;