author     Teodor Sigaev <teodor@sigaev.ru>    2018-04-23 17:59:17 +0300
committer  Teodor Sigaev <teodor@sigaev.ru>    2018-04-23 17:59:17 +0300
commit     a5ab8928d7d9e2cf76ed0590efbd9795ea9e5b5e (patch)
tree       5b08c17e1de26da5773d868c62662e72c16e2119 /src
parent     9975c128a1d1bd7e7366adf133b21540a2bc2450 (diff)
Make bms_prev_member work correctly with a 64 bit bitmapword
Commit 5c067521 erroneously coded bms_prev_member on the assumption that a
bitmapword always holds 32 bits, and started its search on what it thought was
the highest 8 bits of the word. That was not the case when bitmapwords were 64
bits.
In passing, add a test to exercise this function a little. Previously there was
no coverage at all.
David Rowley
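To make the fix concrete, here is a minimal standalone sketch (not the PostgreSQL source; `highest_member_of_word` and the test values are invented for illustration) of the byte-at-a-time scan bms_prev_member uses to locate the highest set bit in one word. The scan has to start at the word's topmost byte, i.e. at shift = BITS_PER_BITMAPWORD - 8 (56 for a 64-bit bitmapword); the old hard-coded 24 began in the middle of a 64-bit word, so members above bit 31 could be reported incorrectly.

```c
/*
 * Standalone sketch (not PostgreSQL code) of the byte-at-a-time scan used by
 * bms_prev_member to find the highest set bit of one bitmapword.  With a
 * 64-bit bitmapword the scan must begin at shift = BITS_PER_BITMAPWORD - 8;
 * starting at a hard-coded 24 mishandles members above bit 31.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BITMAPWORD 64
typedef uint64_t bitmapword;

/* caller guarantees w != 0, as in the real function */
static int
highest_member_of_word(bitmapword w)
{
    int     shift = BITS_PER_BITMAPWORD - 8;    /* was hard-coded 24 */

    /* skip all-zero bytes, working down from the top of the word */
    while ((w >> shift) == 0)
        shift -= 8;

    /*
     * Find the highest bit within that byte.  The real code uses a lookup
     * table here rather than a per-bit loop.
     */
    for (int bit = 7; bit >= 0; bit--)
    {
        if ((w >> (shift + bit)) & 1)
            return shift + bit;
    }
    return -1;                  /* unreachable for w != 0 */
}

int
main(void)
{
    /* member 40 lives in the upper half; a scan starting at 24 misreads it */
    printf("%d\n", highest_member_of_word((bitmapword) 1 << 40));  /* 40 */
    printf("%d\n", highest_member_of_word((bitmapword) 1 << 5));   /* 5 */
    return 0;
}
```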
Diffstat (limited to 'src')
-rw-r--r--   src/backend/nodes/bitmapset.c                   2
-rw-r--r--   src/test/regress/expected/partition_prune.out  23
-rw-r--r--   src/test/regress/sql/partition_prune.sql       25

3 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index 9341bf579e3..81182f2518c 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -1167,7 +1167,7 @@ bms_prev_member(const Bitmapset *a, int prevbit)
 	if (w != 0)
 	{
 		int			result;
-		int			shift = 24;
+		int			shift = BITS_PER_BITMAPWORD - 8;
 
 		result = wordnum * BITS_PER_BITMAPWORD;
 		while ((w >> shift) == 0)
diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out
index 9c65ee001de..3e818edd339 100644
--- a/src/test/regress/expected/partition_prune.out
+++ b/src/test/regress/expected/partition_prune.out
@@ -1747,6 +1747,29 @@ explain (analyze, costs off, summary off, timing off) execute ab_q3 (2, 2);
          Filter: ((b >= $1) AND (b <= $2) AND (a < $0))
 (10 rows)
 
+-- Test a backwards Append scan
+create table list_part (a int) partition by list (a);
+create table list_part1 partition of list_part for values in (1);
+create table list_part2 partition of list_part for values in (2);
+create table list_part3 partition of list_part for values in (3);
+create table list_part4 partition of list_part for values in (4);
+insert into list_part select generate_series(1,4);
+begin;
+-- Don't select an actual value out of the table as the order of the Append's
+-- subnodes may not be stable.
+declare cur SCROLL CURSOR for select 1 from list_part where a > (select 1) and a < (select 4);
+-- move beyond the final row
+move 3 from cur;
+-- Ensure we get two rows.
+fetch backward all from cur;
+ ?column?
+----------
+        1
+        1
+(2 rows)
+
+commit;
+drop table list_part;
 -- Parallel append
 -- Suppress the number of loops each parallel node runs for.  This is because
 -- more than one worker may run the same parallel node if timing conditions
diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql
index b38b39c71e0..d8d3e3c47db 100644
--- a/src/test/regress/sql/partition_prune.sql
+++ b/src/test/regress/sql/partition_prune.sql
@@ -359,6 +359,31 @@ execute ab_q3 (1, 8);
 
 explain (analyze, costs off, summary off, timing off) execute ab_q3 (2, 2);
 
+-- Test a backwards Append scan
+create table list_part (a int) partition by list (a);
+create table list_part1 partition of list_part for values in (1);
+create table list_part2 partition of list_part for values in (2);
+create table list_part3 partition of list_part for values in (3);
+create table list_part4 partition of list_part for values in (4);
+
+insert into list_part select generate_series(1,4);
+
+begin;
+
+-- Don't select an actual value out of the table as the order of the Append's
+-- subnodes may not be stable.
+declare cur SCROLL CURSOR for select 1 from list_part where a > (select 1) and a < (select 4);
+
+-- move beyond the final row
+move 3 from cur;
+
+-- Ensure we get two rows.
+fetch backward all from cur;
+
+commit;
+
+drop table list_part;
+
 -- Parallel append
 -- Suppress the number of loops each parallel node runs for.  This is because
 -- more than one worker may run the same parallel node if timing conditions
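The new regression test reaches this code from SQL: fetching backward through the scroll cursor makes the pruned Append node walk its surviving subplans in reverse, which is the reverse-iteration pattern bms_prev_member exists to serve. Below is a toy, self-contained model of that pattern (the single-word `prev_member` helper and the example set are invented for this sketch, not the PostgreSQL API), showing the -1 "start from the top" start value and the negative "no more members" return that terminate the loop.

```c
/*
 * Toy model (invented for this note, not the PostgreSQL API) of the reverse
 * iteration bms_prev_member supports: prev_member(w, x) returns the highest
 * member of the single-word set w that is strictly below x, where x = -1
 * means "start from the top" and -2 means "no members remain".
 */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64

static int
prev_member(uint64_t w, int prevbit)
{
    if (prevbit == -1)
        prevbit = BITS_PER_WORD;    /* start above the highest possible bit */

    /* simple downward bit loop; the real code scans byte-at-a-time */
    for (int bit = prevbit - 1; bit >= 0; bit--)
    {
        if ((w >> bit) & 1)
            return bit;
    }
    return -2;                      /* no more members */
}

int
main(void)
{
    /* members 1, 2, 3 and 40; 40 is only seen if all 64 bits are scanned */
    uint64_t set = (1ULL << 1) | (1ULL << 2) | (1ULL << 3) | (1ULL << 40);
    int      x = -1;

    /* reverse-iteration idiom: repeatedly ask for the next-lower member */
    while ((x = prev_member(set, x)) >= 0)
        printf("%d\n", x);          /* prints 40, 3, 2, 1 */
    return 0;
}
```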