From dc9c3b0ff21465fa89d71eecf5e6cc956d647eca Mon Sep 17 00:00:00 2001
From: Tom Lane
Date: Mon, 20 Dec 2021 14:15:52 -0500
Subject: Remove dynamic translation of regression test scripts, step 2.

"git mv" all the input/*.source and output/*.source files into the
corresponding sql/ and expected/ directories.  Then remove the
pg_regress and Makefile infrastructure associated with dynamic
translation.

Discussion: https://postgr.es/m/1655733.1639871614@sss.pgh.pa.us
---
 contrib/dblink/Makefile | 1 -
 contrib/dblink/expected/.gitignore | 1 -
 contrib/dblink/expected/paths.out | 18 +
 contrib/dblink/input/paths.source | 23 -
 contrib/dblink/output/paths.source | 18 -
 contrib/dblink/sql/.gitignore | 1 -
 contrib/dblink/sql/paths.sql | 23 +
 contrib/file_fdw/Makefile | 2 -
 contrib/file_fdw/expected/.gitignore | 1 -
 contrib/file_fdw/expected/file_fdw.out | 477 ++++++++++++
 contrib/file_fdw/input/file_fdw.source | 253 ------
 contrib/file_fdw/output/file_fdw.source | 477 ------------
 contrib/file_fdw/sql/.gitignore | 1 -
 contrib/file_fdw/sql/file_fdw.sql | 253 ++++++
 src/interfaces/ecpg/test/pg_regress_ecpg.c | 7 +-
 src/pl/plpgsql/src/Makefile | 6 -
 src/pl/plpgsql/src/expected/.gitignore | 1 -
 src/pl/plpgsql/src/expected/plpgsql_copy.out | 82 ++
 src/pl/plpgsql/src/input/plpgsql_copy.source | 58 --
 src/pl/plpgsql/src/output/plpgsql_copy.source | 82 --
 src/pl/plpgsql/src/sql/.gitignore | 1 -
 src/pl/plpgsql/src/sql/plpgsql_copy.sql | 58 ++
 src/test/regress/GNUmakefile | 12 +-
 src/test/regress/expected/.gitignore | 10 -
 src/test/regress/expected/constraints.out | 742 ++++++++++++++++++
 src/test/regress/expected/copy.out | 248 ++++++
 src/test/regress/expected/create_function_0.out | 104 +++
 src/test/regress/expected/create_function_1.out | 30 +
 src/test/regress/expected/create_function_2.out | 73 ++
 src/test/regress/expected/largeobject.out | 496 ++++++++++++
 src/test/regress/expected/largeobject_1.out | 496 ++++++++++++
 src/test/regress/expected/misc.out | 697 +++++++++++++++++
 src/test/regress/expected/tablespace.out | 939 +++++++++++++++++++++++
 src/test/regress/input/constraints.source | 559 --------------
 src/test/regress/input/copy.source | 293 -------
 src/test/regress/input/create_function_0.source | 108 ---
 src/test/regress/input/create_function_1.source | 31 -
 src/test/regress/input/create_function_2.source | 96 ---
 src/test/regress/input/largeobject.source | 279 -------
 src/test/regress/input/misc.source | 268 -------
 src/test/regress/input/tablespace.source | 416 ----------
 src/test/regress/output/constraints.source | 742 ------------------
 src/test/regress/output/copy.source | 248 ------
 src/test/regress/output/create_function_0.source | 104 ---
 src/test/regress/output/create_function_1.source | 30 -
 src/test/regress/output/create_function_2.source | 73 --
 src/test/regress/output/largeobject.source | 496 ------------
 src/test/regress/output/largeobject_1.source | 496 ------------
 src/test/regress/output/misc.source | 697 -----------------
 src/test/regress/output/tablespace.source | 939 -----------------------
 src/test/regress/pg_regress.c | 150 ----
 src/test/regress/pg_regress.h | 2 -
 src/test/regress/sql/.gitignore | 9 -
 src/test/regress/sql/constraints.sql | 559 ++++++++++++++
 src/test/regress/sql/copy.sql | 293 +++++++
 src/test/regress/sql/create_function_0.sql | 108 +++
 src/test/regress/sql/create_function_1.sql | 31 +
 src/test/regress/sql/create_function_2.sql | 96 +++
 src/test/regress/sql/largeobject.sql | 279 +++++++
 src/test/regress/sql/misc.sql | 268 +++++++
 src/test/regress/sql/tablespace.sql | 416 ++++++++++
 61 files changed, 6794 insertions(+), 6983 deletions(-)
 delete mode 100644 contrib/dblink/expected/.gitignore
 create mode 100644 contrib/dblink/expected/paths.out
 delete mode 100644 contrib/dblink/input/paths.source
 delete mode 100644 contrib/dblink/output/paths.source
 delete mode 100644 contrib/dblink/sql/.gitignore
 create mode 100644 contrib/dblink/sql/paths.sql
 delete mode 100644 contrib/file_fdw/expected/.gitignore
 create mode 100644 contrib/file_fdw/expected/file_fdw.out
 delete mode 100644 contrib/file_fdw/input/file_fdw.source
 delete mode 100644 contrib/file_fdw/output/file_fdw.source
 delete mode 100644 contrib/file_fdw/sql/.gitignore
 create mode 100644 contrib/file_fdw/sql/file_fdw.sql
 delete mode 100644 src/pl/plpgsql/src/expected/.gitignore
 create mode 100644 src/pl/plpgsql/src/expected/plpgsql_copy.out
 delete mode 100644 src/pl/plpgsql/src/input/plpgsql_copy.source
 delete mode 100644 src/pl/plpgsql/src/output/plpgsql_copy.source
 delete mode 100644 src/pl/plpgsql/src/sql/.gitignore
 create mode 100644 src/pl/plpgsql/src/sql/plpgsql_copy.sql
 delete mode 100644 src/test/regress/expected/.gitignore
 create mode 100644 src/test/regress/expected/constraints.out
 create mode 100644 src/test/regress/expected/copy.out
 create mode 100644 src/test/regress/expected/create_function_0.out
 create mode 100644 src/test/regress/expected/create_function_1.out
 create mode 100644 src/test/regress/expected/create_function_2.out
 create mode 100644 src/test/regress/expected/largeobject.out
 create mode 100644 src/test/regress/expected/largeobject_1.out
 create mode 100644 src/test/regress/expected/misc.out
 create mode 100644 src/test/regress/expected/tablespace.out
 delete mode 100644 src/test/regress/input/constraints.source
 delete mode 100644 src/test/regress/input/copy.source
 delete mode 100644 src/test/regress/input/create_function_0.source
 delete mode 100644 src/test/regress/input/create_function_1.source
 delete mode 100644 src/test/regress/input/create_function_2.source
 delete mode 100644 src/test/regress/input/largeobject.source
 delete mode 100644 src/test/regress/input/misc.source
 delete mode 100644 src/test/regress/input/tablespace.source
 delete mode 100644 src/test/regress/output/constraints.source
 delete mode 100644 src/test/regress/output/copy.source
 delete mode 100644 src/test/regress/output/create_function_0.source
 delete mode 100644 src/test/regress/output/create_function_1.source
 delete mode 100644 src/test/regress/output/create_function_2.source
 delete mode 100644 src/test/regress/output/largeobject.source
 delete mode 100644 src/test/regress/output/largeobject_1.source
 delete mode 100644 src/test/regress/output/misc.source
 delete mode 100644 src/test/regress/output/tablespace.source
 delete mode 100644 src/test/regress/sql/.gitignore
 create mode 100644 src/test/regress/sql/constraints.sql
 create mode 100644 src/test/regress/sql/copy.sql
 create mode 100644 src/test/regress/sql/create_function_0.sql
 create mode 100644 src/test/regress/sql/create_function_1.sql
 create mode 100644 src/test/regress/sql/create_function_2.sql
 create mode 100644 src/test/regress/sql/largeobject.sql
 create mode 100644 src/test/regress/sql/misc.sql
 create mode 100644 src/test/regress/sql/tablespace.sql

diff --git a/contrib/dblink/Makefile b/contrib/dblink/Makefile
index b008c8c4c4b..6bb3ece38c8 100644
--- a/contrib/dblink/Makefile
+++ b/contrib/dblink/Makefile
@@ -13,7 +13,6 @@ PGFILEDESC = "dblink - connect to other PostgreSQL databases"
 
 REGRESS = paths dblink
 REGRESS_OPTS = --dlpath=$(top_builddir)/src/test/regress
-EXTRA_CLEAN = sql/paths.sql expected/paths.out
 
 ifdef USE_PGXS
 PG_CONFIG = pg_config
diff --git a/contrib/dblink/expected/.gitignore b/contrib/dblink/expected/.gitignore
deleted file mode 100644
index d9c7942c646..00000000000
--- a/contrib/dblink/expected/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/paths.out
diff --git a/contrib/dblink/expected/paths.out b/contrib/dblink/expected/paths.out
new file mode 100644
index 00000000000..d09b169b19a
--- /dev/null
+++ b/contrib/dblink/expected/paths.out
@@ -0,0 +1,18 @@
+-- Initialization that requires path substitution.
+-- directory paths and dlsuffix are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+\getenv libdir PG_LIBDIR
+\getenv dlsuffix PG_DLSUFFIX
+\set regresslib :libdir '/regress' :dlsuffix
+CREATE FUNCTION setenv(text, text)
+   RETURNS void
+   AS :'regresslib', 'regress_setenv'
+   LANGUAGE C STRICT;
+CREATE FUNCTION wait_pid(int)
+   RETURNS void
+   AS :'regresslib'
+   LANGUAGE C STRICT;
+\set path :abs_srcdir '/'
+\set fnbody 'SELECT setenv(''PGSERVICEFILE'', ' :'path' ' || $1)'
+CREATE FUNCTION set_pgservicefile(text) RETURNS void LANGUAGE SQL
+   AS :'fnbody';
diff --git a/contrib/dblink/input/paths.source b/contrib/dblink/input/paths.source
deleted file mode 100644
index 30403d9d5f7..00000000000
--- a/contrib/dblink/input/paths.source
+++ /dev/null
@@ -1,23 +0,0 @@
--- Initialization that requires path substitution.
-
--- directory paths and dlsuffix are passed to us in environment variables
-\getenv abs_srcdir PG_ABS_SRCDIR
-\getenv libdir PG_LIBDIR
-\getenv dlsuffix PG_DLSUFFIX
-
-\set regresslib :libdir '/regress' :dlsuffix
-
-CREATE FUNCTION setenv(text, text)
-   RETURNS void
-   AS :'regresslib', 'regress_setenv'
-   LANGUAGE C STRICT;
-
-CREATE FUNCTION wait_pid(int)
-   RETURNS void
-   AS :'regresslib'
-   LANGUAGE C STRICT;
-
-\set path :abs_srcdir '/'
-\set fnbody 'SELECT setenv(''PGSERVICEFILE'', ' :'path' ' || $1)'
-CREATE FUNCTION set_pgservicefile(text) RETURNS void LANGUAGE SQL
-   AS :'fnbody';
diff --git a/contrib/dblink/output/paths.source b/contrib/dblink/output/paths.source
deleted file mode 100644
index d09b169b19a..00000000000
--- a/contrib/dblink/output/paths.source
+++ /dev/null
@@ -1,18 +0,0 @@
--- Initialization that requires path substitution.
--- directory paths and dlsuffix are passed to us in environment variables
-\getenv abs_srcdir PG_ABS_SRCDIR
-\getenv libdir PG_LIBDIR
-\getenv dlsuffix PG_DLSUFFIX
-\set regresslib :libdir '/regress' :dlsuffix
-CREATE FUNCTION setenv(text, text)
-   RETURNS void
-   AS :'regresslib', 'regress_setenv'
-   LANGUAGE C STRICT;
-CREATE FUNCTION wait_pid(int)
-   RETURNS void
-   AS :'regresslib'
-   LANGUAGE C STRICT;
-\set path :abs_srcdir '/'
-\set fnbody 'SELECT setenv(''PGSERVICEFILE'', ' :'path' ' || $1)'
-CREATE FUNCTION set_pgservicefile(text) RETURNS void LANGUAGE SQL
-   AS :'fnbody';
diff --git a/contrib/dblink/sql/.gitignore b/contrib/dblink/sql/.gitignore
deleted file mode 100644
index d17507846d0..00000000000
--- a/contrib/dblink/sql/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/paths.sql
diff --git a/contrib/dblink/sql/paths.sql b/contrib/dblink/sql/paths.sql
new file mode 100644
index 00000000000..30403d9d5f7
--- /dev/null
+++ b/contrib/dblink/sql/paths.sql
@@ -0,0 +1,23 @@
+-- Initialization that requires path substitution.
+
+-- directory paths and dlsuffix are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+\getenv libdir PG_LIBDIR
+\getenv dlsuffix PG_DLSUFFIX
+
+\set regresslib :libdir '/regress' :dlsuffix
+
+CREATE FUNCTION setenv(text, text)
+   RETURNS void
+   AS :'regresslib', 'regress_setenv'
+   LANGUAGE C STRICT;
+
+CREATE FUNCTION wait_pid(int)
+   RETURNS void
+   AS :'regresslib'
+   LANGUAGE C STRICT;
+
+\set path :abs_srcdir '/'
+\set fnbody 'SELECT setenv(''PGSERVICEFILE'', ' :'path' ' || $1)'
+CREATE FUNCTION set_pgservicefile(text) RETURNS void LANGUAGE SQL
+   AS :'fnbody';
diff --git a/contrib/file_fdw/Makefile b/contrib/file_fdw/Makefile
index 4da9f2d697a..885459d3c16 100644
--- a/contrib/file_fdw/Makefile
+++ b/contrib/file_fdw/Makefile
@@ -8,8 +8,6 @@ PGFILEDESC = "file_fdw - foreign data wrapper for files"
 
 REGRESS = file_fdw
 
-EXTRA_CLEAN = sql/file_fdw.sql expected/file_fdw.out
-
 ifdef USE_PGXS
 PG_CONFIG = pg_config
 PGXS := $(shell $(PG_CONFIG) --pgxs)
diff --git a/contrib/file_fdw/expected/.gitignore b/contrib/file_fdw/expected/.gitignore
deleted file mode 100644
index a464ad144ff..00000000000
--- a/contrib/file_fdw/expected/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/file_fdw.out
diff --git a/contrib/file_fdw/expected/file_fdw.out b/contrib/file_fdw/expected/file_fdw.out
new file mode 100644
index 00000000000..891146fef38
--- /dev/null
+++ b/contrib/file_fdw/expected/file_fdw.out
@@ -0,0 +1,477 @@
+--
+-- Test foreign-data wrapper file_fdw.
+--
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+-- Clean up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+DROP ROLE IF EXISTS regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
+RESET client_min_messages;
+CREATE ROLE regress_file_fdw_superuser LOGIN SUPERUSER; -- is a superuser
+CREATE ROLE regress_file_fdw_user LOGIN; -- has priv and user mapping
+CREATE ROLE regress_no_priv_user LOGIN; -- has priv but no user mapping
+-- Install file_fdw
+CREATE EXTENSION file_fdw;
+-- create function to filter unstable results of EXPLAIN
+CREATE FUNCTION explain_filter(text) RETURNS setof text
+LANGUAGE plpgsql AS
+$$
+declare
+  ln text;
+begin
+  for ln in execute $1
+  loop
+    -- Remove the path portion of foreign file names
+    ln := regexp_replace(ln, 'Foreign File: .*/([a-z.]+)$', 'Foreign File: .../\1');
+    return next ln;
+  end loop;
+end;
+$$;
+-- regress_file_fdw_superuser owns fdw-related objects
+SET ROLE regress_file_fdw_superuser;
+CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw;
+-- privilege tests
+SET ROLE regress_file_fdw_user;
+CREATE FOREIGN DATA WRAPPER file_fdw2 HANDLER file_fdw_handler VALIDATOR file_fdw_validator; -- ERROR
+ERROR:  permission denied to create foreign-data wrapper "file_fdw2"
+HINT:  Must be superuser to create a foreign-data wrapper.
+CREATE SERVER file_server2 FOREIGN DATA WRAPPER file_fdw; -- ERROR
+ERROR:  permission denied for foreign-data wrapper file_fdw
+CREATE USER MAPPING FOR regress_file_fdw_user SERVER file_server; -- ERROR
+ERROR:  permission denied for foreign server file_server
+SET ROLE regress_file_fdw_superuser;
+GRANT USAGE ON FOREIGN SERVER file_server TO regress_file_fdw_user;
+SET ROLE regress_file_fdw_user;
+CREATE USER MAPPING FOR regress_file_fdw_user SERVER file_server;
+-- create user mappings and grant privilege to test users
+SET ROLE regress_file_fdw_superuser;
+CREATE USER MAPPING FOR regress_file_fdw_superuser SERVER file_server;
+CREATE USER MAPPING FOR regress_no_priv_user SERVER file_server;
+-- validator tests
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'xml'); -- ERROR
+ERROR:  COPY format "xml" not recognized
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', header 'true'); -- ERROR
+ERROR:  COPY HEADER available only in CSV mode
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', quote ':'); -- ERROR
+ERROR:  COPY quote available only in CSV mode
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape ':'); -- ERROR
+ERROR:  COPY escape available only in CSV mode
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', header 'true'); -- ERROR
+ERROR:  COPY HEADER available only in CSV mode
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', quote ':'); -- ERROR
+ERROR:  COPY quote available only in CSV mode
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', escape ':'); -- ERROR
+ERROR:  COPY escape available only in CSV mode
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter 'a'); -- ERROR
+ERROR:  COPY delimiter cannot be "a"
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape '-'); -- ERROR
+ERROR:  COPY escape available only in CSV mode
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', quote '-', null '=-='); -- ERROR
+ERROR:  CSV quote character must not appear in the NULL specification
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '-', null '=-='); -- ERROR
+ERROR:  COPY delimiter must not appear in the NULL specification
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '-', quote '-'); -- ERROR
+ERROR:  COPY delimiter and quote must be different
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '---'); -- ERROR
+ERROR:  COPY delimiter must be a single one-byte character
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', quote '---'); -- ERROR
+ERROR:  COPY quote must be a single one-byte character
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', escape '---'); -- ERROR
+ERROR:  COPY escape must be a single one-byte character
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '\'); -- ERROR
+ERROR:  COPY delimiter cannot be "\"
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '.'); -- ERROR
+ERROR:  COPY delimiter cannot be "."
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '1'); -- ERROR
+ERROR:  COPY delimiter cannot be "1"
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter 'a'); -- ERROR
+ERROR:  COPY delimiter cannot be "a"
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '
+'); -- ERROR
+ERROR:  COPY delimiter cannot be newline or carriage return
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', null '
+'); -- ERROR
+ERROR:  COPY null representation cannot use newline or carriage return
+CREATE FOREIGN TABLE tbl () SERVER file_server; -- ERROR
+ERROR:  either filename or program is required for file_fdw foreign tables
+\set filename :abs_srcdir '/data/agg.data'
+CREATE FOREIGN TABLE agg_text (
+  a int2 CHECK (a >= 0),
+  b float4
+) SERVER file_server
+OPTIONS (format 'text', filename :'filename', delimiter '	', null '\N');
+GRANT SELECT ON agg_text TO regress_file_fdw_user;
+\set filename :abs_srcdir '/data/agg.csv'
+CREATE FOREIGN TABLE agg_csv (
+  a int2,
+  b float4
+) SERVER file_server
+OPTIONS (format 'csv', filename :'filename', header 'true', delimiter ';', quote '@', escape '"', null '');
+ALTER FOREIGN TABLE agg_csv ADD CHECK (a >= 0);
+\set filename :abs_srcdir '/data/agg.bad'
+CREATE FOREIGN TABLE agg_bad (
+  a int2,
+  b float4
+) SERVER file_server
+OPTIONS (format 'csv', filename :'filename', header 'true', delimiter ';', quote '@', escape '"', null '');
+ALTER FOREIGN TABLE agg_bad ADD CHECK (a >= 0);
+-- per-column options tests
+\set filename :abs_srcdir '/data/text.csv'
+CREATE FOREIGN TABLE text_csv (
+  word1 text OPTIONS (force_not_null 'true'),
+  word2 text OPTIONS (force_not_null 'off'),
+  word3 text OPTIONS (force_null 'true'),
+  word4 text OPTIONS (force_null 'off')
+) SERVER file_server
+OPTIONS (format 'text', filename :'filename', null 'NULL');
+SELECT * FROM text_csv; -- ERROR
+ERROR:  COPY force not null available only in CSV mode
+ALTER FOREIGN TABLE text_csv OPTIONS (SET format 'csv');
+\pset null _null_
+SELECT * FROM text_csv;
+ word1 | word2  | word3  | word4
+-------+--------+--------+--------
+ AAA   | aaa    | 123    |
+ XYZ   | xyz    |        | 321
+ NULL  | _null_ | _null_ | _null_
+ NULL  | _null_ | _null_ | _null_
+ ABC   | abc    |        |
+(5 rows)
+
+-- force_not_null and force_null can be used together on the same column
+ALTER FOREIGN TABLE text_csv ALTER COLUMN word1 OPTIONS (force_null 'true');
+ALTER FOREIGN TABLE text_csv ALTER COLUMN word3 OPTIONS (force_not_null 'true');
+-- force_not_null is not allowed to be specified at any foreign object level:
+ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_not_null '*'); -- ERROR
+ERROR:  invalid option "force_not_null"
+HINT:  There are no valid options in this context.
+ALTER SERVER file_server OPTIONS (ADD force_not_null '*'); -- ERROR
+ERROR:  invalid option "force_not_null"
+HINT:  There are no valid options in this context.
+CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
+ERROR:  invalid option "force_not_null"
+HINT:  There are no valid options in this context.
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
+ERROR:  invalid option "force_not_null"
+HINT:  Valid options in this context are: filename, program, format, header, delimiter, quote, escape, null, encoding
+-- force_null is not allowed to be specified at any foreign object level:
+ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_null '*'); -- ERROR
+ERROR:  invalid option "force_null"
+HINT:  There are no valid options in this context.
+ALTER SERVER file_server OPTIONS (ADD force_null '*'); -- ERROR
+ERROR:  invalid option "force_null"
+HINT:  There are no valid options in this context.
+CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_null '*'); -- ERROR
+ERROR:  invalid option "force_null"
+HINT:  There are no valid options in this context.
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_null '*'); -- ERROR
+ERROR:  invalid option "force_null"
+HINT:  Valid options in this context are: filename, program, format, header, delimiter, quote, escape, null, encoding
+-- basic query tests
+SELECT * FROM agg_text WHERE b > 10.0 ORDER BY a;
+  a  |   b
+-----+--------
+  42 | 324.78
+ 100 | 99.097
+(2 rows)
+
+SELECT * FROM agg_csv ORDER BY a;
+  a  |    b
+-----+---------
+   0 | 0.09561
+  42 |  324.78
+ 100 |  99.097
+(3 rows)
+
+SELECT * FROM agg_csv c JOIN agg_text t ON (t.a = c.a) ORDER BY c.a;
+  a  |    b    |  a  |    b
+-----+---------+-----+---------
+   0 | 0.09561 |   0 | 0.09561
+  42 |  324.78 |  42 |  324.78
+ 100 |  99.097 | 100 |  99.097
+(3 rows)
+
+-- error context report tests
+SELECT * FROM agg_bad; -- ERROR
+ERROR:  invalid input syntax for type real: "aaa"
+CONTEXT:  COPY agg_bad, line 3, column b: "aaa"
+-- misc query tests
+\t on
+SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv');
+ Foreign Scan on public.agg_csv
+   Output: a, b
+   Foreign File: .../agg.csv
+
+\t off
+PREPARE st(int) AS SELECT * FROM agg_csv WHERE a = $1;
+EXECUTE st(100);
+  a  |   b
+-----+--------
+ 100 | 99.097
+(1 row)
+
+EXECUTE st(100);
+  a  |   b
+-----+--------
+ 100 | 99.097
+(1 row)
+
+DEALLOCATE st;
+-- tableoid
+SELECT tableoid::regclass, b FROM agg_csv;
+ tableoid |    b
+----------+---------
+ agg_csv  |  99.097
+ agg_csv  | 0.09561
+ agg_csv  |  324.78
+(3 rows)
+
+-- updates aren't supported
+INSERT INTO agg_csv VALUES(1,2.0);
+ERROR:  cannot insert into foreign table "agg_csv"
+UPDATE agg_csv SET a = 1;
+ERROR:  cannot update foreign table "agg_csv"
+DELETE FROM agg_csv WHERE a = 100;
+ERROR:  cannot delete from foreign table "agg_csv"
+-- but this should be allowed
+SELECT * FROM agg_csv FOR UPDATE;
+  a  |    b
+-----+---------
+ 100 |  99.097
+   0 | 0.09561
+  42 |  324.78
+(3 rows)
+
+-- copy from isn't supported either
+COPY agg_csv FROM STDIN;
+ERROR:  cannot insert into foreign table "agg_csv"
+-- constraint exclusion tests
+\t on
+SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0');
+ Foreign Scan on public.agg_csv
+   Output: a, b
+   Filter: (agg_csv.a < 0)
+   Foreign File: .../agg.csv
+
+\t off
+SELECT * FROM agg_csv WHERE a < 0;
+ a | b
+---+---
+(0 rows)
+
+SET constraint_exclusion = 'on';
+\t on
+SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0');
+ Result
+   Output: a, b
+   One-Time Filter: false
+
+\t off
+SELECT * FROM agg_csv WHERE a < 0;
+ a | b
+---+---
+(0 rows)
+
+RESET constraint_exclusion;
+-- table inheritance tests
+CREATE TABLE agg (a int2, b float4);
+ALTER FOREIGN TABLE agg_csv INHERIT agg;
+SELECT tableoid::regclass, * FROM agg;
+ tableoid |  a  |    b
+----------+-----+---------
+ agg_csv  | 100 |  99.097
+ agg_csv  |   0 | 0.09561
+ agg_csv  |  42 |  324.78
+(3 rows)
+
+SELECT tableoid::regclass, * FROM agg_csv;
+ tableoid |  a  |    b
+----------+-----+---------
+ agg_csv  | 100 |  99.097
+ agg_csv  |   0 | 0.09561
+ agg_csv  |  42 |  324.78
+(3 rows)
+
+SELECT tableoid::regclass, * FROM ONLY agg;
+ tableoid | a | b
+----------+---+---
+(0 rows)
+
+-- updates aren't supported
+UPDATE agg SET a = 1;
+ERROR:  cannot update foreign table "agg_csv"
+DELETE FROM agg WHERE a = 100;
+ERROR:  cannot delete from foreign table "agg_csv"
+-- but this should be allowed
+SELECT tableoid::regclass, * FROM agg FOR UPDATE;
+ tableoid |  a  |    b
+----------+-----+---------
+ agg_csv  | 100 |  99.097
+ agg_csv  |   0 | 0.09561
+ agg_csv  |  42 |  324.78
+(3 rows)
+
+ALTER FOREIGN TABLE agg_csv NO INHERIT agg;
+DROP TABLE agg;
+-- declarative partitioning tests
+SET ROLE regress_file_fdw_superuser;
+CREATE TABLE pt (a int, b text) partition by list (a);
+\set filename :abs_srcdir '/data/list1.csv'
+CREATE FOREIGN TABLE p1 partition of pt for values in (1) SERVER file_server
+OPTIONS (format 'csv', filename :'filename', delimiter ',');
+CREATE TABLE p2 partition of pt for values in (2);
+SELECT tableoid::regclass, * FROM pt;
+ tableoid | a |  b
+----------+---+-----
+ p1       | 1 | foo
+ p1       | 1 | bar
+(2 rows)
+
+SELECT tableoid::regclass, * FROM p1;
+ tableoid | a |  b
+----------+---+-----
+ p1       | 1 | foo
+ p1       | 1 | bar
+(2 rows)
+
+SELECT tableoid::regclass, * FROM p2;
+ tableoid | a | b
+----------+---+---
+(0 rows)
+
+\set filename :abs_srcdir '/data/list2.bad'
+COPY pt FROM :'filename' with (format 'csv', delimiter ','); -- ERROR
+ERROR:  cannot insert into foreign table "p1"
+CONTEXT:  COPY pt, line 2: "1,qux"
+\set filename :abs_srcdir '/data/list2.csv'
+COPY pt FROM :'filename' with (format 'csv', delimiter ',');
+SELECT tableoid::regclass, * FROM pt;
+ tableoid | a |  b
+----------+---+-----
+ p1       | 1 | foo
+ p1       | 1 | bar
+ p2       | 2 | baz
+ p2       | 2 | qux
+(4 rows)
+
+SELECT tableoid::regclass, * FROM p1;
+ tableoid | a |  b
+----------+---+-----
+ p1       | 1 | foo
+ p1       | 1 | bar
+(2 rows)
+
+SELECT tableoid::regclass, * FROM p2;
+ tableoid | a |  b
+----------+---+-----
+ p2       | 2 | baz
+ p2       | 2 | qux
+(2 rows)
+
+INSERT INTO pt VALUES (1, 'xyzzy'); -- ERROR
+ERROR:  cannot insert into foreign table "p1"
+INSERT INTO pt VALUES (2, 'xyzzy');
+UPDATE pt set a = 1 where a = 2; -- ERROR
+ERROR:  cannot insert into foreign table "p1"
+SELECT tableoid::regclass, * FROM pt;
+ tableoid | a |   b
+----------+---+-------
+ p1       | 1 | foo
+ p1       | 1 | bar
+ p2       | 2 | baz
+ p2       | 2 | qux
+ p2       | 2 | xyzzy
+(5 rows)
+
+SELECT tableoid::regclass, * FROM p1;
+ tableoid | a |  b
+----------+---+-----
+ p1       | 1 | foo
+ p1       | 1 | bar
+(2 rows)
+
+SELECT tableoid::regclass, * FROM p2;
+ tableoid | a |   b
+----------+---+-------
+ p2       | 2 | baz
+ p2       | 2 | qux
+ p2       | 2 | xyzzy
+(3 rows)
+
+DROP TABLE pt;
+-- generated column tests
+\set filename :abs_srcdir '/data/list1.csv'
+CREATE FOREIGN TABLE gft1 (a int, b text, c text GENERATED ALWAYS AS ('foo') STORED) SERVER file_server
+OPTIONS (format 'csv', filename :'filename', delimiter ',');
+SELECT a, c FROM gft1;
+ a |   c
+---+--------
+ 1 | _null_
+ 1 | _null_
+(2 rows)
+
+DROP FOREIGN TABLE gft1;
+-- privilege tests
+SET ROLE regress_file_fdw_superuser;
+SELECT * FROM agg_text ORDER BY a;
+  a  |    b
+-----+---------
+   0 | 0.09561
+  42 |  324.78
+  56 |     7.8
+ 100 |  99.097
+(4 rows)
+
+SET ROLE regress_file_fdw_user;
+SELECT * FROM agg_text ORDER BY a;
+  a  |    b
+-----+---------
+   0 | 0.09561
+  42 |  324.78
+  56 |     7.8
+ 100 |  99.097
+(4 rows)
+
+SET ROLE regress_no_priv_user;
+SELECT * FROM agg_text ORDER BY a; -- ERROR
+ERROR:  permission denied for foreign table agg_text
+SET ROLE regress_file_fdw_user;
+\t on
+SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_text WHERE a > 0');
+ Foreign Scan on public.agg_text
+   Output: a, b
+   Filter: (agg_text.a > 0)
+   Foreign File: .../agg.data
+
+\t off
+-- file FDW allows foreign tables to be accessed without user mapping
+DROP USER MAPPING FOR regress_file_fdw_user SERVER file_server;
+SELECT * FROM agg_text ORDER BY a;
+  a  |    b
+-----+---------
+   0 | 0.09561
+  42 |  324.78
+  56 |     7.8
+ 100 |  99.097
+(4 rows)
+
+-- privilege tests for object
+SET ROLE regress_file_fdw_superuser;
+ALTER FOREIGN TABLE agg_text OWNER TO regress_file_fdw_user;
+ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text');
+SET ROLE regress_file_fdw_user;
+ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text');
+ERROR:  only superuser or a member of the pg_read_server_files role may specify the filename option of a file_fdw foreign table
+SET ROLE regress_file_fdw_superuser;
+-- cleanup
+RESET ROLE;
+DROP EXTENSION file_fdw CASCADE;
+NOTICE:  drop cascades to 7 other objects
+DETAIL:  drop cascades to server file_server
+drop cascades to user mapping for regress_file_fdw_superuser on server file_server
+drop cascades to user mapping for regress_no_priv_user on server file_server
+drop cascades to foreign table agg_text
+drop cascades to foreign table agg_csv
+drop cascades to foreign table agg_bad
+drop cascades to foreign table text_csv
+DROP ROLE regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
diff --git a/contrib/file_fdw/input/file_fdw.source b/contrib/file_fdw/input/file_fdw.source
deleted file mode 100644
index 0ea8b145080..00000000000
--- a/contrib/file_fdw/input/file_fdw.source
+++ /dev/null
@@ -1,253 +0,0 @@
---
--- Test foreign-data wrapper file_fdw.
---
-
--- directory paths are passed to us in environment variables
-\getenv abs_srcdir PG_ABS_SRCDIR
-
--- Clean up in case a prior regression run failed
-SET client_min_messages TO 'warning';
-DROP ROLE IF EXISTS regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
-RESET client_min_messages;
-
-CREATE ROLE regress_file_fdw_superuser LOGIN SUPERUSER; -- is a superuser
-CREATE ROLE regress_file_fdw_user LOGIN; -- has priv and user mapping
-CREATE ROLE regress_no_priv_user LOGIN; -- has priv but no user mapping
-
--- Install file_fdw
-CREATE EXTENSION file_fdw;
-
--- create function to filter unstable results of EXPLAIN
-CREATE FUNCTION explain_filter(text) RETURNS setof text
-LANGUAGE plpgsql AS
-$$
-declare
-  ln text;
-begin
-  for ln in execute $1
-  loop
-    -- Remove the path portion of foreign file names
-    ln := regexp_replace(ln, 'Foreign File: .*/([a-z.]+)$', 'Foreign File: .../\1');
-    return next ln;
-  end loop;
-end;
-$$;
-
--- regress_file_fdw_superuser owns fdw-related objects
-SET ROLE regress_file_fdw_superuser;
-CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw;
-
--- privilege tests
-SET ROLE regress_file_fdw_user;
-CREATE FOREIGN DATA WRAPPER file_fdw2 HANDLER file_fdw_handler VALIDATOR file_fdw_validator; -- ERROR
-CREATE SERVER file_server2 FOREIGN DATA WRAPPER file_fdw; -- ERROR
-CREATE USER MAPPING FOR regress_file_fdw_user SERVER file_server; -- ERROR
-
-SET ROLE regress_file_fdw_superuser;
-GRANT USAGE ON FOREIGN SERVER file_server TO regress_file_fdw_user;
-
-SET ROLE regress_file_fdw_user;
-CREATE USER MAPPING FOR regress_file_fdw_user SERVER file_server;
-
--- create user mappings and grant privilege to test users
-SET ROLE regress_file_fdw_superuser;
-CREATE USER MAPPING FOR regress_file_fdw_superuser SERVER file_server;
-CREATE USER MAPPING FOR regress_no_priv_user SERVER file_server;
-
--- validator tests
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'xml'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', header 'true'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', quote ':'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape ':'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', header 'true'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', quote ':'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', escape ':'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter 'a'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape '-'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', quote '-', null '=-='); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '-', null '=-='); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '-', quote '-'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '---'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', quote '---'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', escape '---'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '\'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '.'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '1'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter 'a'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '
-'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', null '
-'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server; -- ERROR
-
-\set filename :abs_srcdir '/data/agg.data'
-CREATE FOREIGN TABLE agg_text (
-  a int2 CHECK (a >= 0),
-  b float4
-) SERVER file_server
-OPTIONS (format 'text', filename :'filename', delimiter '	', null '\N');
-GRANT SELECT ON agg_text TO regress_file_fdw_user;
-
-\set filename :abs_srcdir '/data/agg.csv'
-CREATE FOREIGN TABLE agg_csv (
-  a int2,
-  b float4
-) SERVER file_server
-OPTIONS (format 'csv', filename :'filename', header 'true', delimiter ';', quote '@', escape '"', null '');
-ALTER FOREIGN TABLE agg_csv ADD CHECK (a >= 0);
-
-\set filename :abs_srcdir '/data/agg.bad'
-CREATE FOREIGN TABLE agg_bad (
-  a int2,
-  b float4
-) SERVER file_server
-OPTIONS (format 'csv', filename :'filename', header 'true', delimiter ';', quote '@', escape '"', null '');
-ALTER FOREIGN TABLE agg_bad ADD CHECK (a >= 0);
-
--- per-column options tests
-\set filename :abs_srcdir '/data/text.csv'
-CREATE FOREIGN TABLE text_csv (
-  word1 text OPTIONS (force_not_null 'true'),
-  word2 text OPTIONS (force_not_null 'off'),
-  word3 text OPTIONS (force_null 'true'),
-  word4 text OPTIONS (force_null 'off')
-) SERVER file_server
-OPTIONS (format 'text', filename :'filename', null 'NULL');
-SELECT * FROM text_csv; -- ERROR
-ALTER FOREIGN TABLE text_csv OPTIONS (SET format 'csv');
-\pset null _null_
-SELECT * FROM text_csv;
-
--- force_not_null and force_null can be used together on the same column
-ALTER FOREIGN TABLE text_csv ALTER COLUMN word1 OPTIONS (force_null 'true');
-ALTER FOREIGN TABLE text_csv ALTER COLUMN word3 OPTIONS (force_not_null 'true');
-
--- force_not_null is not allowed to be specified at any foreign object level:
-ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_not_null '*'); -- ERROR
-ALTER SERVER file_server OPTIONS (ADD force_not_null '*'); -- ERROR
-CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
-
--- force_null is not allowed to be specified at any foreign object level:
-ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_null '*'); -- ERROR
-ALTER SERVER file_server OPTIONS (ADD force_null '*'); -- ERROR
-CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_null '*'); -- ERROR
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_null '*'); -- ERROR
-
--- basic query tests
-SELECT * FROM agg_text WHERE b > 10.0 ORDER BY a;
-SELECT * FROM agg_csv ORDER BY a;
-SELECT * FROM agg_csv c JOIN agg_text t ON (t.a = c.a) ORDER BY c.a;
-
--- error context report tests
-SELECT * FROM agg_bad; -- ERROR
-
--- misc query tests
-\t on
-SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv');
-\t off
-PREPARE st(int) AS SELECT * FROM agg_csv WHERE a = $1;
-EXECUTE st(100);
-EXECUTE st(100);
-DEALLOCATE st;
-
--- tableoid
-SELECT tableoid::regclass, b FROM agg_csv;
-
--- updates aren't supported
-INSERT INTO agg_csv VALUES(1,2.0);
-UPDATE agg_csv SET a = 1;
-DELETE FROM agg_csv WHERE a = 100;
--- but this should be allowed
-SELECT * FROM agg_csv FOR UPDATE;
-
--- copy from isn't supported either
-COPY agg_csv FROM STDIN;
-12	3.4
-\.
-
--- constraint exclusion tests
-\t on
-SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0');
-\t off
-SELECT * FROM agg_csv WHERE a < 0;
-SET constraint_exclusion = 'on';
-\t on
-SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0');
-\t off
-SELECT * FROM agg_csv WHERE a < 0;
-RESET constraint_exclusion;
-
--- table inheritance tests
-CREATE TABLE agg (a int2, b float4);
-ALTER FOREIGN TABLE agg_csv INHERIT agg;
-SELECT tableoid::regclass, * FROM agg;
-SELECT tableoid::regclass, * FROM agg_csv;
-SELECT tableoid::regclass, * FROM ONLY agg;
--- updates aren't supported
-UPDATE agg SET a = 1;
-DELETE FROM agg WHERE a = 100;
--- but this should be allowed
-SELECT tableoid::regclass, * FROM agg FOR UPDATE;
-ALTER FOREIGN TABLE agg_csv NO INHERIT agg;
-DROP TABLE agg;
-
--- declarative partitioning tests
-SET ROLE regress_file_fdw_superuser;
-CREATE TABLE pt (a int, b text) partition by list (a);
-\set filename :abs_srcdir '/data/list1.csv'
-CREATE FOREIGN TABLE p1 partition of pt for values in (1) SERVER file_server
-OPTIONS (format 'csv', filename :'filename', delimiter ',');
-CREATE TABLE p2 partition of pt for values in (2);
-SELECT tableoid::regclass, * FROM pt;
-SELECT tableoid::regclass, * FROM p1;
-SELECT tableoid::regclass, * FROM p2;
-\set filename :abs_srcdir '/data/list2.bad'
-COPY pt FROM :'filename' with (format 'csv', delimiter ','); -- ERROR
-\set filename :abs_srcdir '/data/list2.csv'
-COPY pt FROM :'filename' with (format 'csv', delimiter ',');
-SELECT tableoid::regclass, * FROM pt;
-SELECT tableoid::regclass, * FROM p1;
-SELECT tableoid::regclass, * FROM p2;
-INSERT INTO pt VALUES (1, 'xyzzy'); -- ERROR
-INSERT INTO pt VALUES (2, 'xyzzy');
-UPDATE pt set a = 1 where a = 2; -- ERROR
-SELECT tableoid::regclass, * FROM pt;
-SELECT tableoid::regclass, * FROM p1;
-SELECT tableoid::regclass, * FROM p2;
-DROP TABLE pt;
-
--- generated column tests
-\set filename :abs_srcdir '/data/list1.csv'
-CREATE FOREIGN TABLE gft1 (a int, b text, c text GENERATED ALWAYS AS ('foo') STORED) SERVER file_server
-OPTIONS (format 'csv', filename :'filename', delimiter ',');
-SELECT a, c FROM gft1;
-DROP FOREIGN TABLE gft1;
-
--- privilege tests
-SET ROLE regress_file_fdw_superuser;
-SELECT * FROM agg_text ORDER BY a;
-SET ROLE regress_file_fdw_user;
-SELECT * FROM agg_text ORDER BY a;
-SET ROLE regress_no_priv_user;
-SELECT * FROM agg_text ORDER BY a; -- ERROR
-SET ROLE regress_file_fdw_user;
-\t on
-SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_text WHERE a > 0');
-\t off
--- file FDW allows foreign tables to be accessed without user mapping
-DROP USER MAPPING FOR regress_file_fdw_user SERVER file_server;
-SELECT * FROM agg_text ORDER BY a;
-
--- privilege tests for object
-SET ROLE regress_file_fdw_superuser;
-ALTER FOREIGN TABLE agg_text OWNER TO regress_file_fdw_user;
-ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text');
-SET ROLE regress_file_fdw_user;
-ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text');
-SET ROLE regress_file_fdw_superuser;
-
--- cleanup
-RESET ROLE;
-DROP EXTENSION file_fdw CASCADE;
-DROP ROLE regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
diff --git a/contrib/file_fdw/output/file_fdw.source b/contrib/file_fdw/output/file_fdw.source
deleted file mode 100644
index 891146fef38..00000000000
--- a/contrib/file_fdw/output/file_fdw.source
+++ /dev/null
@@ -1,477 +0,0 @@
---
--- Test foreign-data wrapper file_fdw.
---
--- directory paths are passed to us in environment variables
-\getenv abs_srcdir PG_ABS_SRCDIR
--- Clean up in case a prior regression run failed
-SET client_min_messages TO 'warning';
-DROP ROLE IF EXISTS regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
-RESET client_min_messages;
-CREATE ROLE regress_file_fdw_superuser LOGIN SUPERUSER; -- is a superuser
-CREATE ROLE regress_file_fdw_user LOGIN; -- has priv and user mapping
-CREATE ROLE regress_no_priv_user LOGIN; -- has priv but no user mapping
--- Install file_fdw
-CREATE EXTENSION file_fdw;
--- create function to filter unstable results of EXPLAIN
-CREATE FUNCTION explain_filter(text) RETURNS setof text
-LANGUAGE plpgsql AS
-$$
-declare
-  ln text;
-begin
-  for ln in execute $1
-  loop
-    -- Remove the path portion of foreign file names
-    ln := regexp_replace(ln, 'Foreign File: .*/([a-z.]+)$', 'Foreign File: .../\1');
-    return next ln;
-  end loop;
-end;
-$$;
--- regress_file_fdw_superuser owns fdw-related objects
-SET ROLE regress_file_fdw_superuser;
-CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw;
--- privilege tests
-SET ROLE regress_file_fdw_user;
-CREATE FOREIGN DATA WRAPPER file_fdw2 HANDLER file_fdw_handler VALIDATOR file_fdw_validator; -- ERROR
-ERROR:  permission denied to create foreign-data wrapper "file_fdw2"
-HINT:  Must be superuser to create a foreign-data wrapper.
-CREATE SERVER file_server2 FOREIGN DATA WRAPPER file_fdw; -- ERROR
-ERROR:  permission denied for foreign-data wrapper file_fdw
-CREATE USER MAPPING FOR regress_file_fdw_user SERVER file_server; -- ERROR
-ERROR:  permission denied for foreign server file_server
-SET ROLE regress_file_fdw_superuser;
-GRANT USAGE ON FOREIGN SERVER file_server TO regress_file_fdw_user;
-SET ROLE regress_file_fdw_user;
-CREATE USER MAPPING FOR regress_file_fdw_user SERVER file_server;
--- create user mappings and grant privilege to test users
-SET ROLE regress_file_fdw_superuser;
-CREATE USER MAPPING FOR regress_file_fdw_superuser SERVER file_server;
-CREATE USER MAPPING FOR regress_no_priv_user SERVER file_server;
--- validator tests
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'xml'); -- ERROR
-ERROR:  COPY format "xml" not recognized
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', header 'true'); -- ERROR
-ERROR:  COPY HEADER available only in CSV mode
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', quote ':'); -- ERROR
-ERROR:  COPY quote available only in CSV mode
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape ':'); -- ERROR
-ERROR:  COPY escape available only in CSV mode
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', header 'true'); -- ERROR
-ERROR:  COPY HEADER available only in CSV mode
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', quote ':'); -- ERROR
-ERROR:  COPY quote available only in CSV mode
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', escape ':'); -- ERROR
-ERROR:  COPY escape available only in CSV mode
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter 'a'); -- ERROR
-ERROR:  COPY delimiter cannot be "a"
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape '-'); -- ERROR
-ERROR:  COPY escape available only in CSV mode
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', quote '-', null '=-='); -- ERROR
-ERROR:  CSV quote character must not appear in the NULL specification
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '-', null '=-='); -- ERROR
-ERROR:  COPY delimiter must not appear in the NULL specification
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '-', quote '-'); -- ERROR
-ERROR:  COPY delimiter and quote must be different
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '---'); -- ERROR
-ERROR:  COPY delimiter must be a single one-byte character
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', quote '---'); -- ERROR
-ERROR:  COPY quote must be a single one-byte character
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', escape '---'); -- ERROR
-ERROR:  COPY escape must be a single one-byte character
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '\'); -- ERROR
-ERROR:  COPY delimiter cannot be "\"
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '.'); -- ERROR
-ERROR:  COPY delimiter cannot be "."
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '1'); -- ERROR
-ERROR:  COPY delimiter cannot be "1"
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter 'a'); -- ERROR
-ERROR:  COPY delimiter cannot be "a"
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '
-'); -- ERROR
-ERROR:  COPY delimiter cannot be newline or carriage return
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', null '
-'); -- ERROR
-ERROR:  COPY null representation cannot use newline or carriage return
-CREATE FOREIGN TABLE tbl () SERVER file_server; -- ERROR
-ERROR:  either filename or program is required for file_fdw foreign tables
-\set filename :abs_srcdir '/data/agg.data'
-CREATE FOREIGN TABLE agg_text (
-  a int2 CHECK (a >= 0),
-  b float4
-) SERVER file_server
-OPTIONS (format 'text', filename :'filename', delimiter '	', null '\N');
-GRANT SELECT ON agg_text TO regress_file_fdw_user;
-\set filename :abs_srcdir '/data/agg.csv'
-CREATE FOREIGN TABLE agg_csv (
-  a int2,
-  b float4
-) SERVER file_server
-OPTIONS (format 'csv', filename :'filename', header 'true', delimiter ';', quote '@', escape '"', null '');
-ALTER FOREIGN TABLE agg_csv ADD CHECK (a >= 0);
-\set filename :abs_srcdir '/data/agg.bad'
-CREATE FOREIGN TABLE agg_bad (
-  a int2,
-  b float4
-) SERVER file_server
-OPTIONS (format 'csv', filename :'filename', header 'true', delimiter ';', quote '@', escape '"', null '');
-ALTER FOREIGN TABLE agg_bad ADD CHECK (a >= 0);
--- per-column options tests
-\set filename :abs_srcdir '/data/text.csv'
-CREATE FOREIGN TABLE text_csv (
-  word1 text OPTIONS (force_not_null 'true'),
-  word2 text OPTIONS (force_not_null 'off'),
-  word3 text OPTIONS (force_null 'true'),
-  word4 text OPTIONS (force_null 'off')
-) SERVER file_server
-OPTIONS (format 'text', filename :'filename', null 'NULL');
-SELECT * FROM text_csv; -- ERROR
-ERROR:  COPY force not null available only in CSV mode
-ALTER FOREIGN TABLE text_csv OPTIONS (SET format 'csv');
-\pset null _null_
-SELECT * FROM text_csv;
- word1 | word2  | word3  | word4
--------+--------+--------+--------
- AAA   | aaa    | 123    |
- XYZ   | xyz    |        | 321
- NULL  | _null_ | _null_ | _null_
- NULL  | _null_ | _null_ | _null_
- ABC   | abc    |        |
-(5 rows)
-
--- force_not_null and force_null can be used together on the same column
-ALTER FOREIGN TABLE text_csv ALTER COLUMN word1 OPTIONS (force_null 'true');
-ALTER FOREIGN TABLE text_csv ALTER COLUMN word3 OPTIONS (force_not_null 'true');
--- force_not_null is not allowed to be specified at any foreign object level:
-ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_not_null '*'); -- ERROR
-ERROR:  invalid option "force_not_null"
-HINT:  There are no valid options in this context.
-ALTER SERVER file_server OPTIONS (ADD force_not_null '*'); -- ERROR
-ERROR:  invalid option "force_not_null"
-HINT:  There are no valid options in this context.
-CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
-ERROR:  invalid option "force_not_null"
-HINT:  There are no valid options in this context.
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
-ERROR:  invalid option "force_not_null"
-HINT:  Valid options in this context are: filename, program, format, header, delimiter, quote, escape, null, encoding
--- force_null is not allowed to be specified at any foreign object level:
-ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_null '*'); -- ERROR
-ERROR:  invalid option "force_null"
-HINT:  There are no valid options in this context.
-ALTER SERVER file_server OPTIONS (ADD force_null '*'); -- ERROR
-ERROR:  invalid option "force_null"
-HINT:  There are no valid options in this context.
-CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_null '*'); -- ERROR
-ERROR:  invalid option "force_null"
-HINT:  There are no valid options in this context.
-CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_null '*'); -- ERROR
-ERROR:  invalid option "force_null"
-HINT:  Valid options in this context are: filename, program, format, header, delimiter, quote, escape, null, encoding
--- basic query tests
-SELECT * FROM agg_text WHERE b > 10.0 ORDER BY a;
-  a  |   b
------+--------
-  42 | 324.78
- 100 | 99.097
-(2 rows)
-
-SELECT * FROM agg_csv ORDER BY a;
-  a  |    b
------+---------
-   0 | 0.09561
-  42 |  324.78
- 100 |  99.097
-(3 rows)
-
-SELECT * FROM agg_csv c JOIN agg_text t ON (t.a = c.a) ORDER BY c.a;
-  a  |    b    |  a  |    b
------+---------+-----+---------
-   0 | 0.09561 |   0 | 0.09561
-  42 |  324.78 |  42 |  324.78
- 100 |  99.097 | 100 |  99.097
-(3 rows)
-
--- error context report tests
-SELECT * FROM agg_bad; -- ERROR
-ERROR:  invalid input syntax for type real: "aaa"
-CONTEXT:  COPY agg_bad, line 3, column b: "aaa"
--- misc query tests
-\t on
-SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv');
- Foreign Scan on public.agg_csv
-   Output: a, b
-   Foreign File: .../agg.csv
-
-\t off
-PREPARE st(int) AS SELECT * FROM agg_csv WHERE a = $1;
-EXECUTE st(100);
-  a  |   b
------+--------
- 100 | 99.097
-(1 row)
-
-EXECUTE st(100);
-  a  |   b
------+--------
- 100 | 99.097
-(1 row)
-
-DEALLOCATE st;
--- tableoid
-SELECT tableoid::regclass, b FROM agg_csv;
- tableoid |    b
-----------+---------
- agg_csv  |  99.097
- agg_csv  | 0.09561
- agg_csv  |  324.78
-(3 rows)
-
--- updates aren't supported
-INSERT INTO agg_csv VALUES(1,2.0);
-ERROR:  cannot insert into foreign table "agg_csv"
-UPDATE agg_csv SET a = 1;
-ERROR:  cannot update foreign table "agg_csv"
-DELETE FROM agg_csv WHERE a = 100;
-ERROR:  cannot delete from foreign table "agg_csv"
--- but this should be allowed
-SELECT * FROM agg_csv FOR UPDATE;
-  a  |    b
------+---------
- 100 |  99.097
-   0 | 0.09561
-  42 |  324.78
-(3 rows)
-
--- copy from isn't supported either
-COPY agg_csv FROM STDIN;
-ERROR:  cannot insert into foreign table "agg_csv"
--- constraint exclusion tests
-\t on
-SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0');
- Foreign Scan on public.agg_csv
-   Output: a, b
-   Filter: (agg_csv.a < 0)
-   Foreign File: .../agg.csv
-
-\t off
-SELECT * FROM agg_csv WHERE a < 0;
- a | b
----+---
-(0 rows)
-
-SET constraint_exclusion = 'on';
-\t on
-SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0');
- Result
-   Output: a, b
-   One-Time Filter: false
-
-\t off
-SELECT * FROM agg_csv WHERE a < 0;
- a | b
----+---
-(0 rows)
-
-RESET constraint_exclusion;
--- table inheritance tests
-CREATE TABLE agg (a int2, b float4);
-ALTER FOREIGN TABLE agg_csv INHERIT agg;
-SELECT tableoid::regclass, * FROM agg;
- tableoid |  a  |    b
-----------+-----+---------
- agg_csv  | 100 |  99.097
- agg_csv  |   0 | 0.09561
- agg_csv  |  42 |  324.78
-(3 rows)
-
-SELECT tableoid::regclass, * FROM agg_csv;
- tableoid |  a  |    b
-----------+-----+---------
- agg_csv  | 100 |  99.097
- agg_csv  |   0 | 0.09561
- agg_csv  |  42 |  324.78
-(3 rows)
-
-SELECT tableoid::regclass, * FROM ONLY agg;
- tableoid | a | b
-----------+---+---
-(0 rows)
-
--- updates aren't supported
-UPDATE agg SET a = 1;
-ERROR:  cannot update foreign table "agg_csv"
-DELETE FROM agg WHERE a = 100;
-ERROR:  cannot delete from foreign table "agg_csv"
--- but this should be allowed
-SELECT tableoid::regclass, * FROM agg FOR UPDATE;
- tableoid |  a  |    b
-----------+-----+---------
- agg_csv  | 100 |  99.097
- agg_csv  |   0 | 0.09561
- agg_csv  |  42 |  324.78
-(3 rows)
-
-ALTER FOREIGN TABLE agg_csv NO INHERIT agg;
-DROP TABLE agg;
--- declarative partitioning tests
-SET ROLE regress_file_fdw_superuser;
-CREATE TABLE pt (a int, b text) partition by list (a);
-\set filename :abs_srcdir '/data/list1.csv'
-CREATE FOREIGN TABLE p1 partition of pt for values in (1) SERVER file_server
-OPTIONS (format 'csv', filename :'filename', delimiter ',');
-CREATE TABLE p2 partition of pt for values in (2);
-SELECT tableoid::regclass, * FROM pt;
- tableoid | a |  b
-----------+---+-----
- p1       | 1 | foo
- p1       | 1 | bar
-(2 rows)
-
-SELECT tableoid::regclass, * FROM p1;
- tableoid | a |  b
-----------+---+-----
- p1       | 1 | foo
- p1       | 1 | bar
-(2 rows)
-
-SELECT tableoid::regclass, * FROM p2;
- tableoid | a | b
-----------+---+---
-(0 rows)
-
-\set filename :abs_srcdir '/data/list2.bad'
-COPY pt FROM :'filename' with (format 'csv', delimiter ','); -- ERROR
-ERROR:  cannot insert into foreign table "p1"
-CONTEXT:  COPY pt, line 2: "1,qux"
-\set filename :abs_srcdir '/data/list2.csv'
-COPY pt FROM :'filename' with (format 'csv', delimiter ',');
-SELECT tableoid::regclass, * FROM pt;
- tableoid | a |  b
-----------+---+-----
- p1       | 1 | foo
- p1       | 1 | bar
- p2       | 2 | baz
- p2       | 2 | qux
-(4 rows)
-
-SELECT tableoid::regclass, * FROM p1;
- tableoid | a |  b
-----------+---+-----
- p1       | 1 | foo
- p1       | 1 | bar
-(2 rows)
-
-SELECT tableoid::regclass, * FROM p2;
- tableoid | a |  b
-----------+---+-----
- p2       | 2 | baz
- p2       | 2 | qux
-(2 rows)
-
-INSERT INTO pt VALUES (1, 'xyzzy'); -- ERROR
-ERROR:  cannot insert into foreign table "p1"
-INSERT INTO pt VALUES (2, 'xyzzy');
-UPDATE pt set a = 1 where a = 2; -- ERROR
-ERROR:  cannot insert into foreign table "p1"
-SELECT tableoid::regclass, * FROM pt;
- tableoid | a |   b
-----------+---+-------
- p1       | 1 | foo
- p1       | 1 | bar
- p2       | 2 | baz
- p2       | 2 | qux
- p2       | 2 | xyzzy
-(5 rows)
-
-SELECT tableoid::regclass, * FROM p1;
- tableoid | a |  b
-----------+---+-----
- p1       | 1 | foo
- p1       | 1 | bar
-(2 rows)
-
-SELECT tableoid::regclass, * FROM p2;
- tableoid | a |   b
-----------+---+-------
- p2       | 2 | baz
- p2       | 2 | qux
- p2       | 2 | xyzzy
-(3 rows)
-
-DROP TABLE pt;
--- generated column tests
-\set filename :abs_srcdir '/data/list1.csv'
-CREATE FOREIGN TABLE gft1 (a int, b text, c text GENERATED ALWAYS AS ('foo') STORED) SERVER file_server
-OPTIONS (format 'csv', filename :'filename', delimiter ',');
-SELECT a, c FROM gft1;
- a |   c
----+--------
- 1 | _null_
- 1 | _null_
-(2 rows)
-
-DROP FOREIGN TABLE gft1;
--- privilege tests
-SET ROLE regress_file_fdw_superuser;
-SELECT * FROM agg_text ORDER BY a;
-  a  |    b
------+---------
-   0 | 0.09561
-  42 |  324.78
-  56 |     7.8
- 100 |  99.097
-(4 rows)
-
-SET ROLE regress_file_fdw_user;
-SELECT * FROM agg_text ORDER BY a;
-  a  |    b
------+---------
-   0 | 0.09561
-  42 |  324.78
-  56 |     7.8
- 100 |  99.097
-(4 rows)
-
-SET ROLE regress_no_priv_user;
-SELECT * FROM agg_text ORDER BY a; -- ERROR
-ERROR:  permission denied for foreign table agg_text
-SET ROLE regress_file_fdw_user;
-\t on
-SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_text WHERE a > 0');
- Foreign Scan on public.agg_text
-   Output: a, b
-   Filter: (agg_text.a > 0)
-   Foreign File: .../agg.data
-
-\t off
--- file FDW allows foreign tables to be accessed without user mapping
-DROP USER MAPPING FOR regress_file_fdw_user SERVER file_server;
-SELECT * FROM agg_text ORDER BY a;
-  a  |    b
------+---------
-   0 | 0.09561
-  42 |  324.78
-  56 |     7.8
- 100 |  99.097
-(4 rows)
-
--- privilege tests for object
-SET ROLE regress_file_fdw_superuser;
-ALTER FOREIGN TABLE agg_text OWNER TO regress_file_fdw_user;
-ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text');
-SET ROLE regress_file_fdw_user;
-ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text');
-ERROR:  only superuser or a member of the pg_read_server_files role may specify the filename option of a file_fdw foreign table
-SET ROLE regress_file_fdw_superuser;
--- cleanup
-RESET ROLE;
-DROP EXTENSION file_fdw CASCADE;
-NOTICE:  drop cascades to 7 other objects
-DETAIL:  drop cascades to server file_server
-drop cascades to user mapping for regress_file_fdw_superuser on server file_server
-drop cascades to user mapping for regress_no_priv_user on server file_server
-drop cascades to foreign table agg_text
-drop cascades to foreign table agg_csv
-drop cascades to foreign table agg_bad
-drop cascades to foreign table text_csv
-DROP ROLE regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
diff --git a/contrib/file_fdw/sql/.gitignore b/contrib/file_fdw/sql/.gitignore
deleted file mode 100644
index ebf16fed947..00000000000
--- a/contrib/file_fdw/sql/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/file_fdw.sql
diff --git a/contrib/file_fdw/sql/file_fdw.sql b/contrib/file_fdw/sql/file_fdw.sql
new file mode 100644
index 00000000000..0ea8b145080
--- /dev/null
+++ b/contrib/file_fdw/sql/file_fdw.sql
@@ -0,0 +1,253 @@
+--
+-- Test foreign-data wrapper file_fdw.
+--
+
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+
+-- Clean up in case a prior regression run failed
+SET client_min_messages TO 'warning';
+DROP ROLE IF EXISTS regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user;
+RESET client_min_messages;
+
+CREATE ROLE regress_file_fdw_superuser LOGIN SUPERUSER; -- is a superuser
+CREATE ROLE regress_file_fdw_user LOGIN; -- has priv and user mapping
+CREATE ROLE regress_no_priv_user LOGIN; -- has priv but no user mapping
+
+-- Install file_fdw
+CREATE EXTENSION file_fdw;
+
+-- create function to filter unstable results of EXPLAIN
+CREATE FUNCTION explain_filter(text) RETURNS setof text
+LANGUAGE plpgsql AS
+$$
+declare
+  ln text;
+begin
+  for ln in execute $1
+  loop
+    -- Remove the path portion of foreign file names
+    ln := regexp_replace(ln, 'Foreign File: .*/([a-z.]+)$', 'Foreign File: .../\1');
+    return next ln;
+  end loop;
+end;
+$$;
+
+-- regress_file_fdw_superuser owns fdw-related objects
+SET ROLE regress_file_fdw_superuser;
+CREATE SERVER file_server FOREIGN DATA WRAPPER file_fdw;
+
+-- privilege tests
+SET ROLE regress_file_fdw_user;
+CREATE FOREIGN DATA WRAPPER file_fdw2 HANDLER file_fdw_handler VALIDATOR file_fdw_validator; -- ERROR
+CREATE SERVER file_server2 FOREIGN DATA WRAPPER file_fdw; -- ERROR
+CREATE USER MAPPING FOR regress_file_fdw_user SERVER file_server; -- ERROR
+
+SET ROLE regress_file_fdw_superuser;
+GRANT USAGE ON FOREIGN SERVER file_server TO regress_file_fdw_user;
+
+SET ROLE regress_file_fdw_user;
+CREATE USER MAPPING FOR regress_file_fdw_user SERVER file_server;
+
+-- create user mappings and grant privilege to test users
+SET ROLE regress_file_fdw_superuser;
+CREATE USER MAPPING FOR regress_file_fdw_superuser SERVER file_server;
+CREATE USER MAPPING FOR regress_no_priv_user SERVER file_server;
+
+-- validator tests
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'xml'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', header 'true'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', quote ':'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape ':'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', header 'true'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', quote ':'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'binary', escape ':'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter 'a'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape '-'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', quote '-', null '=-='); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '-', null '=-='); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '-', quote '-'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '---'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', quote '---'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', escape '---'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '\'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '.'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter '1'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', delimiter 'a'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', delimiter '
+'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'csv', null '
+'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server; -- ERROR
+
+\set filename :abs_srcdir '/data/agg.data'
+CREATE FOREIGN TABLE agg_text (
+  a int2 CHECK (a >= 0),
+  b float4
+) SERVER file_server
+OPTIONS (format 'text', filename :'filename', delimiter '	', null '\N');
+GRANT SELECT ON agg_text TO regress_file_fdw_user;
+
+\set filename :abs_srcdir '/data/agg.csv'
+CREATE FOREIGN TABLE agg_csv (
+  a int2,
+  b float4
+) SERVER file_server
+OPTIONS (format 'csv', filename :'filename', header 'true', delimiter ';', quote '@', escape '"', null '');
+ALTER FOREIGN TABLE agg_csv ADD CHECK (a >= 0);
+
+\set filename :abs_srcdir '/data/agg.bad'
+CREATE FOREIGN TABLE agg_bad (
+  a int2,
+  b float4
+) SERVER file_server
+OPTIONS (format 'csv', filename :'filename', header 'true', delimiter ';', quote '@', escape '"', null '');
+ALTER FOREIGN TABLE agg_bad ADD CHECK (a >= 0);
+
+-- per-column options tests
+\set filename :abs_srcdir '/data/text.csv'
+CREATE FOREIGN TABLE text_csv (
+  word1 text OPTIONS (force_not_null 'true'),
+  word2 text OPTIONS (force_not_null 'off'),
+  word3 text OPTIONS (force_null 'true'),
+  word4 text OPTIONS (force_null 'off')
+) SERVER file_server
+OPTIONS (format 'text', filename :'filename', null 'NULL');
+SELECT * FROM text_csv; -- ERROR
+ALTER FOREIGN TABLE text_csv OPTIONS (SET format 'csv');
+\pset null _null_
+SELECT * FROM text_csv;
+
+-- force_not_null and force_null can be used together on the same column
+ALTER FOREIGN TABLE text_csv ALTER COLUMN word1 OPTIONS (force_null 'true');
+ALTER FOREIGN TABLE text_csv ALTER COLUMN word3 OPTIONS (force_not_null 'true');
+
+-- force_not_null is not allowed to be specified at any foreign object level:
+ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_not_null '*'); -- ERROR
+ALTER SERVER file_server OPTIONS (ADD force_not_null '*'); -- ERROR
+CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_not_null '*'); -- ERROR
+
+-- force_null is not allowed to be specified at any foreign object level:
+ALTER FOREIGN DATA WRAPPER file_fdw OPTIONS (ADD force_null '*'); -- ERROR
+ALTER SERVER file_server OPTIONS (ADD force_null '*'); -- ERROR
+CREATE USER MAPPING FOR public SERVER file_server OPTIONS (force_null '*'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (force_null '*'); -- ERROR
+
+-- basic query tests
+SELECT * FROM agg_text WHERE b > 10.0 ORDER BY a;
+SELECT * FROM agg_csv ORDER BY a;
+SELECT * FROM agg_csv c JOIN agg_text t ON (t.a = c.a) ORDER BY c.a;
+
+-- error context report tests
+SELECT * FROM agg_bad; -- ERROR
+
+-- misc query tests
+\t on
+SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv');
+\t off
+PREPARE st(int) AS SELECT * FROM agg_csv WHERE a = $1;
+EXECUTE st(100);
+EXECUTE st(100);
+DEALLOCATE st;
+
+-- tableoid
+SELECT tableoid::regclass, b FROM agg_csv;
+
+-- updates aren't supported
+INSERT INTO agg_csv VALUES(1,2.0);
+UPDATE agg_csv SET a = 1;
+DELETE FROM agg_csv WHERE a = 100;
+-- but this should be allowed
+SELECT * FROM agg_csv FOR UPDATE;
+
+-- copy from isn't supported either
+COPY agg_csv
FROM STDIN; +12 3.4 +\. + +-- constraint exclusion tests +\t on +SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0'); +\t off +SELECT * FROM agg_csv WHERE a < 0; +SET constraint_exclusion = 'on'; +\t on +SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_csv WHERE a < 0'); +\t off +SELECT * FROM agg_csv WHERE a < 0; +RESET constraint_exclusion; + +-- table inheritance tests +CREATE TABLE agg (a int2, b float4); +ALTER FOREIGN TABLE agg_csv INHERIT agg; +SELECT tableoid::regclass, * FROM agg; +SELECT tableoid::regclass, * FROM agg_csv; +SELECT tableoid::regclass, * FROM ONLY agg; +-- updates aren't supported +UPDATE agg SET a = 1; +DELETE FROM agg WHERE a = 100; +-- but this should be allowed +SELECT tableoid::regclass, * FROM agg FOR UPDATE; +ALTER FOREIGN TABLE agg_csv NO INHERIT agg; +DROP TABLE agg; + +-- declarative partitioning tests +SET ROLE regress_file_fdw_superuser; +CREATE TABLE pt (a int, b text) partition by list (a); +\set filename :abs_srcdir '/data/list1.csv' +CREATE FOREIGN TABLE p1 partition of pt for values in (1) SERVER file_server +OPTIONS (format 'csv', filename :'filename', delimiter ','); +CREATE TABLE p2 partition of pt for values in (2); +SELECT tableoid::regclass, * FROM pt; +SELECT tableoid::regclass, * FROM p1; +SELECT tableoid::regclass, * FROM p2; +\set filename :abs_srcdir '/data/list2.bad' +COPY pt FROM :'filename' with (format 'csv', delimiter ','); -- ERROR +\set filename :abs_srcdir '/data/list2.csv' +COPY pt FROM :'filename' with (format 'csv', delimiter ','); +SELECT tableoid::regclass, * FROM pt; +SELECT tableoid::regclass, * FROM p1; +SELECT tableoid::regclass, * FROM p2; +INSERT INTO pt VALUES (1, 'xyzzy'); -- ERROR +INSERT INTO pt VALUES (2, 'xyzzy'); +UPDATE pt set a = 1 where a = 2; -- ERROR +SELECT tableoid::regclass, * FROM pt; +SELECT tableoid::regclass, * FROM p1; +SELECT tableoid::regclass, * FROM p2; +DROP TABLE pt; + +-- generated column tests +\set filename :abs_srcdir '/data/list1.csv' +CREATE FOREIGN TABLE gft1 (a int, b text, c text GENERATED ALWAYS AS ('foo') STORED) SERVER file_server +OPTIONS (format 'csv', filename :'filename', delimiter ','); +SELECT a, c FROM gft1; +DROP FOREIGN TABLE gft1; + +-- privilege tests +SET ROLE regress_file_fdw_superuser; +SELECT * FROM agg_text ORDER BY a; +SET ROLE regress_file_fdw_user; +SELECT * FROM agg_text ORDER BY a; +SET ROLE regress_no_priv_user; +SELECT * FROM agg_text ORDER BY a; -- ERROR +SET ROLE regress_file_fdw_user; +\t on +SELECT explain_filter('EXPLAIN (VERBOSE, COSTS FALSE) SELECT * FROM agg_text WHERE a > 0'); +\t off +-- file FDW allows foreign tables to be accessed without user mapping +DROP USER MAPPING FOR regress_file_fdw_user SERVER file_server; +SELECT * FROM agg_text ORDER BY a; + +-- privilege tests for object +SET ROLE regress_file_fdw_superuser; +ALTER FOREIGN TABLE agg_text OWNER TO regress_file_fdw_user; +ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text'); +SET ROLE regress_file_fdw_user; +ALTER FOREIGN TABLE agg_text OPTIONS (SET format 'text'); +SET ROLE regress_file_fdw_superuser; + +-- cleanup +RESET ROLE; +DROP EXTENSION file_fdw CASCADE; +DROP ROLE regress_file_fdw_superuser, regress_file_fdw_user, regress_no_priv_user; diff --git a/src/interfaces/ecpg/test/pg_regress_ecpg.c b/src/interfaces/ecpg/test/pg_regress_ecpg.c index 15f588a8023..9465ba7845a 100644 --- a/src/interfaces/ecpg/test/pg_regress_ecpg.c +++ b/src/interfaces/ecpg/test/pg_regress_ecpg.c @@ -166,9 +166,14 @@ ecpg_start_test(const char 
*testname, snprintf(inprg, sizeof(inprg), "%s/%s", inputdir, testname); snprintf(insource, sizeof(insource), "%s.c", testname); + /* make a version of the test name that has dashes in place of slashes */ initStringInfo(&testname_dash); appendStringInfoString(&testname_dash, testname); - replace_string(&testname_dash, "/", "-"); + for (char *c = testname_dash.data; *c != '\0'; c++) + { + if (*c == '/') + *c = '-'; + } snprintf(expectfile_stdout, sizeof(expectfile_stdout), "%s/expected/%s.stdout", diff --git a/src/pl/plpgsql/src/Makefile b/src/pl/plpgsql/src/Makefile index 9946abbc1de..f7eb42d54fc 100644 --- a/src/pl/plpgsql/src/Makefile +++ b/src/pl/plpgsql/src/Makefile @@ -41,11 +41,6 @@ TOOLSDIR = $(top_srcdir)/src/tools GEN_KEYWORDLIST = $(PERL) -I $(TOOLSDIR) $(TOOLSDIR)/gen_keywordlist.pl GEN_KEYWORDLIST_DEPS = $(TOOLSDIR)/gen_keywordlist.pl $(TOOLSDIR)/PerfectHash.pm -# Test input and expected files. These are created by pg_regress itself, so we -# don't have a rule to create them. We do need rules to clean them however. -input_files = $(patsubst $(srcdir)/input/%.source,sql/%.sql, $(wildcard $(srcdir)/input/*.source)) -output_files := $(patsubst $(srcdir)/output/%.source,expected/%.out, $(wildcard $(srcdir)/output/*.source)) - all: all-lib # Shared library stuff @@ -116,7 +111,6 @@ distprep: pl_gram.h pl_gram.c plerrcodes.h pl_reserved_kwlist_d.h pl_unreserved_ # are not cleaned here. clean distclean: clean-lib rm -f $(OBJS) - rm -f $(output_files) $(input_files) rm -rf $(pg_regress_clean_files) maintainer-clean: distclean diff --git a/src/pl/plpgsql/src/expected/.gitignore b/src/pl/plpgsql/src/expected/.gitignore deleted file mode 100644 index 13e59187210..00000000000 --- a/src/pl/plpgsql/src/expected/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/plpgsql_copy.out diff --git a/src/pl/plpgsql/src/expected/plpgsql_copy.out b/src/pl/plpgsql/src/expected/plpgsql_copy.out new file mode 100644 index 00000000000..bc834be1971 --- /dev/null +++ b/src/pl/plpgsql/src/expected/plpgsql_copy.out @@ -0,0 +1,82 @@ +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR +-- set up file names to use +\set srcfilename :abs_srcdir '/data/copy1.data' +\set destfilename :abs_builddir '/results/copy1.data' +CREATE TABLE copy1 (a int, b float); +-- COPY TO/FROM not authorized from client. 
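(For context: the \getenv and \set lines at the top of this file are the replacement for the old @abs_srcdir@ substitution that pg_regress performed on .source files. \getenv copies an environment variable into a psql variable, \set concatenates its arguments, and :'var' interpolates a variable as a single-quoted SQL literal. A minimal sketch of the idiom, using a hypothetical table t and data file t.data:

-- read the source directory from the environment
\getenv abs_srcdir PG_ABS_SRCDIR
-- concatenate the pieces into a single path variable
\set filename :abs_srcdir '/data/t.data'
-- interpolate the variable as a single-quoted literal
COPY t FROM :'filename';

Every script converted here follows the same pattern.)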
+DO LANGUAGE plpgsql $$ +BEGIN + COPY copy1 TO stdout; +END; +$$; +ERROR: cannot COPY to/from client in PL/pgSQL +CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement +DO LANGUAGE plpgsql $$ +BEGIN + COPY copy1 FROM stdin; +END; +$$; +ERROR: cannot COPY to/from client in PL/pgSQL +CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement +DO LANGUAGE plpgsql $$ +BEGIN + EXECUTE 'COPY copy1 TO stdout'; +END; +$$; +ERROR: cannot COPY to/from client in PL/pgSQL +CONTEXT: PL/pgSQL function inline_code_block line 3 at EXECUTE +DO LANGUAGE plpgsql $$ +BEGIN + EXECUTE 'COPY copy1 FROM stdin'; +END; +$$; +ERROR: cannot COPY to/from client in PL/pgSQL +CONTEXT: PL/pgSQL function inline_code_block line 3 at EXECUTE +-- Valid cases +-- COPY FROM +\set dobody 'BEGIN COPY copy1 FROM ' :'srcfilename' '; END' +DO LANGUAGE plpgsql :'dobody'; +SELECT * FROM copy1 ORDER BY 1; + a | b +---+----- + 1 | 1.1 + 2 | 2.2 + 3 | 3.3 +(3 rows) + +TRUNCATE copy1; +\set cmd 'COPY copy1 FROM ' :'srcfilename' +\set dobody 'BEGIN EXECUTE ' :'cmd' '; END' +DO LANGUAGE plpgsql :'dobody'; +SELECT * FROM copy1 ORDER BY 1; + a | b +---+----- + 1 | 1.1 + 2 | 2.2 + 3 | 3.3 +(3 rows) + +-- COPY TO +-- Copy the data externally once, then process it back to the table. +\set dobody 'BEGIN COPY copy1 TO ' :'destfilename' '; END' +DO LANGUAGE plpgsql :'dobody'; +TRUNCATE copy1; +\set dobody 'BEGIN COPY copy1 FROM ' :'destfilename' '; END' +DO LANGUAGE plpgsql :'dobody'; +\set cmd 'COPY copy1 FROM ' :'destfilename' +\set dobody 'BEGIN EXECUTE ' :'cmd' '; END' +DO LANGUAGE plpgsql :'dobody'; +SELECT * FROM copy1 ORDER BY 1; + a | b +---+----- + 1 | 1.1 + 1 | 1.1 + 2 | 2.2 + 2 | 2.2 + 3 | 3.3 + 3 | 3.3 +(6 rows) + +DROP TABLE copy1; diff --git a/src/pl/plpgsql/src/input/plpgsql_copy.source b/src/pl/plpgsql/src/input/plpgsql_copy.source deleted file mode 100644 index 37f1fa132b2..00000000000 --- a/src/pl/plpgsql/src/input/plpgsql_copy.source +++ /dev/null @@ -1,58 +0,0 @@ --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR - --- set up file names to use -\set srcfilename :abs_srcdir '/data/copy1.data' -\set destfilename :abs_builddir '/results/copy1.data' - -CREATE TABLE copy1 (a int, b float); - --- COPY TO/FROM not authorized from client. -DO LANGUAGE plpgsql $$ -BEGIN - COPY copy1 TO stdout; -END; -$$; -DO LANGUAGE plpgsql $$ -BEGIN - COPY copy1 FROM stdin; -END; -$$; -DO LANGUAGE plpgsql $$ -BEGIN - EXECUTE 'COPY copy1 TO stdout'; -END; -$$; -DO LANGUAGE plpgsql $$ -BEGIN - EXECUTE 'COPY copy1 FROM stdin'; -END; -$$; - --- Valid cases --- COPY FROM -\set dobody 'BEGIN COPY copy1 FROM ' :'srcfilename' '; END' -DO LANGUAGE plpgsql :'dobody'; -SELECT * FROM copy1 ORDER BY 1; -TRUNCATE copy1; -\set cmd 'COPY copy1 FROM ' :'srcfilename' -\set dobody 'BEGIN EXECUTE ' :'cmd' '; END' -DO LANGUAGE plpgsql :'dobody'; -SELECT * FROM copy1 ORDER BY 1; - --- COPY TO --- Copy the data externally once, then process it back to the table. 
-\set dobody 'BEGIN COPY copy1 TO ' :'destfilename' '; END' -DO LANGUAGE plpgsql :'dobody'; -TRUNCATE copy1; -\set dobody 'BEGIN COPY copy1 FROM ' :'destfilename' '; END' -DO LANGUAGE plpgsql :'dobody'; - -\set cmd 'COPY copy1 FROM ' :'destfilename' -\set dobody 'BEGIN EXECUTE ' :'cmd' '; END' -DO LANGUAGE plpgsql :'dobody'; - -SELECT * FROM copy1 ORDER BY 1; - -DROP TABLE copy1; diff --git a/src/pl/plpgsql/src/output/plpgsql_copy.source b/src/pl/plpgsql/src/output/plpgsql_copy.source deleted file mode 100644 index bc834be1971..00000000000 --- a/src/pl/plpgsql/src/output/plpgsql_copy.source +++ /dev/null @@ -1,82 +0,0 @@ --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR --- set up file names to use -\set srcfilename :abs_srcdir '/data/copy1.data' -\set destfilename :abs_builddir '/results/copy1.data' -CREATE TABLE copy1 (a int, b float); --- COPY TO/FROM not authorized from client. -DO LANGUAGE plpgsql $$ -BEGIN - COPY copy1 TO stdout; -END; -$$; -ERROR: cannot COPY to/from client in PL/pgSQL -CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement -DO LANGUAGE plpgsql $$ -BEGIN - COPY copy1 FROM stdin; -END; -$$; -ERROR: cannot COPY to/from client in PL/pgSQL -CONTEXT: PL/pgSQL function inline_code_block line 3 at SQL statement -DO LANGUAGE plpgsql $$ -BEGIN - EXECUTE 'COPY copy1 TO stdout'; -END; -$$; -ERROR: cannot COPY to/from client in PL/pgSQL -CONTEXT: PL/pgSQL function inline_code_block line 3 at EXECUTE -DO LANGUAGE plpgsql $$ -BEGIN - EXECUTE 'COPY copy1 FROM stdin'; -END; -$$; -ERROR: cannot COPY to/from client in PL/pgSQL -CONTEXT: PL/pgSQL function inline_code_block line 3 at EXECUTE --- Valid cases --- COPY FROM -\set dobody 'BEGIN COPY copy1 FROM ' :'srcfilename' '; END' -DO LANGUAGE plpgsql :'dobody'; -SELECT * FROM copy1 ORDER BY 1; - a | b ----+----- - 1 | 1.1 - 2 | 2.2 - 3 | 3.3 -(3 rows) - -TRUNCATE copy1; -\set cmd 'COPY copy1 FROM ' :'srcfilename' -\set dobody 'BEGIN EXECUTE ' :'cmd' '; END' -DO LANGUAGE plpgsql :'dobody'; -SELECT * FROM copy1 ORDER BY 1; - a | b ----+----- - 1 | 1.1 - 2 | 2.2 - 3 | 3.3 -(3 rows) - --- COPY TO --- Copy the data externally once, then process it back to the table. -\set dobody 'BEGIN COPY copy1 TO ' :'destfilename' '; END' -DO LANGUAGE plpgsql :'dobody'; -TRUNCATE copy1; -\set dobody 'BEGIN COPY copy1 FROM ' :'destfilename' '; END' -DO LANGUAGE plpgsql :'dobody'; -\set cmd 'COPY copy1 FROM ' :'destfilename' -\set dobody 'BEGIN EXECUTE ' :'cmd' '; END' -DO LANGUAGE plpgsql :'dobody'; -SELECT * FROM copy1 ORDER BY 1; - a | b ----+----- - 1 | 1.1 - 1 | 1.1 - 2 | 2.2 - 2 | 2.2 - 3 | 3.3 - 3 | 3.3 -(6 rows) - -DROP TABLE copy1; diff --git a/src/pl/plpgsql/src/sql/.gitignore b/src/pl/plpgsql/src/sql/.gitignore deleted file mode 100644 index 210bee188ef..00000000000 --- a/src/pl/plpgsql/src/sql/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/plpgsql_copy.sql diff --git a/src/pl/plpgsql/src/sql/plpgsql_copy.sql b/src/pl/plpgsql/src/sql/plpgsql_copy.sql new file mode 100644 index 00000000000..37f1fa132b2 --- /dev/null +++ b/src/pl/plpgsql/src/sql/plpgsql_copy.sql @@ -0,0 +1,58 @@ +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR + +-- set up file names to use +\set srcfilename :abs_srcdir '/data/copy1.data' +\set destfilename :abs_builddir '/results/copy1.data' + +CREATE TABLE copy1 (a int, b float); + +-- COPY TO/FROM not authorized from client. 
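(A note on the quoting idiom used by the valid cases further down: COPY only accepts the file name as a literal, and DO takes no parameters, so the script assembles the entire DO body in a psql variable and then interpolates it. Roughly, reusing the script's own copy1 table and srcfilename variable:

-- :'srcfilename' expands to a single-quoted path, so dobody becomes valid PL/pgSQL
\set dobody 'BEGIN COPY copy1 FROM ' :'srcfilename' '; END'
-- the assembled body is in turn interpolated as a quoted literal
DO LANGUAGE plpgsql :'dobody';
)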
+DO LANGUAGE plpgsql $$ +BEGIN + COPY copy1 TO stdout; +END; +$$; +DO LANGUAGE plpgsql $$ +BEGIN + COPY copy1 FROM stdin; +END; +$$; +DO LANGUAGE plpgsql $$ +BEGIN + EXECUTE 'COPY copy1 TO stdout'; +END; +$$; +DO LANGUAGE plpgsql $$ +BEGIN + EXECUTE 'COPY copy1 FROM stdin'; +END; +$$; + +-- Valid cases +-- COPY FROM +\set dobody 'BEGIN COPY copy1 FROM ' :'srcfilename' '; END' +DO LANGUAGE plpgsql :'dobody'; +SELECT * FROM copy1 ORDER BY 1; +TRUNCATE copy1; +\set cmd 'COPY copy1 FROM ' :'srcfilename' +\set dobody 'BEGIN EXECUTE ' :'cmd' '; END' +DO LANGUAGE plpgsql :'dobody'; +SELECT * FROM copy1 ORDER BY 1; + +-- COPY TO +-- Copy the data externally once, then process it back to the table. +\set dobody 'BEGIN COPY copy1 TO ' :'destfilename' '; END' +DO LANGUAGE plpgsql :'dobody'; +TRUNCATE copy1; +\set dobody 'BEGIN COPY copy1 FROM ' :'destfilename' '; END' +DO LANGUAGE plpgsql :'dobody'; + +\set cmd 'COPY copy1 FROM ' :'destfilename' +\set dobody 'BEGIN EXECUTE ' :'cmd' '; END' +DO LANGUAGE plpgsql :'dobody'; + +SELECT * FROM copy1 ORDER BY 1; + +DROP TABLE copy1; diff --git a/src/test/regress/GNUmakefile b/src/test/regress/GNUmakefile index fe6e0c98aa2..330eca2b839 100644 --- a/src/test/regress/GNUmakefile +++ b/src/test/regress/GNUmakefile @@ -69,19 +69,12 @@ all: all-lib # Ensure parallel safety if a build is started in this directory $(OBJS): | submake-libpgport submake-generated-headers -# Test input and expected files. These are created by pg_regress itself, so we -# don't have a rule to create them. We do need rules to clean them however. -input_files = $(patsubst $(srcdir)/input/%.source,sql/%.sql, $(wildcard $(srcdir)/input/*.source)) -output_files := $(patsubst $(srcdir)/output/%.source,expected/%.out, $(wildcard $(srcdir)/output/*.source)) - # not installed by default regress_data_files = \ - $(filter-out $(addprefix $(srcdir)/,$(output_files)),$(wildcard $(srcdir)/expected/*.out)) \ - $(wildcard $(srcdir)/input/*.source) \ - $(wildcard $(srcdir)/output/*.source) \ - $(filter-out $(addprefix $(srcdir)/,$(input_files)),$(wildcard $(srcdir)/sql/*.sql)) \ + $(wildcard $(srcdir)/sql/*.sql) \ + $(wildcard $(srcdir)/expected/*.out) \ $(wildcard $(srcdir)/data/*.data) \ $(srcdir)/parallel_schedule $(srcdir)/resultmap @@ -162,6 +155,5 @@ clean distclean maintainer-clean: clean-lib rm -f $(OBJS) refint$(DLSUFFIX) autoinc$(DLSUFFIX) rm -f pg_regress_main.o pg_regress.o pg_regress$(X) # things created by various check targets - rm -f $(output_files) $(input_files) rm -rf testtablespace rm -rf $(pg_regress_clean_files) diff --git a/src/test/regress/expected/.gitignore b/src/test/regress/expected/.gitignore deleted file mode 100644 index b99caf5f40b..00000000000 --- a/src/test/regress/expected/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -/constraints.out -/copy.out -/create_function_0.out -/create_function_1.out -/create_function_2.out -/largeobject.out -/largeobject_1.out -/misc.out -/security_label.out -/tablespace.out diff --git a/src/test/regress/expected/constraints.out b/src/test/regress/expected/constraints.out new file mode 100644 index 00000000000..e32cf8bb574 --- /dev/null +++ b/src/test/regress/expected/constraints.out @@ -0,0 +1,742 @@ +-- +-- CONSTRAINTS +-- Constraints can be specified with: +-- - DEFAULT clause +-- - CHECK clauses +-- - PRIMARY KEY clauses +-- - UNIQUE clauses +-- - EXCLUDE clauses +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +-- +-- DEFAULT syntax +-- +CREATE TABLE DEFAULT_TBL (i int DEFAULT 100, + x text 
DEFAULT 'vadim', f float8 DEFAULT 123.456); +INSERT INTO DEFAULT_TBL VALUES (1, 'thomas', 57.0613); +INSERT INTO DEFAULT_TBL VALUES (1, 'bruce'); +INSERT INTO DEFAULT_TBL (i, f) VALUES (2, 987.654); +INSERT INTO DEFAULT_TBL (x) VALUES ('marc'); +INSERT INTO DEFAULT_TBL VALUES (3, null, 1.0); +SELECT * FROM DEFAULT_TBL; + i | x | f +-----+--------+--------- + 1 | thomas | 57.0613 + 1 | bruce | 123.456 + 2 | vadim | 987.654 + 100 | marc | 123.456 + 3 | | 1 +(5 rows) + +CREATE SEQUENCE DEFAULT_SEQ; +CREATE TABLE DEFAULTEXPR_TBL (i1 int DEFAULT 100 + (200-199) * 2, + i2 int DEFAULT nextval('default_seq')); +INSERT INTO DEFAULTEXPR_TBL VALUES (-1, -2); +INSERT INTO DEFAULTEXPR_TBL (i1) VALUES (-3); +INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (-4); +INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (NULL); +SELECT * FROM DEFAULTEXPR_TBL; + i1 | i2 +-----+---- + -1 | -2 + -3 | 1 + 102 | -4 + 102 | +(4 rows) + +-- syntax errors +-- test for extraneous comma +CREATE TABLE error_tbl (i int DEFAULT (100, )); +ERROR: syntax error at or near ")" +LINE 1: CREATE TABLE error_tbl (i int DEFAULT (100, )); + ^ +-- this will fail because gram.y uses b_expr not a_expr for defaults, +-- to avoid a shift/reduce conflict that arises from NOT NULL being +-- part of the column definition syntax: +CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); +ERROR: syntax error at or near "IN" +LINE 1: CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); + ^ +-- this should work, however: +CREATE TABLE error_tbl (b1 bool DEFAULT (1 IN (1, 2))); +DROP TABLE error_tbl; +-- +-- CHECK syntax +-- +CREATE TABLE CHECK_TBL (x int, + CONSTRAINT CHECK_CON CHECK (x > 3)); +INSERT INTO CHECK_TBL VALUES (5); +INSERT INTO CHECK_TBL VALUES (4); +INSERT INTO CHECK_TBL VALUES (3); +ERROR: new row for relation "check_tbl" violates check constraint "check_con" +DETAIL: Failing row contains (3). +INSERT INTO CHECK_TBL VALUES (2); +ERROR: new row for relation "check_tbl" violates check constraint "check_con" +DETAIL: Failing row contains (2). +INSERT INTO CHECK_TBL VALUES (6); +INSERT INTO CHECK_TBL VALUES (1); +ERROR: new row for relation "check_tbl" violates check constraint "check_con" +DETAIL: Failing row contains (1). +SELECT * FROM CHECK_TBL; + x +--- + 5 + 4 + 6 +(3 rows) + +CREATE SEQUENCE CHECK_SEQ; +CREATE TABLE CHECK2_TBL (x int, y text, z int, + CONSTRAINT SEQUENCE_CON + CHECK (x > 3 and y <> 'check failed' and z < 8)); +INSERT INTO CHECK2_TBL VALUES (4, 'check ok', -2); +INSERT INTO CHECK2_TBL VALUES (1, 'x check failed', -2); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +DETAIL: Failing row contains (1, x check failed, -2). +INSERT INTO CHECK2_TBL VALUES (5, 'z check failed', 10); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +DETAIL: Failing row contains (5, z check failed, 10). +INSERT INTO CHECK2_TBL VALUES (0, 'check failed', -2); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +DETAIL: Failing row contains (0, check failed, -2). +INSERT INTO CHECK2_TBL VALUES (6, 'check failed', 11); +ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" +DETAIL: Failing row contains (6, check failed, 11). 
+INSERT INTO CHECK2_TBL VALUES (7, 'check ok', 7); +SELECT * from CHECK2_TBL; + x | y | z +---+----------+---- + 4 | check ok | -2 + 7 | check ok | 7 +(2 rows) + +-- +-- Check constraints on INSERT +-- +CREATE SEQUENCE INSERT_SEQ; +CREATE TABLE INSERT_TBL (x INT DEFAULT nextval('insert_seq'), + y TEXT DEFAULT '-NULL-', + z INT DEFAULT -1 * currval('insert_seq'), + CONSTRAINT INSERT_TBL_CON CHECK (x >= 3 AND y <> 'check failed' AND x < 8), + CHECK (x + z = 0)); +INSERT INTO INSERT_TBL(x,z) VALUES (2, -2); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (2, -NULL-, -2). +SELECT * FROM INSERT_TBL; + x | y | z +---+---+--- +(0 rows) + +SELECT 'one' AS one, nextval('insert_seq'); + one | nextval +-----+--------- + one | 1 +(1 row) + +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (2, Y, -2). +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +INSERT INTO INSERT_TBL(x,z) VALUES (1, -2); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_check" +DETAIL: Failing row contains (1, -NULL-, -2). +INSERT INTO INSERT_TBL(z,x) VALUES (-7, 7); +INSERT INTO INSERT_TBL VALUES (5, 'check failed', -5); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (5, check failed, -5). +INSERT INTO INSERT_TBL VALUES (7, '!check failed', -7); +INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 3 | Y | -3 + 7 | -NULL- | -7 + 7 | !check failed | -7 + 4 | -!NULL- | -4 +(4 rows) + +INSERT INTO INSERT_TBL(y,z) VALUES ('check failed', 4); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_check" +DETAIL: Failing row contains (5, check failed, 4). +INSERT INTO INSERT_TBL(x,y) VALUES (5, 'check failed'); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (5, check failed, -5). +INSERT INTO INSERT_TBL(x,y) VALUES (5, '!check failed'); +INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 3 | Y | -3 + 7 | -NULL- | -7 + 7 | !check failed | -7 + 4 | -!NULL- | -4 + 5 | !check failed | -5 + 6 | -!NULL- | -6 +(6 rows) + +SELECT 'seven' AS one, nextval('insert_seq'); + one | nextval +-------+--------- + seven | 7 +(1 row) + +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (8, Y, -8). +SELECT 'eight' AS one, currval('insert_seq'); + one | currval +-------+--------- + eight | 8 +(1 row) + +-- According to SQL, it is OK to insert a record that gives rise to NULL +-- constraint-condition results. 
Postgres used to reject this, but it +-- was wrong: +INSERT INTO INSERT_TBL VALUES (null, null, null); +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 3 | Y | -3 + 7 | -NULL- | -7 + 7 | !check failed | -7 + 4 | -!NULL- | -4 + 5 | !check failed | -5 + 6 | -!NULL- | -6 + | | +(7 rows) + +-- +-- Check constraints on system columns +-- +CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, + altitude int, + CHECK (NOT (is_capital AND tableoid::regclass::text = 'sys_col_check_tbl'))); +INSERT INTO SYS_COL_CHECK_TBL VALUES ('Seattle', 'Washington', false, 100); +INSERT INTO SYS_COL_CHECK_TBL VALUES ('Olympia', 'Washington', true, 100); +ERROR: new row for relation "sys_col_check_tbl" violates check constraint "sys_col_check_tbl_check" +DETAIL: Failing row contains (Olympia, Washington, t, 100). +SELECT *, tableoid::regclass::text FROM SYS_COL_CHECK_TBL; + city | state | is_capital | altitude | tableoid +---------+------------+------------+----------+------------------- + Seattle | Washington | f | 100 | sys_col_check_tbl +(1 row) + +DROP TABLE SYS_COL_CHECK_TBL; +-- +-- Check constraints on system columns other than TableOid should return an error +-- +CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, + altitude int, + CHECK (NOT (is_capital AND ctid::text = 'sys_col_check_tbl'))); +ERROR: system column "ctid" reference in check constraint is invalid +LINE 3: CHECK (NOT (is_capital AND ctid::text = 'sys_col_check... + ^ +-- +-- Check inheritance of defaults and constraints +-- +CREATE TABLE INSERT_CHILD (cx INT default 42, + cy INT CHECK (cy > x)) + INHERITS (INSERT_TBL); +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,11); +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,6); +ERROR: new row for relation "insert_child" violates check constraint "insert_child_check" +DETAIL: Failing row contains (7, -NULL-, -7, 42, 6). +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (6,-7,7); +ERROR: new row for relation "insert_child" violates check constraint "insert_tbl_check" +DETAIL: Failing row contains (6, -NULL-, -7, 42, 7). +INSERT INTO INSERT_CHILD(x,y,z,cy) VALUES (6,'check failed',-6,7); +ERROR: new row for relation "insert_child" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (6, check failed, -6, 42, 7). +SELECT * FROM INSERT_CHILD; + x | y | z | cx | cy +---+--------+----+----+---- + 7 | -NULL- | -7 | 42 | 11 +(1 row) + +DROP TABLE INSERT_CHILD; +-- +-- Check NO INHERIT type of constraints and inheritance +-- +CREATE TABLE ATACC1 (TEST INT + CHECK (TEST > 0) NO INHERIT); +CREATE TABLE ATACC2 (TEST2 INT) INHERITS (ATACC1); +-- check constraint is not there on child +INSERT INTO ATACC2 (TEST) VALUES (-3); +-- check constraint is there on parent +INSERT INTO ATACC1 (TEST) VALUES (-3); +ERROR: new row for relation "atacc1" violates check constraint "atacc1_test_check" +DETAIL: Failing row contains (-3). +DROP TABLE ATACC1 CASCADE; +NOTICE: drop cascades to table atacc2 +CREATE TABLE ATACC1 (TEST INT, TEST2 INT + CHECK (TEST > 0), CHECK (TEST2 > 10) NO INHERIT); +CREATE TABLE ATACC2 () INHERITS (ATACC1); +-- check constraint is there on child +INSERT INTO ATACC2 (TEST) VALUES (-3); +ERROR: new row for relation "atacc2" violates check constraint "atacc1_test_check" +DETAIL: Failing row contains (-3, null). +-- check constraint is there on parent +INSERT INTO ATACC1 (TEST) VALUES (-3); +ERROR: new row for relation "atacc1" violates check constraint "atacc1_test_check" +DETAIL: Failing row contains (-3, null).
+-- check constraint is not there on child +INSERT INTO ATACC2 (TEST2) VALUES (3); +-- check constraint is there on parent +INSERT INTO ATACC1 (TEST2) VALUES (3); +ERROR: new row for relation "atacc1" violates check constraint "atacc1_test2_check" +DETAIL: Failing row contains (null, 3). +DROP TABLE ATACC1 CASCADE; +NOTICE: drop cascades to table atacc2 +-- +-- Check constraints on INSERT INTO +-- +DELETE FROM INSERT_TBL; +ALTER SEQUENCE INSERT_SEQ RESTART WITH 4; +CREATE TEMP TABLE tmp (xd INT, yd TEXT, zd INT); +INSERT INTO tmp VALUES (null, 'Y', null); +INSERT INTO tmp VALUES (5, '!check failed', null); +INSERT INTO tmp VALUES (null, 'try again', null); +INSERT INTO INSERT_TBL(y) select yd from tmp; +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 4 | Y | -4 + 5 | !check failed | -5 + 6 | try again | -6 +(3 rows) + +INSERT INTO INSERT_TBL SELECT * FROM tmp WHERE yd = 'try again'; +INSERT INTO INSERT_TBL(y,z) SELECT yd, -7 FROM tmp WHERE yd = 'try again'; +INSERT INTO INSERT_TBL(y,z) SELECT yd, -8 FROM tmp WHERE yd = 'try again'; +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (8, try again, -8). +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 4 | Y | -4 + 5 | !check failed | -5 + 6 | try again | -6 + | try again | + 7 | try again | -7 +(5 rows) + +DROP TABLE tmp; +-- +-- Check constraints on UPDATE +-- +UPDATE INSERT_TBL SET x = NULL WHERE x = 5; +UPDATE INSERT_TBL SET x = 6 WHERE x = 6; +UPDATE INSERT_TBL SET x = -z, z = -x; +UPDATE INSERT_TBL SET x = z, z = x; +ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" +DETAIL: Failing row contains (-4, Y, 4). +SELECT * FROM INSERT_TBL; + x | y | z +---+---------------+---- + 4 | Y | -4 + | try again | + 7 | try again | -7 + 5 | !check failed | + 6 | try again | -6 +(5 rows) + +-- DROP TABLE INSERT_TBL; +-- +-- Check constraints on COPY FROM +-- +CREATE TABLE COPY_TBL (x INT, y TEXT, z INT, + CONSTRAINT COPY_CON + CHECK (x > 3 AND y <> 'check failed' AND x < 7 )); +\set filename :abs_srcdir '/data/constro.data' +COPY COPY_TBL FROM :'filename'; +SELECT * FROM COPY_TBL; + x | y | z +---+---------------+--- + 4 | !check failed | 5 + 6 | OK | 4 +(2 rows) + +\set filename :abs_srcdir '/data/constrf.data' +COPY COPY_TBL FROM :'filename'; +ERROR: new row for relation "copy_tbl" violates check constraint "copy_con" +DETAIL: Failing row contains (7, check failed, 6). +CONTEXT: COPY copy_tbl, line 2: "7 check failed 6" +SELECT * FROM COPY_TBL; + x | y | z +---+---------------+--- + 4 | !check failed | 5 + 6 | OK | 4 +(2 rows) + +-- +-- Primary keys +-- +CREATE TABLE PRIMARY_TBL (i int PRIMARY KEY, t text); +INSERT INTO PRIMARY_TBL VALUES (1, 'one'); +INSERT INTO PRIMARY_TBL VALUES (2, 'two'); +INSERT INTO PRIMARY_TBL VALUES (1, 'three'); +ERROR: duplicate key value violates unique constraint "primary_tbl_pkey" +DETAIL: Key (i)=(1) already exists. +INSERT INTO PRIMARY_TBL VALUES (4, 'three'); +INSERT INTO PRIMARY_TBL VALUES (5, 'one'); +INSERT INTO PRIMARY_TBL (t) VALUES ('six'); +ERROR: null value in column "i" of relation "primary_tbl" violates not-null constraint +DETAIL: Failing row contains (null, six). 
+SELECT * FROM PRIMARY_TBL; + i | t +---+------- + 1 | one + 2 | two + 4 | three + 5 | one +(4 rows) + +DROP TABLE PRIMARY_TBL; +CREATE TABLE PRIMARY_TBL (i int, t text, + PRIMARY KEY(i,t)); +INSERT INTO PRIMARY_TBL VALUES (1, 'one'); +INSERT INTO PRIMARY_TBL VALUES (2, 'two'); +INSERT INTO PRIMARY_TBL VALUES (1, 'three'); +INSERT INTO PRIMARY_TBL VALUES (4, 'three'); +INSERT INTO PRIMARY_TBL VALUES (5, 'one'); +INSERT INTO PRIMARY_TBL (t) VALUES ('six'); +ERROR: null value in column "i" of relation "primary_tbl" violates not-null constraint +DETAIL: Failing row contains (null, six). +SELECT * FROM PRIMARY_TBL; + i | t +---+------- + 1 | one + 2 | two + 1 | three + 4 | three + 5 | one +(5 rows) + +DROP TABLE PRIMARY_TBL; +-- +-- Unique keys +-- +CREATE TABLE UNIQUE_TBL (i int UNIQUE, t text); +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (2, 'two'); +INSERT INTO UNIQUE_TBL VALUES (1, 'three'); +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(1) already exists. +INSERT INTO UNIQUE_TBL VALUES (4, 'four'); +INSERT INTO UNIQUE_TBL VALUES (5, 'one'); +INSERT INTO UNIQUE_TBL (t) VALUES ('six'); +INSERT INTO UNIQUE_TBL (t) VALUES ('seven'); +INSERT INTO UNIQUE_TBL VALUES (5, 'five-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'five-upsert-update'; +INSERT INTO UNIQUE_TBL VALUES (6, 'six-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'six-upsert-update'; +-- should fail +INSERT INTO UNIQUE_TBL VALUES (1, 'a'), (2, 'b'), (2, 'b') ON CONFLICT (i) DO UPDATE SET t = 'fails'; +ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time +HINT: Ensure that no rows proposed for insertion within the same command have duplicate constrained values. +SELECT * FROM UNIQUE_TBL; + i | t +---+-------------------- + 1 | one + 2 | two + 4 | four + | six + | seven + 5 | five-upsert-update + 6 | six-upsert-insert +(7 rows) + +DROP TABLE UNIQUE_TBL; +CREATE TABLE UNIQUE_TBL (i int, t text, + UNIQUE(i,t)); +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (2, 'two'); +INSERT INTO UNIQUE_TBL VALUES (1, 'three'); +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +ERROR: duplicate key value violates unique constraint "unique_tbl_i_t_key" +DETAIL: Key (i, t)=(1, one) already exists. +INSERT INTO UNIQUE_TBL VALUES (5, 'one'); +INSERT INTO UNIQUE_TBL (t) VALUES ('six'); +SELECT * FROM UNIQUE_TBL; + i | t +---+------- + 1 | one + 2 | two + 1 | three + 5 | one + | six +(5 rows) + +DROP TABLE UNIQUE_TBL; +-- +-- Deferrable unique constraints +-- +CREATE TABLE unique_tbl (i int UNIQUE DEFERRABLE, t text); +INSERT INTO unique_tbl VALUES (0, 'one'); +INSERT INTO unique_tbl VALUES (1, 'two'); +INSERT INTO unique_tbl VALUES (2, 'tree'); +INSERT INTO unique_tbl VALUES (3, 'four'); +INSERT INTO unique_tbl VALUES (4, 'five'); +BEGIN; +-- default is immediate so this should fail right away +UPDATE unique_tbl SET i = 1 WHERE i = 0; +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(1) already exists. 
+ROLLBACK; +-- check is done at end of statement, so this should succeed +UPDATE unique_tbl SET i = i+1; +SELECT * FROM unique_tbl; + i | t +---+------ + 1 | one + 2 | two + 3 | tree + 4 | four + 5 | five +(5 rows) + +-- explicitly defer the constraint +BEGIN; +SET CONSTRAINTS unique_tbl_i_key DEFERRED; +INSERT INTO unique_tbl VALUES (3, 'three'); +DELETE FROM unique_tbl WHERE t = 'tree'; -- makes constraint valid again +COMMIT; -- should succeed +SELECT * FROM unique_tbl; + i | t +---+------- + 1 | one + 2 | two + 4 | four + 5 | five + 3 | three +(5 rows) + +-- try adding an initially deferred constraint +ALTER TABLE unique_tbl DROP CONSTRAINT unique_tbl_i_key; +ALTER TABLE unique_tbl ADD CONSTRAINT unique_tbl_i_key + UNIQUE (i) DEFERRABLE INITIALLY DEFERRED; +BEGIN; +INSERT INTO unique_tbl VALUES (1, 'five'); +INSERT INTO unique_tbl VALUES (5, 'one'); +UPDATE unique_tbl SET i = 4 WHERE i = 2; +UPDATE unique_tbl SET i = 2 WHERE i = 4 AND t = 'four'; +DELETE FROM unique_tbl WHERE i = 1 AND t = 'one'; +DELETE FROM unique_tbl WHERE i = 5 AND t = 'five'; +COMMIT; +SELECT * FROM unique_tbl; + i | t +---+------- + 3 | three + 1 | five + 5 | one + 4 | two + 2 | four +(5 rows) + +-- should fail at commit-time +BEGIN; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +COMMIT; -- should fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(3) already exists. +-- make constraint check immediate +BEGIN; +SET CONSTRAINTS ALL IMMEDIATE; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(3) already exists. +COMMIT; +-- forced check when SET CONSTRAINTS is called +BEGIN; +SET CONSTRAINTS ALL DEFERRED; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +SET CONSTRAINTS ALL IMMEDIATE; -- should fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(3) already exists. +COMMIT; +-- test deferrable UNIQUE with a partitioned table +CREATE TABLE parted_uniq_tbl (i int UNIQUE DEFERRABLE) partition by range (i); +CREATE TABLE parted_uniq_tbl_1 PARTITION OF parted_uniq_tbl FOR VALUES FROM (0) TO (10); +CREATE TABLE parted_uniq_tbl_2 PARTITION OF parted_uniq_tbl FOR VALUES FROM (20) TO (30); +SELECT conname, conrelid::regclass FROM pg_constraint + WHERE conname LIKE 'parted_uniq%' ORDER BY conname; + conname | conrelid +-------------------------+------------------- + parted_uniq_tbl_1_i_key | parted_uniq_tbl_1 + parted_uniq_tbl_2_i_key | parted_uniq_tbl_2 + parted_uniq_tbl_i_key | parted_uniq_tbl +(3 rows) + +BEGIN; +INSERT INTO parted_uniq_tbl VALUES (1); +SAVEPOINT f; +INSERT INTO parted_uniq_tbl VALUES (1); -- unique violation +ERROR: duplicate key value violates unique constraint "parted_uniq_tbl_1_i_key" +DETAIL: Key (i)=(1) already exists. +ROLLBACK TO f; +SET CONSTRAINTS parted_uniq_tbl_i_key DEFERRED; +INSERT INTO parted_uniq_tbl VALUES (1); -- OK now, fail at commit +COMMIT; +ERROR: duplicate key value violates unique constraint "parted_uniq_tbl_1_i_key" +DETAIL: Key (i)=(1) already exists. +DROP TABLE parted_uniq_tbl; +-- test a HOT update that invalidates the conflicting tuple. 
+-- the trigger should still fire and catch the violation +BEGIN; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +UPDATE unique_tbl SET t = 'THREE' WHERE i = 3 AND t = 'Three'; +COMMIT; -- should fail +ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" +DETAIL: Key (i)=(3) already exists. +SELECT * FROM unique_tbl; + i | t +---+------- + 3 | three + 1 | five + 5 | one + 4 | two + 2 | four +(5 rows) + +-- test a HOT update that modifies the newly inserted tuple, +-- but should succeed because we then remove the other conflicting tuple. +BEGIN; +INSERT INTO unique_tbl VALUES(3, 'tree'); -- should succeed for now +UPDATE unique_tbl SET t = 'threex' WHERE t = 'tree'; +DELETE FROM unique_tbl WHERE t = 'three'; +SELECT * FROM unique_tbl; + i | t +---+-------- + 1 | five + 5 | one + 4 | two + 2 | four + 3 | threex +(5 rows) + +COMMIT; +SELECT * FROM unique_tbl; + i | t +---+-------- + 1 | five + 5 | one + 4 | two + 2 | four + 3 | threex +(5 rows) + +DROP TABLE unique_tbl; +-- +-- EXCLUDE constraints +-- +CREATE TABLE circles ( + c1 CIRCLE, + c2 TEXT, + EXCLUDE USING gist + (c1 WITH &&, (c2::circle) WITH &&) + WHERE (circle_center(c1) <> '(0,0)') +); +-- these should succeed because they don't match the index predicate +INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 5>'); +INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 4>'); +-- succeed +INSERT INTO circles VALUES('<(10,10), 10>', '<(0,0), 5>'); +-- fail, overlaps +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>'); +ERROR: conflicting key value violates exclusion constraint "circles_c1_c2_excl" +DETAIL: Key (c1, (c2::circle))=(<(20,20),10>, <(0,0),4>) conflicts with existing key (c1, (c2::circle))=(<(10,10),10>, <(0,0),5>). +-- succeed, because violation is ignored +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') + ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO NOTHING; +-- fail, because DO UPDATE variant requires unique index +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') + ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO UPDATE SET c2 = EXCLUDED.c2; +ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints +-- succeed because c1 doesn't overlap +INSERT INTO circles VALUES('<(20,20), 1>', '<(0,0), 5>'); +-- succeed because c2 doesn't overlap +INSERT INTO circles VALUES('<(20,20), 10>', '<(10,10), 5>'); +-- should fail on existing data without the WHERE clause +ALTER TABLE circles ADD EXCLUDE USING gist + (c1 WITH &&, (c2::circle) WITH &&); +ERROR: could not create exclusion constraint "circles_c1_c2_excl1" +DETAIL: Key (c1, (c2::circle))=(<(0,0),5>, <(0,0),5>) conflicts with key (c1, (c2::circle))=(<(0,0),5>, <(0,0),4>). +-- try reindexing an existing constraint +REINDEX INDEX circles_c1_c2_excl; +DROP TABLE circles; +-- Check deferred exclusion constraint +CREATE TABLE deferred_excl ( + f1 int, + f2 int, + CONSTRAINT deferred_excl_con EXCLUDE (f1 WITH =) INITIALLY DEFERRED +); +INSERT INTO deferred_excl VALUES(1); +INSERT INTO deferred_excl VALUES(2); +INSERT INTO deferred_excl VALUES(1); -- fail +ERROR: conflicting key value violates exclusion constraint "deferred_excl_con" +DETAIL: Key (f1)=(1) conflicts with existing key (f1)=(1). 
+INSERT INTO deferred_excl VALUES(1) ON CONFLICT ON CONSTRAINT deferred_excl_con DO NOTHING; -- fail +ERROR: ON CONFLICT does not support deferrable unique constraints/exclusion constraints as arbiters +BEGIN; +INSERT INTO deferred_excl VALUES(2); -- no fail here +COMMIT; -- should fail here +ERROR: conflicting key value violates exclusion constraint "deferred_excl_con" +DETAIL: Key (f1)=(2) conflicts with existing key (f1)=(2). +BEGIN; +INSERT INTO deferred_excl VALUES(3); +INSERT INTO deferred_excl VALUES(3); -- no fail here +COMMIT; -- should fail here +ERROR: conflicting key value violates exclusion constraint "deferred_excl_con" +DETAIL: Key (f1)=(3) conflicts with existing key (f1)=(3). +-- bug #13148: deferred constraint versus HOT update +BEGIN; +INSERT INTO deferred_excl VALUES(2, 1); -- no fail here +DELETE FROM deferred_excl WHERE f1 = 2 AND f2 IS NULL; -- remove old row +UPDATE deferred_excl SET f2 = 2 WHERE f1 = 2; +COMMIT; -- should not fail +SELECT * FROM deferred_excl; + f1 | f2 +----+---- + 1 | + 2 | 2 +(2 rows) + +ALTER TABLE deferred_excl DROP CONSTRAINT deferred_excl_con; +-- This should fail, but worth testing because of HOT updates +UPDATE deferred_excl SET f1 = 3; +ALTER TABLE deferred_excl ADD EXCLUDE (f1 WITH =); +ERROR: could not create exclusion constraint "deferred_excl_f1_excl" +DETAIL: Key (f1)=(3) conflicts with key (f1)=(3). +DROP TABLE deferred_excl; +-- Comments +-- Set up a low-level role to enforce non-superuser checks. +CREATE ROLE regress_constraint_comments; +SET SESSION AUTHORIZATION regress_constraint_comments; +CREATE TABLE constraint_comments_tbl (a int CONSTRAINT the_constraint CHECK (a > 0)); +CREATE DOMAIN constraint_comments_dom AS int CONSTRAINT the_constraint CHECK (value > 0); +COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'yes, the comment'; +COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; +-- no such constraint +COMMENT ON CONSTRAINT no_constraint ON constraint_comments_tbl IS 'yes, the comment'; +ERROR: constraint "no_constraint" for table "constraint_comments_tbl" does not exist +COMMENT ON CONSTRAINT no_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; +ERROR: constraint "no_constraint" for domain constraint_comments_dom does not exist +-- no such table/domain +COMMENT ON CONSTRAINT the_constraint ON no_comments_tbl IS 'bad comment'; +ERROR: relation "no_comments_tbl" does not exist +COMMENT ON CONSTRAINT the_constraint ON DOMAIN no_comments_dom IS 'another bad comment'; +ERROR: type "no_comments_dom" does not exist +COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS NULL; +COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS NULL; +-- unauthorized user +RESET SESSION AUTHORIZATION; +CREATE ROLE regress_constraint_comments_noaccess; +SET SESSION AUTHORIZATION regress_constraint_comments_noaccess; +COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'no, the comment'; +ERROR: must be owner of relation constraint_comments_tbl +COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'no, another comment'; +ERROR: must be owner of type constraint_comments_dom +RESET SESSION AUTHORIZATION; +DROP TABLE constraint_comments_tbl; +DROP DOMAIN constraint_comments_dom; +DROP ROLE regress_constraint_comments; +DROP ROLE regress_constraint_comments_noaccess; diff --git a/src/test/regress/expected/copy.out b/src/test/regress/expected/copy.out new file mode 100644 index
00000000000..931e7b2e699 --- /dev/null +++ b/src/test/regress/expected/copy.out @@ -0,0 +1,248 @@ +-- +-- COPY +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR +-- CLASS POPULATION +-- (any resemblance to real life is purely coincidental) +-- +\set filename :abs_srcdir '/data/agg.data' +COPY aggtest FROM :'filename'; +\set filename :abs_srcdir '/data/onek.data' +COPY onek FROM :'filename'; +\set filename :abs_builddir '/results/onek.data' +COPY onek TO :'filename'; +DELETE FROM onek; +COPY onek FROM :'filename'; +\set filename :abs_srcdir '/data/tenk.data' +COPY tenk1 FROM :'filename'; +\set filename :abs_srcdir '/data/rect.data' +COPY slow_emp4000 FROM :'filename'; +\set filename :abs_srcdir '/data/person.data' +COPY person FROM :'filename'; +\set filename :abs_srcdir '/data/emp.data' +COPY emp FROM :'filename'; +\set filename :abs_srcdir '/data/student.data' +COPY student FROM :'filename'; +\set filename :abs_srcdir '/data/stud_emp.data' +COPY stud_emp FROM :'filename'; +\set filename :abs_srcdir '/data/streets.data' +COPY road FROM :'filename'; +\set filename :abs_srcdir '/data/real_city.data' +COPY real_city FROM :'filename'; +\set filename :abs_srcdir '/data/hash.data' +COPY hash_i4_heap FROM :'filename'; +COPY hash_name_heap FROM :'filename'; +COPY hash_txt_heap FROM :'filename'; +COPY hash_f8_heap FROM :'filename'; +\set filename :abs_srcdir '/data/tsearch.data' +COPY test_tsvector FROM :'filename'; +\set filename :abs_srcdir '/data/jsonb.data' +COPY testjsonb FROM :'filename'; +-- the data in this file has a lot of duplicates in the index key +-- fields, leading to long bucket chains and lots of table expansion. +-- this is therefore a stress test of the bucket overflow code (unlike +-- the data in hash.data, which has unique index keys). 
+-- +-- \set filename :abs_srcdir '/data/hashovfl.data' +-- COPY hash_ovfl_heap FROM :'filename'; +\set filename :abs_srcdir '/data/desc.data' +COPY bt_i4_heap FROM :'filename'; +\set filename :abs_srcdir '/data/hash.data' +COPY bt_name_heap FROM :'filename'; +\set filename :abs_srcdir '/data/desc.data' +COPY bt_txt_heap FROM :'filename'; +\set filename :abs_srcdir '/data/hash.data' +COPY bt_f8_heap FROM :'filename'; +\set filename :abs_srcdir '/data/array.data' +COPY array_op_test FROM :'filename'; +\set filename :abs_srcdir '/data/array.data' +COPY array_index_op_test FROM :'filename'; +-- analyze all the data we just loaded, to ensure plan consistency +-- in later tests +ANALYZE aggtest; +ANALYZE onek; +ANALYZE tenk1; +ANALYZE slow_emp4000; +ANALYZE person; +ANALYZE emp; +ANALYZE student; +ANALYZE stud_emp; +ANALYZE road; +ANALYZE real_city; +ANALYZE hash_i4_heap; +ANALYZE hash_name_heap; +ANALYZE hash_txt_heap; +ANALYZE hash_f8_heap; +ANALYZE test_tsvector; +ANALYZE bt_i4_heap; +ANALYZE bt_name_heap; +ANALYZE bt_txt_heap; +ANALYZE bt_f8_heap; +ANALYZE array_op_test; +ANALYZE array_index_op_test; +--- test copying in CSV mode with various styles +--- of embedded line ending characters +create temp table copytest ( + style text, + test text, + filler int); +insert into copytest values('DOS',E'abc\r\ndef',1); +insert into copytest values('Unix',E'abc\ndef',2); +insert into copytest values('Mac',E'abc\rdef',3); +insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4); +\set filename :abs_builddir '/results/copytest.csv' +copy copytest to :'filename' csv; +create temp table copytest2 (like copytest); +copy copytest2 from :'filename' csv; +select * from copytest except select * from copytest2; + style | test | filler +-------+------+-------- +(0 rows) + +truncate copytest2; +--- same test but with an escape char different from quote char +copy copytest to :'filename' csv quote '''' escape E'\\'; +copy copytest2 from :'filename' csv quote '''' escape E'\\'; +select * from copytest except select * from copytest2; + style | test | filler +-------+------+-------- +(0 rows) + +-- test header line feature +create temp table copytest3 ( + c1 int, + "col with , comma" text, + "col with "" quote" int); +copy copytest3 from stdin csv header; +copy copytest3 to stdout csv header; +c1,"col with , comma","col with "" quote" +1,a,1 +2,b,2 +-- test copy from with a partitioned table +create table parted_copytest ( + a int, + b int, + c text +) partition by list (b); +create table parted_copytest_a1 (c text, b int, a int); +create table parted_copytest_a2 (a int, c text, b int); +alter table parted_copytest attach partition parted_copytest_a1 for values in(1); +alter table parted_copytest attach partition parted_copytest_a2 for values in(2); +-- We must insert enough rows to trigger multi-inserts. These are only +-- enabled adaptively when there are few enough partition changes. +insert into parted_copytest select x,1,'One' from generate_series(1,1000) x; +insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x; +insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x; +\set filename :abs_builddir '/results/parted_copytest.csv' +copy (select * from parted_copytest order by a) to :'filename'; +truncate parted_copytest; +copy parted_copytest from :'filename'; +-- Ensure COPY FREEZE errors for partitioned tables. 
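(Background on the next test: COPY ... (FREEZE) writes tuples pre-frozen, which is only safe when no other transaction can see them, so the target table must have been created or truncated in the current subtransaction, and a partitioned parent is rejected outright, as the block below verifies. A sketch of the permitted case on a plain table, using a hypothetical throwaway table freeze_demo:

BEGIN;
CREATE TABLE freeze_demo (a int); -- created in this transaction, invisible elsewhere
COPY freeze_demo FROM stdin (FREEZE); -- allowed: rows are written already frozen
1
2
\.
COMMIT;
)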
+begin; +truncate parted_copytest; +copy parted_copytest from :'filename' (freeze); +ERROR: cannot perform COPY FREEZE on a partitioned table +rollback; +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + tableoid | count | sum +--------------------+-------+-------- + parted_copytest_a1 | 1010 | 510655 + parted_copytest_a2 | 10 | 10055 +(2 rows) + +truncate parted_copytest; +-- create before insert row trigger on parted_copytest_a2 +create function part_ins_func() returns trigger language plpgsql as $$ +begin + return new; +end; +$$; +create trigger part_ins_trig + before insert on parted_copytest_a2 + for each row + execute procedure part_ins_func(); +copy parted_copytest from :'filename'; +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + tableoid | count | sum +--------------------+-------+-------- + parted_copytest_a1 | 1010 | 510655 + parted_copytest_a2 | 10 | 10055 +(2 rows) + +truncate table parted_copytest; +create index on parted_copytest (b); +drop trigger part_ins_trig on parted_copytest_a2; +copy parted_copytest from stdin; +-- Ensure index entries were properly added during the copy. +select * from parted_copytest where b = 1; + a | b | c +---+---+------ + 1 | 1 | str1 +(1 row) + +select * from parted_copytest where b = 2; + a | b | c +---+---+------ + 2 | 2 | str2 +(1 row) + +drop table parted_copytest; +-- +-- Progress reporting for COPY +-- +create table tab_progress_reporting ( + name text, + age int4, + location point, + salary int4, + manager name +); +-- Add a trigger to catch and print the contents of the catalog view +-- pg_stat_progress_copy during data insertion. This allows testing +-- the validation of some progress reports for COPY FROM where the trigger +-- would fire. +create function notice_after_tab_progress_reporting() returns trigger AS +$$ +declare report record; +begin + -- The fields ignored here are the ones that may not remain + -- consistent across multiple runs. The sizes reported may differ + -- across platforms, so just check if these are strictly positive. + with progress_data as ( + select + relid::regclass::text as relname, + command, + type, + bytes_processed > 0 as has_bytes_processed, + bytes_total > 0 as has_bytes_total, + tuples_processed, + tuples_excluded + from pg_stat_progress_copy + where pid = pg_backend_pid()) + select into report (to_jsonb(r)) as value + from progress_data r; + + raise info 'progress: %', report.value::text; + return new; +end; +$$ language plpgsql; +create trigger check_after_tab_progress_reporting + after insert on tab_progress_reporting + for each statement + execute function notice_after_tab_progress_reporting(); +-- Generate COPY FROM report with PIPE. +copy tab_progress_reporting from stdin; +INFO: progress: {"type": "PIPE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": false, "tuples_excluded": 0, "tuples_processed": 3, "has_bytes_processed": true} +-- Generate COPY FROM report with FILE, with some excluded tuples.
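(The trigger above reads pg_stat_progress_copy from inside the COPY so that the report lands in the regression output; in normal use the view is polled from a second session while a long COPY runs. A sketch of such a monitoring query:

-- one row per backend currently executing a COPY
SELECT relid::regclass, command, type,
       bytes_processed, bytes_total,
       tuples_processed, tuples_excluded
FROM pg_stat_progress_copy;

In the FILE report below, tuples_excluded counts the rows filtered out by COPY's WHERE clause.)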
+truncate tab_progress_reporting; +\set filename :abs_srcdir '/data/emp.data' +copy tab_progress_reporting from :'filename' + where (salary < 2000); +INFO: progress: {"type": "FILE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": true, "tuples_excluded": 1, "tuples_processed": 2, "has_bytes_processed": true} +drop trigger check_after_tab_progress_reporting on tab_progress_reporting; +drop function notice_after_tab_progress_reporting(); +drop table tab_progress_reporting; diff --git a/src/test/regress/expected/create_function_0.out b/src/test/regress/expected/create_function_0.out new file mode 100644 index 00000000000..6e96d6c5d66 --- /dev/null +++ b/src/test/regress/expected/create_function_0.out @@ -0,0 +1,104 @@ +-- +-- CREATE_FUNCTION_0 +-- +-- directory path and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set autoinclib :libdir '/autoinc' :dlsuffix +\set refintlib :libdir '/refint' :dlsuffix +\set regresslib :libdir '/regress' :dlsuffix +-- Create a bunch of C functions that will be used by later tests: +CREATE FUNCTION check_primary_key () + RETURNS trigger + AS :'refintlib' + LANGUAGE C; +CREATE FUNCTION check_foreign_key () + RETURNS trigger + AS :'refintlib' + LANGUAGE C; +CREATE FUNCTION autoinc () + RETURNS trigger + AS :'autoinclib' + LANGUAGE C; +CREATE FUNCTION trigger_return_old () + RETURNS trigger + AS :'regresslib' + LANGUAGE C; +CREATE FUNCTION ttdummy () + RETURNS trigger + AS :'regresslib' + LANGUAGE C; +CREATE FUNCTION set_ttdummy (int4) + RETURNS int4 + AS :'regresslib' + LANGUAGE C STRICT; +CREATE FUNCTION make_tuple_indirect (record) + RETURNS record + AS :'regresslib' + LANGUAGE C STRICT; +CREATE FUNCTION test_atomic_ops() + RETURNS bool + AS :'regresslib' + LANGUAGE C; +CREATE FUNCTION test_fdw_handler() + RETURNS fdw_handler + AS :'regresslib', 'test_fdw_handler' + LANGUAGE C; +CREATE FUNCTION test_support_func(internal) + RETURNS internal + AS :'regresslib', 'test_support_func' + LANGUAGE C STRICT; +CREATE FUNCTION test_opclass_options_func(internal) + RETURNS void + AS :'regresslib', 'test_opclass_options_func' + LANGUAGE C; +CREATE FUNCTION test_enc_conversion(bytea, name, name, bool, validlen OUT int, result OUT bytea) + AS :'regresslib', 'test_enc_conversion' + LANGUAGE C STRICT; +CREATE FUNCTION binary_coercible(oid, oid) + RETURNS bool + AS :'regresslib', 'binary_coercible' + LANGUAGE C STRICT STABLE PARALLEL SAFE; +-- Things that shouldn't work: +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'SELECT ''not an integer'';'; +ERROR: return type mismatch in function declared to return integer +DETAIL: Actual return type is text. +CONTEXT: SQL function "test1" +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'not even SQL'; +ERROR: syntax error at or near "not" +LINE 2: AS 'not even SQL'; + ^ +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'SELECT 1, 2, 3;'; +ERROR: return type mismatch in function declared to return integer +DETAIL: Final statement must return exactly one column. 
+CONTEXT: SQL function "test1" +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'SELECT $2;'; +ERROR: there is no parameter $2 +LINE 2: AS 'SELECT $2;'; + ^ +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'a', 'b'; +ERROR: only one AS item needed for language "sql" +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C + AS 'nosuchfile'; +ERROR: could not access file "nosuchfile": No such file or directory +-- To produce stable regression test output, we have to filter the name +-- of the regresslib file out of the error message in this test. +\set VERBOSITY sqlstate +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C + AS :'regresslib', 'nosuchsymbol'; +ERROR: 42883 +\set VERBOSITY default +SELECT regexp_replace(:'LAST_ERROR_MESSAGE', 'file ".*"', 'file "..."'); + regexp_replace +------------------------------------------------------ + could not find function "nosuchsymbol" in file "..." +(1 row) + +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE internal + AS 'nosuch'; +ERROR: there is no built-in function named "nosuch" diff --git a/src/test/regress/expected/create_function_1.out b/src/test/regress/expected/create_function_1.out new file mode 100644 index 00000000000..5345ed08400 --- /dev/null +++ b/src/test/regress/expected/create_function_1.out @@ -0,0 +1,30 @@ +-- +-- CREATE_FUNCTION_1 +-- +-- directory path and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +-- Create C functions needed by create_type.sql +CREATE FUNCTION widget_in(cstring) + RETURNS widget + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; +NOTICE: type "widget" is not yet defined +DETAIL: Creating a shell type definition. +CREATE FUNCTION widget_out(widget) + RETURNS cstring + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; +NOTICE: argument type widget is only a shell +CREATE FUNCTION int44in(cstring) + RETURNS city_budget + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; +NOTICE: type "city_budget" is not yet defined +DETAIL: Creating a shell type definition. 
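-- [Editor's note] The NOTICEs above show PostgreSQL's shell-type mechanism:
-- naming a not-yet-defined type in an I/O function signature creates a
-- placeholder, and a later CREATE TYPE completes it. A minimal sketch for a
-- hypothetical fixed-length type "mytype", assuming suitable C functions
-- exist in :'regresslib':
CREATE TYPE mytype;                      -- explicit shell; avoids the NOTICE
CREATE FUNCTION mytype_in(cstring) RETURNS mytype
    AS :'regresslib' LANGUAGE C STRICT IMMUTABLE;
CREATE FUNCTION mytype_out(mytype) RETURNS cstring
    AS :'regresslib' LANGUAGE C STRICT IMMUTABLE;
CREATE TYPE mytype (INPUT = mytype_in, OUTPUT = mytype_out,
                    INTERNALLENGTH = 16);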
+CREATE FUNCTION int44out(city_budget) + RETURNS cstring + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; +NOTICE: argument type city_budget is only a shell diff --git a/src/test/regress/expected/create_function_2.out b/src/test/regress/expected/create_function_2.out new file mode 100644 index 00000000000..a366294add7 --- /dev/null +++ b/src/test/regress/expected/create_function_2.out @@ -0,0 +1,73 @@ +-- +-- CREATE_FUNCTION_2 +-- +-- directory path and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX +\set regresslib :libdir '/regress' :dlsuffix +CREATE FUNCTION hobbies(person) + RETURNS setof hobbies_r + AS 'select * from hobbies_r where person = $1.name' + LANGUAGE SQL; +CREATE FUNCTION hobby_construct(text, text) + RETURNS hobbies_r + AS 'select $1 as name, $2 as hobby' + LANGUAGE SQL; +CREATE FUNCTION hobby_construct_named(name text, hobby text) + RETURNS hobbies_r + AS 'select name, hobby' + LANGUAGE SQL; +CREATE FUNCTION hobbies_by_name(hobbies_r.name%TYPE) + RETURNS hobbies_r.person%TYPE + AS 'select person from hobbies_r where name = $1' + LANGUAGE SQL; +NOTICE: type reference hobbies_r.name%TYPE converted to text +NOTICE: type reference hobbies_r.person%TYPE converted to text +CREATE FUNCTION equipment(hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = $1.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where equipment_r.hobby = equipment_named.hobby.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_1a(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = equipment_named_ambiguous_1a.hobby.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_1b(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where equipment_r.hobby = hobby.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_1c(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = hobby.name' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_2a(hobby text) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = equipment_named_ambiguous_2a.hobby' + LANGUAGE SQL; +CREATE FUNCTION equipment_named_ambiguous_2b(hobby text) + RETURNS setof equipment_r + AS 'select * from equipment_r where equipment_r.hobby = hobby' + LANGUAGE SQL; +CREATE FUNCTION pt_in_widget(point, widget) + RETURNS bool + AS :'regresslib' + LANGUAGE C STRICT; +CREATE FUNCTION overpaid(emp) + RETURNS bool + AS :'regresslib' + LANGUAGE C STRICT; +CREATE FUNCTION interpt_pp(path, path) + RETURNS point + AS :'regresslib' + LANGUAGE C STRICT; +CREATE FUNCTION reverse_name(name) + RETURNS name + AS :'regresslib' + LANGUAGE C STRICT; +-- +-- Function dynamic loading +-- +LOAD :'regresslib'; diff --git a/src/test/regress/expected/largeobject.out b/src/test/regress/expected/largeobject.out new file mode 100644 index 00000000000..f461ca3b9f4 --- /dev/null +++ b/src/test/regress/expected/largeobject.out @@ -0,0 +1,496 @@ +-- +-- Test large object support +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR +-- ensure consistent test output regardless of the default bytea format +SET bytea_output TO escape; +-- Load a file +CREATE TABLE lotest_stash_values (loid oid, fd integer); +-- lo_creat(mode integer) returns oid +-- The mode arg to lo_creat is 
unused, some vestigal holdover from ancient times +-- returns the large object id +INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); +-- Test ALTER LARGE OBJECT +CREATE ROLE regress_lo_user; +DO $$ + BEGIN + EXECUTE 'ALTER LARGE OBJECT ' || (select loid from lotest_stash_values) + || ' OWNER TO regress_lo_user'; + END +$$; +SELECT + rol.rolname +FROM + lotest_stash_values s + JOIN pg_largeobject_metadata lo ON s.loid = lo.oid + JOIN pg_authid rol ON lo.lomowner = rol.oid; + rolname +----------------- + regress_lo_user +(1 row) + +-- NOTE: large objects require transactions +BEGIN; +-- lo_open(lobjId oid, mode integer) returns integer +-- The mode parameter to lo_open uses two constants: +-- INV_READ = 0x20000 +-- INV_WRITE = 0x40000 +-- The return value is a file descriptor-like value which remains valid for the +-- transaction. +UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- lowrite(fd integer, data bytea) returns integer +-- the integer is the number of bytes written +SELECT lowrite(fd, ' +I wandered lonely as a cloud +That floats on high o''er vales and hills, +When all at once I saw a crowd, +A host, of golden daffodils; +Beside the lake, beneath the trees, +Fluttering and dancing in the breeze. + +Continuous as the stars that shine +And twinkle on the milky way, +They stretched in never-ending line +Along the margin of a bay: +Ten thousand saw I at a glance, +Tossing their heads in sprightly dance. + +The waves beside them danced; but they +Out-did the sparkling waves in glee: +A poet could not but be gay, +In such a jocund company: +I gazed--and gazed--but little thought +What wealth the show to me had brought: + +For oft, when on my couch I lie +In vacant or in pensive mood, +They flash upon that inward eye +Which is the bliss of solitude; +And then my heart with pleasure fills, +And dances with the daffodils. + + -- William Wordsworth +') FROM lotest_stash_values; + lowrite +--------- + 848 +(1 row) + +-- lo_close(fd integer) returns integer +-- return value is 0 for success, or <0 for error (actually only -1, but...) +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Copy to another large object. +-- Note: we intentionally don't remove the object created here; +-- it's left behind to help test pg_dump. +SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values +\gset +-- Add a comment to it, as well, for pg_dump/pg_upgrade testing. 
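-- [Editor's note] These scripts lean on psql's client-side interpolation:
-- :var pastes a variable's value bare (right for the OID in the statement
-- just below), while :'var' quotes the value as a SQL literal (right for
-- file paths such as :'filename'). A tiny illustration with hypothetical
-- values:
\set myoid 16384
\set mypath '/tmp/lo.bin'
-- the next line expands to: SELECT lo_export(16384, '/tmp/lo.bin');
SELECT lo_export(:myoid, :'mypath');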
+COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud'; +-- Read out a portion +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- lo_lseek(fd integer, offset integer, whence integer) returns integer +-- offset is in bytes, whence is one of three values: +-- SEEK_SET (= 0) meaning relative to beginning +-- SEEK_CUR (= 1) meaning relative to current position +-- SEEK_END (= 2) meaning relative to end (offset better be negative) +-- returns current position in file +SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values; + lo_lseek +---------- + 104 +(1 row) + +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- loread(fd integer, len integer) returns bytea +SELECT loread(fd, 28) FROM lotest_stash_values; + loread +------------------------------ + A host, of golden daffodils; +(1 row) + +SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; + lo_lseek +---------- + 113 +(1 row) + +SELECT lowrite(fd, 'n') FROM lotest_stash_values; + lowrite +--------- + 1 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 114 +(1 row) + +SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values; + lo_lseek +---------- + 104 +(1 row) + +SELECT loread(fd, 28) FROM lotest_stash_values; + loread +------------------------------ + A host, on golden daffodils; +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Test resource management +BEGIN; +SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; + lo_open +--------- + 0 +(1 row) + +ABORT; +\set filename :abs_builddir '/results/invalid/path' +\set dobody 'DECLARE loid oid; BEGIN ' +\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' +\set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); ' +\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' +\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' +DO :'dobody'; +NOTICE: could not open file, as expected +-- Test truncation. +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +SELECT lo_truncate(fd, 11) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT loread(fd, 15) FROM lotest_stash_values; + loread +---------------- + \012I wandered +(1 row) + +SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT loread(fd, 10) FROM lotest_stash_values; + loread +------------------------------------------ + \000\000\000\000\000\000\000\000\000\000 +(1 row) + +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 10000 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 10000 +(1 row) + +SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 5000 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 5000 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Test 64-bit large object functions. 
+BEGIN; +UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values; + lo_lseek64 +------------ + 4294967296 +(1 row) + +SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values; + lowrite +--------- + 10 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 4294967306 +(1 row) + +SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values; + lo_lseek64 +------------ + 4294967296 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 4294967296 +(1 row) + +SELECT loread(fd, 10) FROM lotest_stash_values; + loread +------------ + offset:4GB +(1 row) + +SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values; + lo_truncate64 +--------------- + 0 +(1 row) + +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; + lo_lseek64 +------------ + 5000000000 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 5000000000 +(1 row) + +SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values; + lo_truncate64 +--------------- + 0 +(1 row) + +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; + lo_lseek64 +------------ + 3000000000 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 3000000000 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- lo_unlink(lobjId oid) returns integer +-- return value appears to always be 1 +SELECT lo_unlink(loid) from lotest_stash_values; + lo_unlink +----------- + 1 +(1 row) + +TRUNCATE lotest_stash_values; +\set filename :abs_srcdir '/data/tenk.data' +INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- verify length of large object +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 670800 +(1 row) + +-- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block +-- edge case +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 +(1 row) + +-- this should get half of the value from page 0 and half from page 1 of the +-- large object +SELECT loread(fd, 36) FROM lotest_stash_values; + loread +----------------------------------------------------------------- + AAA\011FBAAAA\011VVVVxx\0122513\01132\0111\0111\0113\01113\0111 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 2066 +(1 row) + +SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; + lo_lseek +---------- + 2040 +(1 row) + +SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; + lowrite +--------- + 16 +(1 row) + +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 +(1 row) + +SELECT loread(fd, 36) FROM lotest_stash_values; + loread +----------------------------------------------------- + AAA\011FBAAAAabcdefghijklmnop1\0111\0113\01113\0111 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +\set filename :abs_builddir '/results/lotest.txt' +SELECT lo_export(loid, :'filename') FROM lotest_stash_values; + lo_export +----------- + 1 +(1 row) + +\lo_import :filename +\set newloid :LASTOID +-- just make sure \lo_export does not barf +\set filename :abs_builddir '/results/lotest2.txt' +\lo_export :newloid :filename +-- This is a hack to test that export/import are reversible +-- This uses knowledge about the inner workings of 
large object mechanism +-- which should not be used outside it. This makes it a HACK +SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) +EXCEPT +SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; + pageno | data +--------+------ +(0 rows) + +SELECT lo_unlink(loid) FROM lotest_stash_values; + lo_unlink +----------- + 1 +(1 row) + +TRUNCATE lotest_stash_values; +\lo_unlink :newloid +\set filename :abs_builddir '/results/lotest.txt' +\lo_import :filename +\set newloid_1 :LASTOID +SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2 +\gset +SELECT md5(lo_get(:newloid_1)) = md5(lo_get(:newloid_2)); + ?column? +---------- + t +(1 row) + +SELECT lo_get(:newloid_1, 0, 20); + lo_get +------------------------------------------- + 8800\0110\0110\0110\0110\0110\0110\011800 +(1 row) + +SELECT lo_get(:newloid_1, 10, 20); + lo_get +------------------------------------------- + \0110\0110\0110\011800\011800\0113800\011 +(1 row) + +SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex')); + lo_put +-------- + +(1 row) + +SELECT lo_get(:newloid_1, 0, 20); + lo_get +------------------------------------------------- + 8800\011\257\257\257\2570\0110\0110\0110\011800 +(1 row) + +SELECT lo_put(:newloid_1, 4294967310, 'foo'); + lo_put +-------- + +(1 row) + +SELECT lo_get(:newloid_1); +ERROR: large object read request is too large +SELECT lo_get(:newloid_1, 4294967294, 100); + lo_get +--------------------------------------------------------------------- + \000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000foo +(1 row) + +\lo_unlink :newloid_1 +\lo_unlink :newloid_2 +-- This object is left in the database for pg_dump test purposes +SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid +\gset +SET bytea_output TO hex; +SELECT lo_get(:newloid); + lo_get +------------ + \xdeadbeef +(1 row) + +-- Create one more object that we leave behind for testing pg_dump/pg_upgrade; +-- this one intentionally has an OID in the system range +SELECT lo_create(2121); + lo_create +----------- + 2121 +(1 row) + +COMMENT ON LARGE OBJECT 2121 IS 'testing comments'; +-- Clean up +DROP TABLE lotest_stash_values; +DROP ROLE regress_lo_user; diff --git a/src/test/regress/expected/largeobject_1.out b/src/test/regress/expected/largeobject_1.out new file mode 100644 index 00000000000..a9725c375d5 --- /dev/null +++ b/src/test/regress/expected/largeobject_1.out @@ -0,0 +1,496 @@ +-- +-- Test large object support +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR +-- ensure consistent test output regardless of the default bytea format +SET bytea_output TO escape; +-- Load a file +CREATE TABLE lotest_stash_values (loid oid, fd integer); +-- lo_creat(mode integer) returns oid +-- The mode arg to lo_creat is unused, some vestigal holdover from ancient times +-- returns the large object id +INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); +-- Test ALTER LARGE OBJECT +CREATE ROLE regress_lo_user; +DO $$ + BEGIN + EXECUTE 'ALTER LARGE OBJECT ' || (select loid from lotest_stash_values) + || ' OWNER TO regress_lo_user'; + END +$$; +SELECT + rol.rolname +FROM + lotest_stash_values s + JOIN pg_largeobject_metadata lo ON s.loid = lo.oid + JOIN pg_authid rol ON lo.lomowner = rol.oid; + rolname +----------------- + regress_lo_user +(1 row) + +-- NOTE: large objects require transactions +BEGIN; +-- lo_open(lobjId oid, mode integer) returns integer +-- The mode parameter to lo_open uses two 
constants: +-- INV_READ = 0x20000 +-- INV_WRITE = 0x40000 +-- The return value is a file descriptor-like value which remains valid for the +-- transaction. +UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- lowrite(fd integer, data bytea) returns integer +-- the integer is the number of bytes written +SELECT lowrite(fd, ' +I wandered lonely as a cloud +That floats on high o''er vales and hills, +When all at once I saw a crowd, +A host, of golden daffodils; +Beside the lake, beneath the trees, +Fluttering and dancing in the breeze. + +Continuous as the stars that shine +And twinkle on the milky way, +They stretched in never-ending line +Along the margin of a bay: +Ten thousand saw I at a glance, +Tossing their heads in sprightly dance. + +The waves beside them danced; but they +Out-did the sparkling waves in glee: +A poet could not but be gay, +In such a jocund company: +I gazed--and gazed--but little thought +What wealth the show to me had brought: + +For oft, when on my couch I lie +In vacant or in pensive mood, +They flash upon that inward eye +Which is the bliss of solitude; +And then my heart with pleasure fills, +And dances with the daffodils. + + -- William Wordsworth +') FROM lotest_stash_values; + lowrite +--------- + 848 +(1 row) + +-- lo_close(fd integer) returns integer +-- return value is 0 for success, or <0 for error (actually only -1, but...) +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Copy to another large object. +-- Note: we intentionally don't remove the object created here; +-- it's left behind to help test pg_dump. +SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values +\gset +-- Add a comment to it, as well, for pg_dump/pg_upgrade testing. 
+COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud'; +-- Read out a portion +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- lo_lseek(fd integer, offset integer, whence integer) returns integer +-- offset is in bytes, whence is one of three values: +-- SEEK_SET (= 0) meaning relative to beginning +-- SEEK_CUR (= 1) meaning relative to current position +-- SEEK_END (= 2) meaning relative to end (offset better be negative) +-- returns current position in file +SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values; + lo_lseek +---------- + 104 +(1 row) + +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- loread(fd integer, len integer) returns bytea +SELECT loread(fd, 28) FROM lotest_stash_values; + loread +------------------------------ + A host, of golden daffodils; +(1 row) + +SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; + lo_lseek +---------- + 113 +(1 row) + +SELECT lowrite(fd, 'n') FROM lotest_stash_values; + lowrite +--------- + 1 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 114 +(1 row) + +SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values; + lo_lseek +---------- + 104 +(1 row) + +SELECT loread(fd, 28) FROM lotest_stash_values; + loread +------------------------------ + A host, on golden daffodils; +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Test resource management +BEGIN; +SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; + lo_open +--------- + 0 +(1 row) + +ABORT; +\set filename :abs_builddir '/results/invalid/path' +\set dobody 'DECLARE loid oid; BEGIN ' +\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' +\set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); ' +\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' +\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' +DO :'dobody'; +NOTICE: could not open file, as expected +-- Test truncation. +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +SELECT lo_truncate(fd, 11) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT loread(fd, 15) FROM lotest_stash_values; + loread +---------------- + \012I wandered +(1 row) + +SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT loread(fd, 10) FROM lotest_stash_values; + loread +------------------------------------------ + \000\000\000\000\000\000\000\000\000\000 +(1 row) + +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 10000 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 10000 +(1 row) + +SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; + lo_truncate +------------- + 0 +(1 row) + +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 5000 +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 5000 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- Test 64-bit large object functions. 
+BEGIN; +UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values; + lo_lseek64 +------------ + 4294967296 +(1 row) + +SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values; + lowrite +--------- + 10 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 4294967306 +(1 row) + +SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values; + lo_lseek64 +------------ + 4294967296 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 4294967296 +(1 row) + +SELECT loread(fd, 10) FROM lotest_stash_values; + loread +------------ + offset:4GB +(1 row) + +SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values; + lo_truncate64 +--------------- + 0 +(1 row) + +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; + lo_lseek64 +------------ + 5000000000 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 5000000000 +(1 row) + +SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values; + lo_truncate64 +--------------- + 0 +(1 row) + +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; + lo_lseek64 +------------ + 3000000000 +(1 row) + +SELECT lo_tell64(fd) FROM lotest_stash_values; + lo_tell64 +------------ + 3000000000 +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +-- lo_unlink(lobjId oid) returns integer +-- return value appears to always be 1 +SELECT lo_unlink(loid) from lotest_stash_values; + lo_unlink +----------- + 1 +(1 row) + +TRUNCATE lotest_stash_values; +\set filename :abs_srcdir '/data/tenk.data' +INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); +-- verify length of large object +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + lo_lseek +---------- + 680800 +(1 row) + +-- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block +-- edge case +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 +(1 row) + +-- this should get half of the value from page 0 and half from page 1 of the +-- large object +SELECT loread(fd, 36) FROM lotest_stash_values; + loread +-------------------------------------------------------------- + 44\011144\0111144\0114144\0119144\01188\01189\011SNAAAA\011F +(1 row) + +SELECT lo_tell(fd) FROM lotest_stash_values; + lo_tell +--------- + 2066 +(1 row) + +SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; + lo_lseek +---------- + 2040 +(1 row) + +SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; + lowrite +--------- + 16 +(1 row) + +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + lo_lseek +---------- + 2030 +(1 row) + +SELECT loread(fd, 36) FROM lotest_stash_values; + loread +-------------------------------------------------- + 44\011144\011114abcdefghijklmnop9\011SNAAAA\011F +(1 row) + +SELECT lo_close(fd) FROM lotest_stash_values; + lo_close +---------- + 0 +(1 row) + +END; +\set filename :abs_builddir '/results/lotest.txt' +SELECT lo_export(loid, :'filename') FROM lotest_stash_values; + lo_export +----------- + 1 +(1 row) + +\lo_import :filename +\set newloid :LASTOID +-- just make sure \lo_export does not barf +\set filename :abs_builddir '/results/lotest2.txt' +\lo_export :newloid :filename +-- This is a hack to test that export/import are reversible +-- This uses knowledge about the inner workings of large 
object mechanism +-- which should not be used outside it. This makes it a HACK +SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) +EXCEPT +SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; + pageno | data +--------+------ +(0 rows) + +SELECT lo_unlink(loid) FROM lotest_stash_values; + lo_unlink +----------- + 1 +(1 row) + +TRUNCATE lotest_stash_values; +\lo_unlink :newloid +\set filename :abs_builddir '/results/lotest.txt' +\lo_import :filename +\set newloid_1 :LASTOID +SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2 +\gset +SELECT md5(lo_get(:newloid_1)) = md5(lo_get(:newloid_2)); + ?column? +---------- + t +(1 row) + +SELECT lo_get(:newloid_1, 0, 20); + lo_get +------------------------------------------- + 8800\0110\0110\0110\0110\0110\0110\011800 +(1 row) + +SELECT lo_get(:newloid_1, 10, 20); + lo_get +------------------------------------------- + \0110\0110\0110\011800\011800\0113800\011 +(1 row) + +SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex')); + lo_put +-------- + +(1 row) + +SELECT lo_get(:newloid_1, 0, 20); + lo_get +------------------------------------------------- + 8800\011\257\257\257\2570\0110\0110\0110\011800 +(1 row) + +SELECT lo_put(:newloid_1, 4294967310, 'foo'); + lo_put +-------- + +(1 row) + +SELECT lo_get(:newloid_1); +ERROR: large object read request is too large +SELECT lo_get(:newloid_1, 4294967294, 100); + lo_get +--------------------------------------------------------------------- + \000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000foo +(1 row) + +\lo_unlink :newloid_1 +\lo_unlink :newloid_2 +-- This object is left in the database for pg_dump test purposes +SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid +\gset +SET bytea_output TO hex; +SELECT lo_get(:newloid); + lo_get +------------ + \xdeadbeef +(1 row) + +-- Create one more object that we leave behind for testing pg_dump/pg_upgrade; +-- this one intentionally has an OID in the system range +SELECT lo_create(2121); + lo_create +----------- + 2121 +(1 row) + +COMMENT ON LARGE OBJECT 2121 IS 'testing comments'; +-- Clean up +DROP TABLE lotest_stash_values; +DROP ROLE regress_lo_user; diff --git a/src/test/regress/expected/misc.out b/src/test/regress/expected/misc.out new file mode 100644 index 00000000000..dd65e6ed62f --- /dev/null +++ b/src/test/regress/expected/misc.out @@ -0,0 +1,697 @@ +-- +-- MISC +-- +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR +-- +-- BTREE +-- +UPDATE onek + SET unique1 = onek.unique1 + 1; +UPDATE onek + SET unique1 = onek.unique1 - 1; +-- +-- BTREE partial +-- +-- UPDATE onek2 +-- SET unique1 = onek2.unique1 + 1; +--UPDATE onek2 +-- SET unique1 = onek2.unique1 - 1; +-- +-- BTREE shutting out non-functional updates +-- +-- the following two tests seem to take a long time on some +-- systems. This non-func update stuff needs to be examined +-- more closely. 
- jolly (2/22/96) +-- +UPDATE tmp + SET stringu1 = reverse_name(onek.stringu1) + FROM onek + WHERE onek.stringu1 = 'JBAAAA' and + onek.stringu1 = tmp.stringu1; +UPDATE tmp + SET stringu1 = reverse_name(onek2.stringu1) + FROM onek2 + WHERE onek2.stringu1 = 'JCAAAA' and + onek2.stringu1 = tmp.stringu1; +DROP TABLE tmp; +--UPDATE person* +-- SET age = age + 1; +--UPDATE person* +-- SET age = age + 3 +-- WHERE name = 'linda'; +-- +-- copy +-- +\set filename :abs_builddir '/results/onek.data' +COPY onek TO :'filename'; +DELETE FROM onek; +COPY onek FROM :'filename'; +SELECT unique1 FROM onek WHERE unique1 < 2 ORDER BY unique1; + unique1 +--------- + 0 + 1 +(2 rows) + +DELETE FROM onek2; +COPY onek2 FROM :'filename'; +SELECT unique1 FROM onek2 WHERE unique1 < 2 ORDER BY unique1; + unique1 +--------- + 0 + 1 +(2 rows) + +\set filename :abs_builddir '/results/stud_emp.data' +COPY BINARY stud_emp TO :'filename'; +DELETE FROM stud_emp; +COPY BINARY stud_emp FROM :'filename'; +SELECT * FROM stud_emp; + name | age | location | salary | manager | gpa | percent +-------+-----+------------+--------+---------+-----+--------- + jeff | 23 | (8,7.7) | 600 | sharon | 3.5 | + cim | 30 | (10.5,4.7) | 400 | | 3.4 | + linda | 19 | (0.9,6.1) | 100 | | 2.9 | +(3 rows) + +-- COPY aggtest FROM stdin; +-- 56 7.8 +-- 100 99.097 +-- 0 0.09561 +-- 42 324.78 +-- . +-- COPY aggtest TO stdout; +-- +-- inheritance stress test +-- +SELECT * FROM a_star*; + class | a +-------+---- + a | 1 + a | 2 + a | + b | 3 + b | 4 + b | + b | + c | 5 + c | 6 + c | + c | + d | 7 + d | 8 + d | 9 + d | 10 + d | + d | 11 + d | 12 + d | 13 + d | + d | + d | + d | 14 + d | + d | + d | + d | + e | 15 + e | 16 + e | 17 + e | + e | 18 + e | + e | + f | 19 + f | 20 + f | 21 + f | 22 + f | + f | 24 + f | 25 + f | 26 + f | + f | + f | + f | 27 + f | + f | + f | + f | +(50 rows) + +SELECT * + FROM b_star* x + WHERE x.b = text 'bumble' or x.a < 3; + class | a | b +-------+---+-------- + b | | bumble +(1 row) + +SELECT class, a + FROM c_star* x + WHERE x.c ~ text 'hi'; + class | a +-------+---- + c | 5 + c | + d | 7 + d | 8 + d | 10 + d | + d | 12 + d | + d | + d | + e | 15 + e | 16 + e | + e | + f | 19 + f | 20 + f | 21 + f | + f | 24 + f | + f | + f | +(22 rows) + +SELECT class, b, c + FROM d_star* x + WHERE x.a < 100; + class | b | c +-------+---------+------------ + d | grumble | hi sunita + d | stumble | hi koko + d | rumble | + d | | hi kristin + d | fumble | + d | | hi avi + d | | + d | | +(8 rows) + +SELECT class, c FROM e_star* x WHERE x.c NOTNULL; + class | c +-------+------------- + e | hi carol + e | hi bob + e | hi michelle + e | hi elisa + f | hi claire + f | hi mike + f | hi marcel + f | hi keith + f | hi marc + f | hi allison + f | hi jeff + f | hi carl +(12 rows) + +SELECT * FROM f_star* x WHERE x.c ISNULL; + class | a | c | e | f +-------+----+---+-----+------------------------------------------- + f | 22 | | -7 | ((111,555),(222,666),(333,777),(444,888)) + f | 25 | | -9 | + f | 26 | | | ((11111,33333),(22222,44444)) + f | | | -11 | ((1111111,3333333),(2222222,4444444)) + f | 27 | | | + f | | | -12 | + f | | | | ((11111111,33333333),(22222222,44444444)) + f | | | | +(8 rows) + +-- grouping and aggregation on inherited sets have been busted in the past... 
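-- [Editor's note] Throughout this file the trailing * (a_star*, person*) is
-- the explicit "include descendant tables" spelling. Since inheritance
-- traversal became the default behavior, a bare table name is equivalent,
-- and ONLY is what restricts a query to the parent; a minimal contrast:
SELECT count(*) FROM a_star;        -- parent plus all children, same as a_star*
SELECT count(*) FROM ONLY a_star;   -- rows stored in the parent table alone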
+SELECT sum(a) FROM a_star*; + sum +----- + 355 +(1 row) + +SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class; + class | sum +-------+----- + a | 3 + b | 7 + c | 11 + d | 84 + e | 66 + f | 184 +(6 rows) + +ALTER TABLE f_star RENAME COLUMN f TO ff; +ALTER TABLE e_star* RENAME COLUMN e TO ee; +ALTER TABLE d_star* RENAME COLUMN d TO dd; +ALTER TABLE c_star* RENAME COLUMN c TO cc; +ALTER TABLE b_star* RENAME COLUMN b TO bb; +ALTER TABLE a_star* RENAME COLUMN a TO aa; +SELECT class, aa + FROM a_star* x + WHERE aa ISNULL; + class | aa +-------+---- + a | + b | + b | + c | + c | + d | + d | + d | + d | + d | + d | + d | + d | + e | + e | + e | + f | + f | + f | + f | + f | + f | + f | + f | +(24 rows) + +-- As of Postgres 7.1, ALTER implicitly recurses, +-- so this should be same as ALTER a_star* +ALTER TABLE a_star RENAME COLUMN aa TO foo; +SELECT class, foo + FROM a_star* x + WHERE x.foo >= 2; + class | foo +-------+----- + a | 2 + b | 3 + b | 4 + c | 5 + c | 6 + d | 7 + d | 8 + d | 9 + d | 10 + d | 11 + d | 12 + d | 13 + d | 14 + e | 15 + e | 16 + e | 17 + e | 18 + f | 19 + f | 20 + f | 21 + f | 22 + f | 24 + f | 25 + f | 26 + f | 27 +(25 rows) + +ALTER TABLE a_star RENAME COLUMN foo TO aa; +SELECT * + from a_star* + WHERE aa < 1000; + class | aa +-------+---- + a | 1 + a | 2 + b | 3 + b | 4 + c | 5 + c | 6 + d | 7 + d | 8 + d | 9 + d | 10 + d | 11 + d | 12 + d | 13 + d | 14 + e | 15 + e | 16 + e | 17 + e | 18 + f | 19 + f | 20 + f | 21 + f | 22 + f | 24 + f | 25 + f | 26 + f | 27 +(26 rows) + +ALTER TABLE f_star ADD COLUMN f int4; +UPDATE f_star SET f = 10; +ALTER TABLE e_star* ADD COLUMN e int4; +--UPDATE e_star* SET e = 42; +SELECT * FROM e_star*; + class | aa | cc | ee | e +-------+----+-------------+-----+--- + e | 15 | hi carol | -1 | + e | 16 | hi bob | | + e | 17 | | -2 | + e | | hi michelle | -3 | + e | 18 | | | + e | | hi elisa | | + e | | | -4 | + f | 19 | hi claire | -5 | + f | 20 | hi mike | -6 | + f | 21 | hi marcel | | + f | 22 | | -7 | + f | | hi keith | -8 | + f | 24 | hi marc | | + f | 25 | | -9 | + f | 26 | | | + f | | hi allison | -10 | + f | | hi jeff | | + f | | | -11 | + f | 27 | | | + f | | hi carl | | + f | | | -12 | + f | | | | + f | | | | +(23 rows) + +ALTER TABLE a_star* ADD COLUMN a text; +NOTICE: merging definition of column "a" for child "d_star" +-- That ALTER TABLE should have added TOAST tables. +SELECT relname, reltoastrelid <> 0 AS has_toast_table + FROM pg_class + WHERE oid::regclass IN ('a_star', 'c_star') + ORDER BY 1; + relname | has_toast_table +---------+----------------- + a_star | t + c_star | t +(2 rows) + +--UPDATE b_star* +-- SET a = text 'gazpacho' +-- WHERE aa > 4; +SELECT class, aa, a FROM a_star*; + class | aa | a +-------+----+--- + a | 1 | + a | 2 | + a | | + b | 3 | + b | 4 | + b | | + b | | + c | 5 | + c | 6 | + c | | + c | | + d | 7 | + d | 8 | + d | 9 | + d | 10 | + d | | + d | 11 | + d | 12 | + d | 13 | + d | | + d | | + d | | + d | 14 | + d | | + d | | + d | | + d | | + e | 15 | + e | 16 | + e | 17 | + e | | + e | 18 | + e | | + e | | + f | 19 | + f | 20 | + f | 21 | + f | 22 | + f | | + f | 24 | + f | 25 | + f | 26 | + f | | + f | | + f | | + f | 27 | + f | | + f | | + f | | + f | | +(50 rows) + +-- +-- versions +-- +-- +-- postquel functions +-- +-- +-- mike does post_hacking, +-- joe and sally play basketball, and +-- everyone else does nothing. 
+-- +SELECT p.name, name(p.hobbies) FROM ONLY person p; + name | name +-------+------------- + mike | posthacking + joe | basketball + sally | basketball +(3 rows) + +-- +-- as above, but jeff also does post_hacking. +-- +SELECT p.name, name(p.hobbies) FROM person* p; + name | name +-------+------------- + mike | posthacking + joe | basketball + sally | basketball + jeff | posthacking +(4 rows) + +-- +-- the next two queries demonstrate how functions generate bogus duplicates. +-- this is a "feature" .. +-- +SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r + ORDER BY 1,2; + name | name +-------------+--------------- + basketball | hightops + posthacking | advil + posthacking | peet's coffee + skywalking | guts +(4 rows) + +SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r; + name | name +-------------+--------------- + posthacking | advil + posthacking | peet's coffee + posthacking | advil + posthacking | peet's coffee + basketball | hightops + basketball | hightops + skywalking | guts +(7 rows) + +-- +-- mike needs advil and peet's coffee, +-- joe and sally need hightops, and +-- everyone else is fine. +-- +SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p; + name | name | name +-------+-------------+--------------- + mike | posthacking | advil + mike | posthacking | peet's coffee + joe | basketball | hightops + sally | basketball | hightops +(4 rows) + +-- +-- as above, but jeff needs advil and peet's coffee as well. +-- +SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p; + name | name | name +-------+-------------+--------------- + mike | posthacking | advil + mike | posthacking | peet's coffee + joe | basketball | hightops + sally | basketball | hightops + jeff | posthacking | advil + jeff | posthacking | peet's coffee +(6 rows) + +-- +-- just like the last two, but make sure that the target list fixup and +-- unflattening is being done correctly. 
+-- +SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p; + name | name | name +---------------+-------+------------- + advil | mike | posthacking + peet's coffee | mike | posthacking + hightops | joe | basketball + hightops | sally | basketball +(4 rows) + +SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p; + name | name | name +---------------+-------+------------- + advil | mike | posthacking + peet's coffee | mike | posthacking + hightops | joe | basketball + hightops | sally | basketball + advil | jeff | posthacking + peet's coffee | jeff | posthacking +(6 rows) + +SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p; + name | name | name +---------------+-------------+------- + advil | posthacking | mike + peet's coffee | posthacking | mike + hightops | basketball | joe + hightops | basketball | sally +(4 rows) + +SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p; + name | name | name +---------------+-------------+------- + advil | posthacking | mike + peet's coffee | posthacking | mike + hightops | basketball | joe + hightops | basketball | sally + advil | posthacking | jeff + peet's coffee | posthacking | jeff +(6 rows) + +SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer'))); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_2a(text 'skywalking')); + name +------ + guts +(1 row) + +SELECT name(equipment_named_ambiguous_2b(text 'skywalking')); + name +--------------- + advil + peet's coffee + hightops + guts +(4 rows) + +SELECT hobbies_by_name('basketball'); + hobbies_by_name +----------------- + joe +(1 row) + +SELECT name, overpaid(emp.*) FROM emp; + name | overpaid +--------+---------- + sharon | t + sam | t + bill | t + jeff | f + cim | f + linda | f +(6 rows) + +-- +-- Try a few cases with SQL-spec row constructor expressions +-- +SELECT * FROM equipment(ROW('skywalking', 'mer')); + name | hobby +------+------------ + guts | skywalking +(1 row) + +SELECT name(equipment(ROW('skywalking', 'mer'))); + name +------ + guts +(1 row) + +SELECT *, name(equipment(h.*)) FROM hobbies_r h; + name | person | name +-------------+--------+--------------- + posthacking | mike | advil + posthacking | mike | peet's coffee + posthacking | jeff | advil + posthacking | jeff | peet's coffee + basketball | joe | hightops + basketball | sally | hightops + skywalking | | guts +(7 rows) + +SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; + name | person | name +-------------+--------+--------------- + posthacking | mike | advil + posthacking | mike | peet's coffee + posthacking | jeff | advil + posthacking | jeff | peet's coffee + basketball | joe | hightops + basketball | sally | hightops + skywalking | | guts +(7 rows) + +-- +-- functional joins +-- +-- +-- instance rules +-- +-- +-- rewrite rules +-- diff --git 
a/src/test/regress/expected/tablespace.out b/src/test/regress/expected/tablespace.out new file mode 100644 index 00000000000..864f4b6e208 --- /dev/null +++ b/src/test/regress/expected/tablespace.out @@ -0,0 +1,939 @@ +-- directory paths are passed to us in environment variables +\getenv abs_builddir PG_ABS_BUILDDIR +\set testtablespace :abs_builddir '/testtablespace' +-- create a tablespace using WITH clause +CREATE TABLESPACE regress_tblspacewith LOCATION :'testtablespace' WITH (some_nonexistent_parameter = true); -- fail +ERROR: unrecognized parameter "some_nonexistent_parameter" +CREATE TABLESPACE regress_tblspacewith LOCATION :'testtablespace' WITH (random_page_cost = 3.0); -- ok +-- check to see the parameter was used +SELECT spcoptions FROM pg_tablespace WHERE spcname = 'regress_tblspacewith'; + spcoptions +------------------------ + {random_page_cost=3.0} +(1 row) + +-- drop the tablespace so we can re-use the location +DROP TABLESPACE regress_tblspacewith; +-- create a tablespace we can use +CREATE TABLESPACE regress_tblspace LOCATION :'testtablespace'; +-- try setting and resetting some properties for the new tablespace +ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1); +ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail +ERROR: unrecognized parameter "some_nonexistent_parameter" +ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail +ERROR: RESET must not include values for parameters +ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok +-- REINDEX (TABLESPACE) +-- catalogs and system tablespaces +-- system catalog, fail +REINDEX (TABLESPACE regress_tblspace) TABLE pg_am; +ERROR: cannot move system relation "pg_am_name_index" +REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_am; +ERROR: cannot reindex system catalogs concurrently +-- shared catalog, fail +REINDEX (TABLESPACE regress_tblspace) TABLE pg_authid; +ERROR: cannot move system relation "pg_authid_rolname_index" +REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_authid; +ERROR: cannot reindex system catalogs concurrently +-- toast relations, fail +REINDEX (TABLESPACE regress_tblspace) INDEX pg_toast.pg_toast_1260_index; +ERROR: cannot move system relation "pg_toast_1260_index" +REINDEX (TABLESPACE regress_tblspace) INDEX CONCURRENTLY pg_toast.pg_toast_1260_index; +ERROR: cannot reindex system catalogs concurrently +REINDEX (TABLESPACE regress_tblspace) TABLE pg_toast.pg_toast_1260; +ERROR: cannot move system relation "pg_toast_1260_index" +REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_toast.pg_toast_1260; +ERROR: cannot reindex system catalogs concurrently +-- system catalog, fail +REINDEX (TABLESPACE pg_global) TABLE pg_authid; +ERROR: cannot move system relation "pg_authid_rolname_index" +REINDEX (TABLESPACE pg_global) TABLE CONCURRENTLY pg_authid; +ERROR: cannot reindex system catalogs concurrently +-- table with toast relation +CREATE TABLE regress_tblspace_test_tbl (num1 bigint, num2 double precision, t text); +INSERT INTO regress_tblspace_test_tbl (num1, num2, t) + SELECT round(random()*100), random(), 'text' + FROM generate_series(1, 10) s(i); +CREATE INDEX regress_tblspace_test_tbl_idx ON regress_tblspace_test_tbl (num1); +-- move to global tablespace, fail +REINDEX (TABLESPACE pg_global) INDEX regress_tblspace_test_tbl_idx; +ERROR: only shared relations can be placed in pg_global tablespace +REINDEX (TABLESPACE pg_global) INDEX CONCURRENTLY 
regress_tblspace_test_tbl_idx; +ERROR: cannot move non-shared relation to tablespace "pg_global" +-- check transactional behavior of REINDEX (TABLESPACE) +BEGIN; +REINDEX (TABLESPACE regress_tblspace) INDEX regress_tblspace_test_tbl_idx; +REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; +ROLLBACK; +-- no relation moved to the new tablespace +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'; + relname +--------- +(0 rows) + +-- check that all indexes are moved to a new tablespace with different +-- relfilenode. +-- Save first the existing relfilenode for the toast and main relations. +SELECT relfilenode as main_filenode FROM pg_class + WHERE relname = 'regress_tblspace_test_tbl_idx' \gset +SELECT relfilenode as toast_filenode FROM pg_class + WHERE oid = + (SELECT i.indexrelid + FROM pg_class c, + pg_index i + WHERE i.indrelid = c.reltoastrelid AND + c.relname = 'regress_tblspace_test_tbl') \gset +REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; + relname +------------------------------- + regress_tblspace_test_tbl_idx +(1 row) + +ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE regress_tblspace; +ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE pg_default; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; + relname +------------------------------- + regress_tblspace_test_tbl_idx +(1 row) + +-- Move back to the default tablespace. +ALTER INDEX regress_tblspace_test_tbl_idx SET TABLESPACE pg_default; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; + relname +--------- +(0 rows) + +REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE regress_tblspace_test_tbl; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; + relname +------------------------------- + regress_tblspace_test_tbl_idx +(1 row) + +SELECT relfilenode = :main_filenode AS main_same FROM pg_class + WHERE relname = 'regress_tblspace_test_tbl_idx'; + main_same +----------- + f +(1 row) + +SELECT relfilenode = :toast_filenode as toast_same FROM pg_class + WHERE oid = + (SELECT i.indexrelid + FROM pg_class c, + pg_index i + WHERE i.indrelid = c.reltoastrelid AND + c.relname = 'regress_tblspace_test_tbl'); + toast_same +------------ + f +(1 row) + +DROP TABLE regress_tblspace_test_tbl; +-- REINDEX (TABLESPACE) with partitions +-- Create a partition tree and check the set of relations reindexed +-- with their new tablespace. +CREATE TABLE tbspace_reindex_part (c1 int, c2 int) PARTITION BY RANGE (c1); +CREATE TABLE tbspace_reindex_part_0 PARTITION OF tbspace_reindex_part + FOR VALUES FROM (0) TO (10) PARTITION BY list (c2); +CREATE TABLE tbspace_reindex_part_0_1 PARTITION OF tbspace_reindex_part_0 + FOR VALUES IN (1); +CREATE TABLE tbspace_reindex_part_0_2 PARTITION OF tbspace_reindex_part_0 + FOR VALUES IN (2); +-- This partitioned table will have no partitions. 
+CREATE TABLE tbspace_reindex_part_10 PARTITION OF tbspace_reindex_part + FOR VALUES FROM (10) TO (20) PARTITION BY list (c2); +-- Create some partitioned indexes +CREATE INDEX tbspace_reindex_part_index ON ONLY tbspace_reindex_part (c1); +CREATE INDEX tbspace_reindex_part_index_0 ON ONLY tbspace_reindex_part_0 (c1); +ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_0; +-- This partitioned index will have no partitions. +CREATE INDEX tbspace_reindex_part_index_10 ON ONLY tbspace_reindex_part_10 (c1); +ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_10; +CREATE INDEX tbspace_reindex_part_index_0_1 ON ONLY tbspace_reindex_part_0_1 (c1); +ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_1; +CREATE INDEX tbspace_reindex_part_index_0_2 ON ONLY tbspace_reindex_part_0_2 (c1); +ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_2; +SELECT relid, parentrelid, level FROM pg_partition_tree('tbspace_reindex_part_index') + ORDER BY relid, level; + relid | parentrelid | level +--------------------------------+------------------------------+------- + tbspace_reindex_part_index | | 0 + tbspace_reindex_part_index_0 | tbspace_reindex_part_index | 1 + tbspace_reindex_part_index_10 | tbspace_reindex_part_index | 1 + tbspace_reindex_part_index_0_1 | tbspace_reindex_part_index_0 | 2 + tbspace_reindex_part_index_0_2 | tbspace_reindex_part_index_0 | 2 +(5 rows) + +-- Track the original tablespace, relfilenode and OID of each index +-- in the tree. +CREATE TEMP TABLE reindex_temp_before AS + SELECT oid, relname, relfilenode, reltablespace + FROM pg_class + WHERE relname ~ 'tbspace_reindex_part_index'; +REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tbspace_reindex_part; +-- REINDEX CONCURRENTLY changes the OID of the old relation, hence a check +-- based on the relation name below. 
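-- [Editor's note] In the result below, only the leaf indexes should report a
-- change: partitioned indexes (relkind 'I') carry no storage of their own, so
-- REINDEX recurses to the leaf partitions and leaves the parents untouched.
-- One way to see which of these relations actually have storage:
SELECT relname, relkind, relfilenode <> 0 AS has_storage
  FROM pg_class
 WHERE relname ~ 'tbspace_reindex_part_index'
 ORDER BY relname;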
+SELECT b.relname, + CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged' + ELSE 'relfilenode has changed' END AS filenode, + CASE WHEN a.reltablespace = b.reltablespace THEN 'reltablespace is unchanged' + ELSE 'reltablespace has changed' END AS tbspace + FROM reindex_temp_before b JOIN pg_class a ON b.relname = a.relname + ORDER BY 1; + relname | filenode | tbspace +--------------------------------+--------------------------+---------------------------- + tbspace_reindex_part_index | relfilenode is unchanged | reltablespace is unchanged + tbspace_reindex_part_index_0 | relfilenode is unchanged | reltablespace is unchanged + tbspace_reindex_part_index_0_1 | relfilenode has changed | reltablespace has changed + tbspace_reindex_part_index_0_2 | relfilenode has changed | reltablespace has changed + tbspace_reindex_part_index_10 | relfilenode is unchanged | reltablespace is unchanged +(5 rows) + +DROP TABLE tbspace_reindex_part; +-- create a schema we can use +CREATE SCHEMA testschema; +-- try a table +CREATE TABLE testschema.foo (i int) TABLESPACE regress_tblspace; +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'foo'; + relname | spcname +---------+------------------ + foo | regress_tblspace +(1 row) + +INSERT INTO testschema.foo VALUES(1); +INSERT INTO testschema.foo VALUES(2); +-- tables from dynamic sources +CREATE TABLE testschema.asselect TABLESPACE regress_tblspace AS SELECT 1; +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'asselect'; + relname | spcname +----------+------------------ + asselect | regress_tblspace +(1 row) + +PREPARE selectsource(int) AS SELECT $1; +CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace + AS EXECUTE selectsource(2); +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'asexecute'; + relname | spcname +-----------+------------------ + asexecute | regress_tblspace +(1 row) + +-- index +CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace; +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'foo_idx'; + relname | spcname +---------+------------------ + foo_idx | regress_tblspace +(1 row) + +-- check \d output +\d testschema.foo + Table "testschema.foo" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + i | integer | | | +Indexes: + "foo_idx" btree (i), tablespace "regress_tblspace" +Tablespace: "regress_tblspace" + +\d testschema.foo_idx + Index "testschema.foo_idx" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + i | integer | yes | i +btree, for table "testschema.foo" +Tablespace: "regress_tblspace" + +-- +-- partitioned table +-- +CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); +SET default_tablespace TO pg_global; +CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); +ERROR: only shared relations can be placed in pg_global tablespace +RESET default_tablespace; +CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); +SET default_tablespace TO regress_tblspace; +CREATE TABLE testschema.part_2 PARTITION OF testschema.part FOR VALUES IN (2); +SET default_tablespace TO pg_global; +CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); +ERROR: only shared relations can be placed in pg_global tablespace +ALTER TABLE testschema.part SET TABLESPACE regress_tblspace; +CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); +CREATE TABLE testschema.part_4 PARTITION OF testschema.part FOR VALUES IN (4) + TABLESPACE pg_default; +CREATE TABLE testschema.part_56 PARTITION OF testschema.part FOR VALUES IN (5, 6) + PARTITION BY LIST (a); +ALTER TABLE testschema.part SET TABLESPACE pg_default; +CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) + PARTITION BY LIST (a); +ERROR: only shared relations can be placed in pg_global tablespace +CREATE TABLE testschema.part_910 PARTITION OF testschema.part FOR VALUES IN (9, 10) + PARTITION BY LIST (a) TABLESPACE regress_tblspace; +RESET default_tablespace; +CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) + PARTITION BY LIST (a); +SELECT relname, spcname FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) + LEFT JOIN pg_catalog.pg_tablespace t ON c.reltablespace = t.oid + where c.relname LIKE 'part%' AND n.nspname = 'testschema' order by relname; + relname | spcname +----------+------------------ + part | + part_1 | + part_2 | regress_tblspace + part_3 | regress_tblspace + part_4 | + part_56 | regress_tblspace + part_78 | + part_910 | regress_tblspace +(8 rows) + +RESET default_tablespace; +DROP TABLE testschema.part; +-- partitioned index +CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); +CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1); +CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace; +CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2); +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx'; + relname | spcname +-------------+------------------ + part1_a_idx | regress_tblspace + part2_a_idx | regress_tblspace + part_a_idx | regress_tblspace +(3 rows) + +\d testschema.part + Partitioned table "testschema.part" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition key: LIST (a) +Indexes: + "part_a_idx" btree (a), tablespace "regress_tblspace" +Number of partitions: 2 (Use \d+ to list them.) 
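-- [Editor's note] Worth noting in the output above: part2_a_idx sits in
-- regress_tblspace even though testschema.part2 was created without a
-- TABLESPACE clause, because the index built for a new partition inherits
-- the tablespace of the parent partitioned index. A minimal check against
-- the objects above:
SELECT c.relname, t.spcname
  FROM pg_class c
  LEFT JOIN pg_tablespace t ON c.reltablespace = t.oid
 WHERE c.relname IN ('part_a_idx', 'part2_a_idx')
 ORDER BY c.relname;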
+ +\d+ testschema.part + Partitioned table "testschema.part" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Partition key: LIST (a) +Indexes: + "part_a_idx" btree (a), tablespace "regress_tblspace" +Partitions: testschema.part1 FOR VALUES IN (1), + testschema.part2 FOR VALUES IN (2) + +\d testschema.part1 + Table "testschema.part1" + Column | Type | Collation | Nullable | Default +--------+---------+-----------+----------+--------- + a | integer | | | +Partition of: testschema.part FOR VALUES IN (1) +Indexes: + "part1_a_idx" btree (a), tablespace "regress_tblspace" + +\d+ testschema.part1 + Table "testschema.part1" + Column | Type | Collation | Nullable | Default | Storage | Stats target | Description +--------+---------+-----------+----------+---------+---------+--------------+------------- + a | integer | | | | plain | | +Partition of: testschema.part FOR VALUES IN (1) +Partition constraint: ((a IS NOT NULL) AND (a = 1)) +Indexes: + "part1_a_idx" btree (a), tablespace "regress_tblspace" + +\d testschema.part_a_idx +Partitioned index "testschema.part_a_idx" + Column | Type | Key? | Definition +--------+---------+------+------------ + a | integer | yes | a +btree, for table "testschema.part" +Number of partitions: 2 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d+ testschema.part_a_idx + Partitioned index "testschema.part_a_idx" + Column | Type | Key? | Definition | Storage | Stats target +--------+---------+------+------------+---------+-------------- + a | integer | yes | a | plain | +btree, for table "testschema.part" +Partitions: testschema.part1_a_idx, + testschema.part2_a_idx +Tablespace: "regress_tblspace" + +-- partitioned rels cannot specify the default tablespace. 
These fail: +CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE pg_default; +ERROR: cannot specify default tablespace for partitioned relations +CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE pg_default) PARTITION BY LIST (a); +ERROR: cannot specify default tablespace for partitioned relations +SET default_tablespace TO 'pg_default'; +CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE regress_tblspace; +ERROR: cannot specify default tablespace for partitioned relations +CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a); +ERROR: cannot specify default tablespace for partitioned relations +-- but these work: +CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a) TABLESPACE regress_tblspace; +SET default_tablespace TO ''; +CREATE TABLE testschema.dflt2 (a int PRIMARY KEY) PARTITION BY LIST (a); +DROP TABLE testschema.dflt, testschema.dflt2; +-- check that default_tablespace doesn't affect ALTER TABLE index rebuilds +CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace; +INSERT INTO testschema.test_default_tab VALUES (1); +CREATE INDEX test_index1 on testschema.test_default_tab (id); +CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_tblspace; +ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index3 PRIMARY KEY (id); +ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +-- use a custom tablespace for default_tablespace +SET default_tablespace TO regress_tblspace; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? 
| Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +SELECT * FROM testschema.test_default_tab; + id +---- + 1 +(1 row) + +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE int; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +SELECT * FROM testschema.test_default_tab; + id +---- + 1 +(1 row) + +-- now use the default tablespace for default_tablespace +SET default_tablespace TO ''; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE int; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; +\d testschema.test_index1 + Index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" + +\d testschema.test_index2 + Index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_index3 + Index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab" + +\d testschema.test_index4 + Index "testschema.test_index4" + Column | Type | Key? 
| Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab" +Tablespace: "regress_tblspace" + +DROP TABLE testschema.test_default_tab; +-- check that default_tablespace doesn't affect ALTER TABLE index rebuilds +-- (this time with a partitioned table) +CREATE TABLE testschema.test_default_tab_p(id bigint, val bigint) + PARTITION BY LIST (id) TABLESPACE regress_tblspace; +CREATE TABLE testschema.test_default_tab_p1 PARTITION OF testschema.test_default_tab_p + FOR VALUES IN (1); +INSERT INTO testschema.test_default_tab_p VALUES (1); +CREATE INDEX test_index1 on testschema.test_default_tab_p (val); +CREATE INDEX test_index2 on testschema.test_default_tab_p (val) TABLESPACE regress_tblspace; +ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index3 PRIMARY KEY (id); +ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +-- use a custom tablespace for default_tablespace +SET default_tablespace TO regress_tblspace; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) 
+Tablespace: "regress_tblspace" + +SELECT * FROM testschema.test_default_tab_p; + id | val +----+----- + 1 | +(1 row) + +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+---------+------+------------ + val | integer | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + val | integer | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +SELECT * FROM testschema.test_default_tab_p; + id | val +----+----- + 1 | +(1 row) + +-- now use the default tablespace for default_tablespace +SET default_tablespace TO ''; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+---------+------+------------ + val | integer | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? | Definition +--------+---------+------+------------ + val | integer | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; +\d testschema.test_index1 +Partitioned index "testschema.test_index1" + Column | Type | Key? | Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index2 +Partitioned index "testschema.test_index2" + Column | Type | Key? 
| Definition +--------+--------+------+------------ + val | bigint | yes | val +btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +\d testschema.test_index3 +Partitioned index "testschema.test_index3" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +primary key, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) + +\d testschema.test_index4 +Partitioned index "testschema.test_index4" + Column | Type | Key? | Definition +--------+--------+------+------------ + id | bigint | yes | id +unique, btree, for table "testschema.test_default_tab_p" +Number of partitions: 1 (Use \d+ to list them.) +Tablespace: "regress_tblspace" + +DROP TABLE testschema.test_default_tab_p; +-- check that default_tablespace affects index additions in ALTER TABLE +CREATE TABLE testschema.test_tab(id int) TABLESPACE regress_tblspace; +INSERT INTO testschema.test_tab VALUES (1); +SET default_tablespace TO regress_tblspace; +ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id); +SET default_tablespace TO ''; +ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id); +\d testschema.test_tab_unique + Index "testschema.test_tab_unique" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +unique, btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_pkey + Index "testschema.test_tab_pkey" + Column | Type | Key? | Definition +--------+---------+------+------------ + id | integer | yes | id +primary key, btree, for table "testschema.test_tab" + +SELECT * FROM testschema.test_tab; + id +---- + 1 +(1 row) + +DROP TABLE testschema.test_tab; +-- check that default_tablespace is handled correctly by multi-command +-- ALTER TABLE that includes a tablespace-preserving rewrite +CREATE TABLE testschema.test_tab(a int, b int, c int); +SET default_tablespace TO regress_tblspace; +ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (a); +CREATE INDEX test_tab_a_idx ON testschema.test_tab (a); +SET default_tablespace TO ''; +CREATE INDEX test_tab_b_idx ON testschema.test_tab (b); +\d testschema.test_tab_unique + Index "testschema.test_tab_unique" + Column | Type | Key? | Definition +--------+---------+------+------------ + a | integer | yes | a +unique, btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_a_idx + Index "testschema.test_tab_a_idx" + Column | Type | Key? | Definition +--------+---------+------+------------ + a | integer | yes | a +btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_b_idx + Index "testschema.test_tab_b_idx" + Column | Type | Key? | Definition +--------+---------+------+------------ + b | integer | yes | b +btree, for table "testschema.test_tab" + +ALTER TABLE testschema.test_tab ALTER b TYPE bigint, ADD UNIQUE (c); +\d testschema.test_tab_unique + Index "testschema.test_tab_unique" + Column | Type | Key? | Definition +--------+---------+------+------------ + a | integer | yes | a +unique, btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_a_idx + Index "testschema.test_tab_a_idx" + Column | Type | Key? 
| Definition +--------+---------+------+------------ + a | integer | yes | a +btree, for table "testschema.test_tab" +Tablespace: "regress_tblspace" + +\d testschema.test_tab_b_idx + Index "testschema.test_tab_b_idx" + Column | Type | Key? | Definition +--------+--------+------+------------ + b | bigint | yes | b +btree, for table "testschema.test_tab" + +DROP TABLE testschema.test_tab; +-- let's try moving a table from one place to another +CREATE TABLE testschema.atable AS VALUES (1), (2); +CREATE UNIQUE INDEX anindex ON testschema.atable(column1); +ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace; +ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace; +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_global; +ERROR: only shared relations can be placed in pg_global tablespace +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; +ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace; +INSERT INTO testschema.atable VALUES(3); -- ok +INSERT INTO testschema.atable VALUES(1); -- fail (checks index) +ERROR: duplicate key value violates unique constraint "anindex" +DETAIL: Key (column1)=(1) already exists. +SELECT COUNT(*) FROM testschema.atable; -- checks heap + count +------- + 3 +(1 row) + +-- Will fail with bad path +CREATE TABLESPACE regress_badspace LOCATION '/no/such/location'; +ERROR: directory "/no/such/location" does not exist +-- No such tablespace +CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace; +ERROR: tablespace "regress_nosuchspace" does not exist +-- Fail, in use for some partitioned object +DROP TABLESPACE regress_tblspace; +ERROR: tablespace "regress_tblspace" cannot be dropped because some objects depend on it +DETAIL: tablespace for index testschema.part_a_idx +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; +-- Fail, not empty +DROP TABLESPACE regress_tblspace; +ERROR: tablespace "regress_tblspace" is not empty +CREATE ROLE regress_tablespace_user1 login; +CREATE ROLE regress_tablespace_user2 login; +GRANT USAGE ON SCHEMA testschema TO regress_tablespace_user2; +ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1; +CREATE TABLE testschema.tablespace_acl (c int); +-- new owner lacks permission to create this index from scratch +CREATE INDEX k ON testschema.tablespace_acl (c) TABLESPACE regress_tblspace; +ALTER TABLE testschema.tablespace_acl OWNER TO regress_tablespace_user2; +SET SESSION ROLE regress_tablespace_user2; +CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail +ERROR: permission denied for tablespace regress_tblspace +ALTER TABLE testschema.tablespace_acl ALTER c TYPE bigint; +REINDEX (TABLESPACE regress_tblspace) TABLE tablespace_table; -- fail +ERROR: permission denied for tablespace regress_tblspace +REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tablespace_table; -- fail +ERROR: permission denied for tablespace regress_tblspace +RESET ROLE; +ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed; +ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; +ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; +-- Should show notice that nothing was done +ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; +NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found +-- Should succeed +DROP TABLESPACE regress_tblspace_renamed; +DROP SCHEMA testschema CASCADE; +NOTICE: drop cascades to 6 other objects +DETAIL: drop cascades to table 
testschema.foo +drop cascades to table testschema.asselect +drop cascades to table testschema.asexecute +drop cascades to table testschema.part +drop cascades to table testschema.atable +drop cascades to table testschema.tablespace_acl +DROP ROLE regress_tablespace_user1; +DROP ROLE regress_tablespace_user2; diff --git a/src/test/regress/input/constraints.source b/src/test/regress/input/constraints.source deleted file mode 100644 index 458f8057785..00000000000 --- a/src/test/regress/input/constraints.source +++ /dev/null @@ -1,559 +0,0 @@ --- --- CONSTRAINTS --- Constraints can be specified with: --- - DEFAULT clause --- - CHECK clauses --- - PRIMARY KEY clauses --- - UNIQUE clauses --- - EXCLUDE clauses --- - --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR - --- --- DEFAULT syntax --- - -CREATE TABLE DEFAULT_TBL (i int DEFAULT 100, - x text DEFAULT 'vadim', f float8 DEFAULT 123.456); - -INSERT INTO DEFAULT_TBL VALUES (1, 'thomas', 57.0613); -INSERT INTO DEFAULT_TBL VALUES (1, 'bruce'); -INSERT INTO DEFAULT_TBL (i, f) VALUES (2, 987.654); -INSERT INTO DEFAULT_TBL (x) VALUES ('marc'); -INSERT INTO DEFAULT_TBL VALUES (3, null, 1.0); - -SELECT * FROM DEFAULT_TBL; - -CREATE SEQUENCE DEFAULT_SEQ; - -CREATE TABLE DEFAULTEXPR_TBL (i1 int DEFAULT 100 + (200-199) * 2, - i2 int DEFAULT nextval('default_seq')); - -INSERT INTO DEFAULTEXPR_TBL VALUES (-1, -2); -INSERT INTO DEFAULTEXPR_TBL (i1) VALUES (-3); -INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (-4); -INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (NULL); - -SELECT * FROM DEFAULTEXPR_TBL; - --- syntax errors --- test for extraneous comma -CREATE TABLE error_tbl (i int DEFAULT (100, )); --- this will fail because gram.y uses b_expr not a_expr for defaults, --- to avoid a shift/reduce conflict that arises from NOT NULL being --- part of the column definition syntax: -CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); --- this should work, however: -CREATE TABLE error_tbl (b1 bool DEFAULT (1 IN (1, 2))); - -DROP TABLE error_tbl; - --- --- CHECK syntax --- - -CREATE TABLE CHECK_TBL (x int, - CONSTRAINT CHECK_CON CHECK (x > 3)); - -INSERT INTO CHECK_TBL VALUES (5); -INSERT INTO CHECK_TBL VALUES (4); -INSERT INTO CHECK_TBL VALUES (3); -INSERT INTO CHECK_TBL VALUES (2); -INSERT INTO CHECK_TBL VALUES (6); -INSERT INTO CHECK_TBL VALUES (1); - -SELECT * FROM CHECK_TBL; - -CREATE SEQUENCE CHECK_SEQ; - -CREATE TABLE CHECK2_TBL (x int, y text, z int, - CONSTRAINT SEQUENCE_CON - CHECK (x > 3 and y <> 'check failed' and z < 8)); - -INSERT INTO CHECK2_TBL VALUES (4, 'check ok', -2); -INSERT INTO CHECK2_TBL VALUES (1, 'x check failed', -2); -INSERT INTO CHECK2_TBL VALUES (5, 'z check failed', 10); -INSERT INTO CHECK2_TBL VALUES (0, 'check failed', -2); -INSERT INTO CHECK2_TBL VALUES (6, 'check failed', 11); -INSERT INTO CHECK2_TBL VALUES (7, 'check ok', 7); - -SELECT * from CHECK2_TBL; - --- --- Check constraints on INSERT --- - -CREATE SEQUENCE INSERT_SEQ; - -CREATE TABLE INSERT_TBL (x INT DEFAULT nextval('insert_seq'), - y TEXT DEFAULT '-NULL-', - z INT DEFAULT -1 * currval('insert_seq'), - CONSTRAINT INSERT_TBL_CON CHECK (x >= 3 AND y <> 'check failed' AND x < 8), - CHECK (x + z = 0)); - -INSERT INTO INSERT_TBL(x,z) VALUES (2, -2); - -SELECT * FROM INSERT_TBL; - -SELECT 'one' AS one, nextval('insert_seq'); - -INSERT INTO INSERT_TBL(y) VALUES ('Y'); -INSERT INTO INSERT_TBL(y) VALUES ('Y'); -INSERT INTO INSERT_TBL(x,z) VALUES (1, -2); -INSERT INTO INSERT_TBL(z,x) VALUES (-7, 7); -INSERT INTO INSERT_TBL VALUES (5, 'check 
failed', -5); -INSERT INTO INSERT_TBL VALUES (7, '!check failed', -7); -INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); - -SELECT * FROM INSERT_TBL; - -INSERT INTO INSERT_TBL(y,z) VALUES ('check failed', 4); -INSERT INTO INSERT_TBL(x,y) VALUES (5, 'check failed'); -INSERT INTO INSERT_TBL(x,y) VALUES (5, '!check failed'); -INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); - -SELECT * FROM INSERT_TBL; - -SELECT 'seven' AS one, nextval('insert_seq'); - -INSERT INTO INSERT_TBL(y) VALUES ('Y'); - -SELECT 'eight' AS one, currval('insert_seq'); - --- According to SQL, it is OK to insert a record that gives rise to NULL --- constraint-condition results. Postgres used to reject this, but it --- was wrong: -INSERT INTO INSERT_TBL VALUES (null, null, null); - -SELECT * FROM INSERT_TBL; - --- --- Check constraints on system columns --- - -CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, - altitude int, - CHECK (NOT (is_capital AND tableoid::regclass::text = 'sys_col_check_tbl'))); - -INSERT INTO SYS_COL_CHECK_TBL VALUES ('Seattle', 'Washington', false, 100); -INSERT INTO SYS_COL_CHECK_TBL VALUES ('Olympia', 'Washington', true, 100); - -SELECT *, tableoid::regclass::text FROM SYS_COL_CHECK_TBL; - -DROP TABLE SYS_COL_CHECK_TBL; - --- --- Check constraints on system columns other than TableOid should return error --- -CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, - altitude int, - CHECK (NOT (is_capital AND ctid::text = 'sys_col_check_tbl'))); - --- --- Check inheritance of defaults and constraints --- - -CREATE TABLE INSERT_CHILD (cx INT default 42, - cy INT CHECK (cy > x)) - INHERITS (INSERT_TBL); - -INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,11); -INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,6); -INSERT INTO INSERT_CHILD(x,z,cy) VALUES (6,-7,7); -INSERT INTO INSERT_CHILD(x,y,z,cy) VALUES (6,'check failed',-6,7); - -SELECT * FROM INSERT_CHILD; - -DROP TABLE INSERT_CHILD; - --- --- Check NO INHERIT type of constraints and inheritance --- - -CREATE TABLE ATACC1 (TEST INT - CHECK (TEST > 0) NO INHERIT); - -CREATE TABLE ATACC2 (TEST2 INT) INHERITS (ATACC1); --- check constraint is not there on child -INSERT INTO ATACC2 (TEST) VALUES (-3); --- check constraint is there on parent -INSERT INTO ATACC1 (TEST) VALUES (-3); -DROP TABLE ATACC1 CASCADE; - -CREATE TABLE ATACC1 (TEST INT, TEST2 INT - CHECK (TEST > 0), CHECK (TEST2 > 10) NO INHERIT); - -CREATE TABLE ATACC2 () INHERITS (ATACC1); --- check constraint is there on child -INSERT INTO ATACC2 (TEST) VALUES (-3); --- check constraint is there on parent -INSERT INTO ATACC1 (TEST) VALUES (-3); --- check constraint is not there on child -INSERT INTO ATACC2 (TEST2) VALUES (3); --- check constraint is there on parent -INSERT INTO ATACC1 (TEST2) VALUES (3); -DROP TABLE ATACC1 CASCADE; - --- --- Check constraints on INSERT INTO --- - -DELETE FROM INSERT_TBL; - -ALTER SEQUENCE INSERT_SEQ RESTART WITH 4; - -CREATE TEMP TABLE tmp (xd INT, yd TEXT, zd INT); - -INSERT INTO tmp VALUES (null, 'Y', null); -INSERT INTO tmp VALUES (5, '!check failed', null); -INSERT INTO tmp VALUES (null, 'try again', null); -INSERT INTO INSERT_TBL(y) select yd from tmp; - -SELECT * FROM INSERT_TBL; - -INSERT INTO INSERT_TBL SELECT * FROM tmp WHERE yd = 'try again'; -INSERT INTO INSERT_TBL(y,z) SELECT yd, -7 FROM tmp WHERE yd = 'try again'; -INSERT INTO INSERT_TBL(y,z) SELECT yd, -8 FROM tmp WHERE yd = 'try again'; - -SELECT * FROM INSERT_TBL; - -DROP TABLE tmp; - --- --- Check constraints on UPDATE --- - -UPDATE INSERT_TBL SET x = NULL WHERE x =
5; -UPDATE INSERT_TBL SET x = 6 WHERE x = 6; -UPDATE INSERT_TBL SET x = -z, z = -x; -UPDATE INSERT_TBL SET x = z, z = x; - -SELECT * FROM INSERT_TBL; - --- DROP TABLE INSERT_TBL; - --- --- Check constraints on COPY FROM --- - -CREATE TABLE COPY_TBL (x INT, y TEXT, z INT, - CONSTRAINT COPY_CON - CHECK (x > 3 AND y <> 'check failed' AND x < 7 )); - -\set filename :abs_srcdir '/data/constro.data' -COPY COPY_TBL FROM :'filename'; - -SELECT * FROM COPY_TBL; - -\set filename :abs_srcdir '/data/constrf.data' -COPY COPY_TBL FROM :'filename'; - -SELECT * FROM COPY_TBL; - --- --- Primary keys --- - -CREATE TABLE PRIMARY_TBL (i int PRIMARY KEY, t text); - -INSERT INTO PRIMARY_TBL VALUES (1, 'one'); -INSERT INTO PRIMARY_TBL VALUES (2, 'two'); -INSERT INTO PRIMARY_TBL VALUES (1, 'three'); -INSERT INTO PRIMARY_TBL VALUES (4, 'three'); -INSERT INTO PRIMARY_TBL VALUES (5, 'one'); -INSERT INTO PRIMARY_TBL (t) VALUES ('six'); - -SELECT * FROM PRIMARY_TBL; - -DROP TABLE PRIMARY_TBL; - -CREATE TABLE PRIMARY_TBL (i int, t text, - PRIMARY KEY(i,t)); - -INSERT INTO PRIMARY_TBL VALUES (1, 'one'); -INSERT INTO PRIMARY_TBL VALUES (2, 'two'); -INSERT INTO PRIMARY_TBL VALUES (1, 'three'); -INSERT INTO PRIMARY_TBL VALUES (4, 'three'); -INSERT INTO PRIMARY_TBL VALUES (5, 'one'); -INSERT INTO PRIMARY_TBL (t) VALUES ('six'); - -SELECT * FROM PRIMARY_TBL; - -DROP TABLE PRIMARY_TBL; - --- --- Unique keys --- - -CREATE TABLE UNIQUE_TBL (i int UNIQUE, t text); - -INSERT INTO UNIQUE_TBL VALUES (1, 'one'); -INSERT INTO UNIQUE_TBL VALUES (2, 'two'); -INSERT INTO UNIQUE_TBL VALUES (1, 'three'); -INSERT INTO UNIQUE_TBL VALUES (4, 'four'); -INSERT INTO UNIQUE_TBL VALUES (5, 'one'); -INSERT INTO UNIQUE_TBL (t) VALUES ('six'); -INSERT INTO UNIQUE_TBL (t) VALUES ('seven'); - -INSERT INTO UNIQUE_TBL VALUES (5, 'five-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'five-upsert-update'; -INSERT INTO UNIQUE_TBL VALUES (6, 'six-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'six-upsert-update'; --- should fail -INSERT INTO UNIQUE_TBL VALUES (1, 'a'), (2, 'b'), (2, 'b') ON CONFLICT (i) DO UPDATE SET t = 'fails'; - -SELECT * FROM UNIQUE_TBL; - -DROP TABLE UNIQUE_TBL; - -CREATE TABLE UNIQUE_TBL (i int, t text, - UNIQUE(i,t)); - -INSERT INTO UNIQUE_TBL VALUES (1, 'one'); -INSERT INTO UNIQUE_TBL VALUES (2, 'two'); -INSERT INTO UNIQUE_TBL VALUES (1, 'three'); -INSERT INTO UNIQUE_TBL VALUES (1, 'one'); -INSERT INTO UNIQUE_TBL VALUES (5, 'one'); -INSERT INTO UNIQUE_TBL (t) VALUES ('six'); - -SELECT * FROM UNIQUE_TBL; - -DROP TABLE UNIQUE_TBL; - --- --- Deferrable unique constraints --- - -CREATE TABLE unique_tbl (i int UNIQUE DEFERRABLE, t text); - -INSERT INTO unique_tbl VALUES (0, 'one'); -INSERT INTO unique_tbl VALUES (1, 'two'); -INSERT INTO unique_tbl VALUES (2, 'tree'); -INSERT INTO unique_tbl VALUES (3, 'four'); -INSERT INTO unique_tbl VALUES (4, 'five'); - -BEGIN; - --- default is immediate so this should fail right away -UPDATE unique_tbl SET i = 1 WHERE i = 0; - -ROLLBACK; - --- check is done at end of statement, so this should succeed -UPDATE unique_tbl SET i = i+1; - -SELECT * FROM unique_tbl; - --- explicitly defer the constraint -BEGIN; - -SET CONSTRAINTS unique_tbl_i_key DEFERRED; - -INSERT INTO unique_tbl VALUES (3, 'three'); -DELETE FROM unique_tbl WHERE t = 'tree'; -- makes constraint valid again - -COMMIT; -- should succeed - -SELECT * FROM unique_tbl; - --- try adding an initially deferred constraint -ALTER TABLE unique_tbl DROP CONSTRAINT unique_tbl_i_key; -ALTER TABLE unique_tbl ADD CONSTRAINT unique_tbl_i_key - UNIQUE 
(i) DEFERRABLE INITIALLY DEFERRED; - -BEGIN; - -INSERT INTO unique_tbl VALUES (1, 'five'); -INSERT INTO unique_tbl VALUES (5, 'one'); -UPDATE unique_tbl SET i = 4 WHERE i = 2; -UPDATE unique_tbl SET i = 2 WHERE i = 4 AND t = 'four'; -DELETE FROM unique_tbl WHERE i = 1 AND t = 'one'; -DELETE FROM unique_tbl WHERE i = 5 AND t = 'five'; - -COMMIT; - -SELECT * FROM unique_tbl; - --- should fail at commit-time -BEGIN; -INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now -COMMIT; -- should fail - --- make constraint check immediate -BEGIN; - -SET CONSTRAINTS ALL IMMEDIATE; - -INSERT INTO unique_tbl VALUES (3, 'Three'); -- should fail - -COMMIT; - --- forced check when SET CONSTRAINTS is called -BEGIN; - -SET CONSTRAINTS ALL DEFERRED; - -INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now - -SET CONSTRAINTS ALL IMMEDIATE; -- should fail - -COMMIT; - --- test deferrable UNIQUE with a partitioned table -CREATE TABLE parted_uniq_tbl (i int UNIQUE DEFERRABLE) partition by range (i); -CREATE TABLE parted_uniq_tbl_1 PARTITION OF parted_uniq_tbl FOR VALUES FROM (0) TO (10); -CREATE TABLE parted_uniq_tbl_2 PARTITION OF parted_uniq_tbl FOR VALUES FROM (20) TO (30); -SELECT conname, conrelid::regclass FROM pg_constraint - WHERE conname LIKE 'parted_uniq%' ORDER BY conname; -BEGIN; -INSERT INTO parted_uniq_tbl VALUES (1); -SAVEPOINT f; -INSERT INTO parted_uniq_tbl VALUES (1); -- unique violation -ROLLBACK TO f; -SET CONSTRAINTS parted_uniq_tbl_i_key DEFERRED; -INSERT INTO parted_uniq_tbl VALUES (1); -- OK now, fail at commit -COMMIT; -DROP TABLE parted_uniq_tbl; - --- test a HOT update that invalidates the conflicting tuple. --- the trigger should still fire and catch the violation - -BEGIN; - -INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now -UPDATE unique_tbl SET t = 'THREE' WHERE i = 3 AND t = 'Three'; - -COMMIT; -- should fail - -SELECT * FROM unique_tbl; - --- test a HOT update that modifies the newly inserted tuple, --- but should succeed because we then remove the other conflicting tuple. 
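-- (Editor's sketch, not part of the original test: the deferral semantics
-- exercised in this file reduce to the following minimal example; the table
-- and constraint names here are hypothetical.)
CREATE TABLE defer_demo (i int CONSTRAINT defer_demo_i_key UNIQUE DEFERRABLE);
INSERT INTO defer_demo VALUES (1), (2), (3);
-- a DEFERRABLE (initially immediate) constraint is checked at end of
-- statement, so the transient duplicates produced mid-UPDATE are harmless:
UPDATE defer_demo SET i = i + 1;
BEGIN;
SET CONSTRAINTS defer_demo_i_key DEFERRED;
INSERT INTO defer_demo VALUES (2);      -- transient duplicate, tolerated
DELETE FROM defer_demo WHERE i = 2;     -- removes both copies
COMMIT;                                 -- commit-time recheck passes
DROP TABLE defer_demo;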
- -BEGIN; - -INSERT INTO unique_tbl VALUES(3, 'tree'); -- should succeed for now -UPDATE unique_tbl SET t = 'threex' WHERE t = 'tree'; -DELETE FROM unique_tbl WHERE t = 'three'; - -SELECT * FROM unique_tbl; - -COMMIT; - -SELECT * FROM unique_tbl; - -DROP TABLE unique_tbl; - --- --- EXCLUDE constraints --- - -CREATE TABLE circles ( - c1 CIRCLE, - c2 TEXT, - EXCLUDE USING gist - (c1 WITH &&, (c2::circle) WITH &&) - WHERE (circle_center(c1) <> '(0,0)') -); - --- these should succeed because they don't match the index predicate -INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 5>'); -INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 4>'); - --- succeed -INSERT INTO circles VALUES('<(10,10), 10>', '<(0,0), 5>'); --- fail, overlaps -INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>'); --- succeed, because violation is ignored -INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') - ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO NOTHING; --- fail, because DO UPDATE variant requires unique index -INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') - ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO UPDATE SET c2 = EXCLUDED.c2; --- succeed because c1 doesn't overlap -INSERT INTO circles VALUES('<(20,20), 1>', '<(0,0), 5>'); --- succeed because c2 doesn't overlap -INSERT INTO circles VALUES('<(20,20), 10>', '<(10,10), 5>'); - --- should fail on existing data without the WHERE clause -ALTER TABLE circles ADD EXCLUDE USING gist - (c1 WITH &&, (c2::circle) WITH &&); - --- try reindexing an existing constraint -REINDEX INDEX circles_c1_c2_excl; - -DROP TABLE circles; - --- Check deferred exclusion constraint - -CREATE TABLE deferred_excl ( - f1 int, - f2 int, - CONSTRAINT deferred_excl_con EXCLUDE (f1 WITH =) INITIALLY DEFERRED -); - -INSERT INTO deferred_excl VALUES(1); -INSERT INTO deferred_excl VALUES(2); -INSERT INTO deferred_excl VALUES(1); -- fail -INSERT INTO deferred_excl VALUES(1) ON CONFLICT ON CONSTRAINT deferred_excl_con DO NOTHING; -- fail -BEGIN; -INSERT INTO deferred_excl VALUES(2); -- no fail here -COMMIT; -- should fail here -BEGIN; -INSERT INTO deferred_excl VALUES(3); -INSERT INTO deferred_excl VALUES(3); -- no fail here -COMMIT; -- should fail here - --- bug #13148: deferred constraint versus HOT update -BEGIN; -INSERT INTO deferred_excl VALUES(2, 1); -- no fail here -DELETE FROM deferred_excl WHERE f1 = 2 AND f2 IS NULL; -- remove old row -UPDATE deferred_excl SET f2 = 2 WHERE f1 = 2; -COMMIT; -- should not fail - -SELECT * FROM deferred_excl; - -ALTER TABLE deferred_excl DROP CONSTRAINT deferred_excl_con; - --- This should fail, but worth testing because of HOT updates -UPDATE deferred_excl SET f1 = 3; - -ALTER TABLE deferred_excl ADD EXCLUDE (f1 WITH =); - -DROP TABLE deferred_excl; - --- Comments --- Setup a low-level role to enforce non-superuser checks. 
-CREATE ROLE regress_constraint_comments; -SET SESSION AUTHORIZATION regress_constraint_comments; - -CREATE TABLE constraint_comments_tbl (a int CONSTRAINT the_constraint CHECK (a > 0)); -CREATE DOMAIN constraint_comments_dom AS int CONSTRAINT the_constraint CHECK (value > 0); - -COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'yes, the comment'; -COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; - --- no such constraint -COMMENT ON CONSTRAINT no_constraint ON constraint_comments_tbl IS 'yes, the comment'; -COMMENT ON CONSTRAINT no_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; - --- no such table/domain -COMMENT ON CONSTRAINT the_constraint ON no_comments_tbl IS 'bad comment'; -COMMENT ON CONSTRAINT the_constraint ON DOMAIN no_comments_dom IS 'another bad comment'; - -COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS NULL; -COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS NULL; - --- unauthorized user -RESET SESSION AUTHORIZATION; -CREATE ROLE regress_constraint_comments_noaccess; -SET SESSION AUTHORIZATION regress_constraint_comments_noaccess; -COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'no, the comment'; -COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'no, another comment'; -RESET SESSION AUTHORIZATION; - -DROP TABLE constraint_comments_tbl; -DROP DOMAIN constraint_comments_dom; - -DROP ROLE regress_constraint_comments; -DROP ROLE regress_constraint_comments_noaccess; diff --git a/src/test/regress/input/copy.source b/src/test/regress/input/copy.source deleted file mode 100644 index 15e26517ecb..00000000000 --- a/src/test/regress/input/copy.source +++ /dev/null @@ -1,293 +0,0 @@ --- --- COPY --- - --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR - --- CLASS POPULATION --- (any resemblance to real life is purely coincidental) --- -\set filename :abs_srcdir '/data/agg.data' -COPY aggtest FROM :'filename'; - -\set filename :abs_srcdir '/data/onek.data' -COPY onek FROM :'filename'; - -\set filename :abs_builddir '/results/onek.data' -COPY onek TO :'filename'; - -DELETE FROM onek; - -COPY onek FROM :'filename'; - -\set filename :abs_srcdir '/data/tenk.data' -COPY tenk1 FROM :'filename'; - -\set filename :abs_srcdir '/data/rect.data' -COPY slow_emp4000 FROM :'filename'; - -\set filename :abs_srcdir '/data/person.data' -COPY person FROM :'filename'; - -\set filename :abs_srcdir '/data/emp.data' -COPY emp FROM :'filename'; - -\set filename :abs_srcdir '/data/student.data' -COPY student FROM :'filename'; - -\set filename :abs_srcdir '/data/stud_emp.data' -COPY stud_emp FROM :'filename'; - -\set filename :abs_srcdir '/data/streets.data' -COPY road FROM :'filename'; - -\set filename :abs_srcdir '/data/real_city.data' -COPY real_city FROM :'filename'; - -\set filename :abs_srcdir '/data/hash.data' -COPY hash_i4_heap FROM :'filename'; - -COPY hash_name_heap FROM :'filename'; - -COPY hash_txt_heap FROM :'filename'; - -COPY hash_f8_heap FROM :'filename'; - -\set filename :abs_srcdir '/data/tsearch.data' -COPY test_tsvector FROM :'filename'; - -\set filename :abs_srcdir '/data/jsonb.data' -COPY testjsonb FROM :'filename'; - --- the data in this file has a lot of duplicates in the index key --- fields, leading to long bucket chains and lots of table expansion. 
--- this is therefore a stress test of the bucket overflow code (unlike --- the data in hash.data, which has unique index keys). --- --- \set filename :abs_srcdir '/data/hashovfl.data' --- COPY hash_ovfl_heap FROM :'filename'; - -\set filename :abs_srcdir '/data/desc.data' -COPY bt_i4_heap FROM :'filename'; - -\set filename :abs_srcdir '/data/hash.data' -COPY bt_name_heap FROM :'filename'; - -\set filename :abs_srcdir '/data/desc.data' -COPY bt_txt_heap FROM :'filename'; - -\set filename :abs_srcdir '/data/hash.data' -COPY bt_f8_heap FROM :'filename'; - -\set filename :abs_srcdir '/data/array.data' -COPY array_op_test FROM :'filename'; - -\set filename :abs_srcdir '/data/array.data' -COPY array_index_op_test FROM :'filename'; - --- analyze all the data we just loaded, to ensure plan consistency --- in later tests - -ANALYZE aggtest; -ANALYZE onek; -ANALYZE tenk1; -ANALYZE slow_emp4000; -ANALYZE person; -ANALYZE emp; -ANALYZE student; -ANALYZE stud_emp; -ANALYZE road; -ANALYZE real_city; -ANALYZE hash_i4_heap; -ANALYZE hash_name_heap; -ANALYZE hash_txt_heap; -ANALYZE hash_f8_heap; -ANALYZE test_tsvector; -ANALYZE bt_i4_heap; -ANALYZE bt_name_heap; -ANALYZE bt_txt_heap; -ANALYZE bt_f8_heap; -ANALYZE array_op_test; -ANALYZE array_index_op_test; - ---- test copying in CSV mode with various styles ---- of embedded line ending characters - -create temp table copytest ( - style text, - test text, - filler int); - -insert into copytest values('DOS',E'abc\r\ndef',1); -insert into copytest values('Unix',E'abc\ndef',2); -insert into copytest values('Mac',E'abc\rdef',3); -insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4); - -\set filename :abs_builddir '/results/copytest.csv' -copy copytest to :'filename' csv; - -create temp table copytest2 (like copytest); - -copy copytest2 from :'filename' csv; - -select * from copytest except select * from copytest2; - -truncate copytest2; - ---- same test but with an escape char different from quote char - -copy copytest to :'filename' csv quote '''' escape E'\\'; - -copy copytest2 from :'filename' csv quote '''' escape E'\\'; - -select * from copytest except select * from copytest2; - - --- test header line feature - -create temp table copytest3 ( - c1 int, - "col with , comma" text, - "col with "" quote" int); - -copy copytest3 from stdin csv header; -this is just a line full of junk that would error out if parsed -1,a,1 -2,b,2 -\. - -copy copytest3 to stdout csv header; - --- test copy from with a partitioned table -create table parted_copytest ( - a int, - b int, - c text -) partition by list (b); - -create table parted_copytest_a1 (c text, b int, a int); -create table parted_copytest_a2 (a int, c text, b int); - -alter table parted_copytest attach partition parted_copytest_a1 for values in(1); -alter table parted_copytest attach partition parted_copytest_a2 for values in(2); - --- We must insert enough rows to trigger multi-inserts. These are only --- enabled adaptively when there are few enough partition changes. -insert into parted_copytest select x,1,'One' from generate_series(1,1000) x; -insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x; -insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x; - -\set filename :abs_builddir '/results/parted_copytest.csv' -copy (select * from parted_copytest order by a) to :'filename'; - -truncate parted_copytest; - -copy parted_copytest from :'filename'; - --- Ensure COPY FREEZE errors for partitioned tables. 
-begin; -truncate parted_copytest; -copy parted_copytest from :'filename' (freeze); -rollback; - -select tableoid::regclass,count(*),sum(a) from parted_copytest -group by tableoid order by tableoid::regclass::name; - -truncate parted_copytest; - --- create before insert row trigger on parted_copytest_a2 -create function part_ins_func() returns trigger language plpgsql as $$ -begin - return new; -end; -$$; - -create trigger part_ins_trig - before insert on parted_copytest_a2 - for each row - execute procedure part_ins_func(); - -copy parted_copytest from :'filename'; - -select tableoid::regclass,count(*),sum(a) from parted_copytest -group by tableoid order by tableoid::regclass::name; - -truncate table parted_copytest; -create index on parted_copytest (b); -drop trigger part_ins_trig on parted_copytest_a2; - -copy parted_copytest from stdin; -1 1 str1 -2 2 str2 -\. - --- Ensure index entries were properly added during the copy. -select * from parted_copytest where b = 1; -select * from parted_copytest where b = 2; - -drop table parted_copytest; - --- --- Progress reporting for COPY --- -create table tab_progress_reporting ( - name text, - age int4, - location point, - salary int4, - manager name -); - --- Add a trigger to catch and print the contents of the catalog view --- pg_stat_progress_copy during data insertion. This allows us to test --- the validation of some progress reports for COPY FROM where the trigger --- would fire. -create function notice_after_tab_progress_reporting() returns trigger AS -$$ -declare report record; -begin - -- The fields ignored here are the ones that may not remain - -- consistent across multiple runs. The sizes reported may differ - -- across platforms, so just check if these are strictly positive. - with progress_data as ( - select - relid::regclass::text as relname, - command, - type, - bytes_processed > 0 as has_bytes_processed, - bytes_total > 0 as has_bytes_total, - tuples_processed, - tuples_excluded - from pg_stat_progress_copy - where pid = pg_backend_pid()) - select into report (to_jsonb(r)) as value - from progress_data r; - - raise info 'progress: %', report.value::text; - return new; -end; -$$ language plpgsql; - -create trigger check_after_tab_progress_reporting - after insert on tab_progress_reporting - for each statement - execute function notice_after_tab_progress_reporting(); - --- Generate COPY FROM report with PIPE. -copy tab_progress_reporting from stdin; -sharon 25 (15,12) 1000 sam -sam 30 (10,5) 2000 bill -bill 20 (11,10) 1000 sharon -\. - --- Generate COPY FROM report with FILE, with some excluded tuples.
-truncate tab_progress_reporting; -\set filename :abs_srcdir '/data/emp.data' -copy tab_progress_reporting from :'filename' - where (salary < 2000); - -drop trigger check_after_tab_progress_reporting on tab_progress_reporting; -drop function notice_after_tab_progress_reporting(); -drop table tab_progress_reporting; diff --git a/src/test/regress/input/create_function_0.source b/src/test/regress/input/create_function_0.source deleted file mode 100644 index c5224742f94..00000000000 --- a/src/test/regress/input/create_function_0.source +++ /dev/null @@ -1,108 +0,0 @@ --- --- CREATE_FUNCTION_0 --- - --- directory path and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX - -\set autoinclib :libdir '/autoinc' :dlsuffix -\set refintlib :libdir '/refint' :dlsuffix -\set regresslib :libdir '/regress' :dlsuffix - --- Create a bunch of C functions that will be used by later tests: - -CREATE FUNCTION check_primary_key () - RETURNS trigger - AS :'refintlib' - LANGUAGE C; - -CREATE FUNCTION check_foreign_key () - RETURNS trigger - AS :'refintlib' - LANGUAGE C; - -CREATE FUNCTION autoinc () - RETURNS trigger - AS :'autoinclib' - LANGUAGE C; - -CREATE FUNCTION trigger_return_old () - RETURNS trigger - AS :'regresslib' - LANGUAGE C; - -CREATE FUNCTION ttdummy () - RETURNS trigger - AS :'regresslib' - LANGUAGE C; - -CREATE FUNCTION set_ttdummy (int4) - RETURNS int4 - AS :'regresslib' - LANGUAGE C STRICT; - -CREATE FUNCTION make_tuple_indirect (record) - RETURNS record - AS :'regresslib' - LANGUAGE C STRICT; - -CREATE FUNCTION test_atomic_ops() - RETURNS bool - AS :'regresslib' - LANGUAGE C; - -CREATE FUNCTION test_fdw_handler() - RETURNS fdw_handler - AS :'regresslib', 'test_fdw_handler' - LANGUAGE C; - -CREATE FUNCTION test_support_func(internal) - RETURNS internal - AS :'regresslib', 'test_support_func' - LANGUAGE C STRICT; - -CREATE FUNCTION test_opclass_options_func(internal) - RETURNS void - AS :'regresslib', 'test_opclass_options_func' - LANGUAGE C; - -CREATE FUNCTION test_enc_conversion(bytea, name, name, bool, validlen OUT int, result OUT bytea) - AS :'regresslib', 'test_enc_conversion' - LANGUAGE C STRICT; - -CREATE FUNCTION binary_coercible(oid, oid) - RETURNS bool - AS :'regresslib', 'binary_coercible' - LANGUAGE C STRICT STABLE PARALLEL SAFE; - --- Things that shouldn't work: - -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'SELECT ''not an integer'';'; - -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'not even SQL'; - -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'SELECT 1, 2, 3;'; - -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'SELECT $2;'; - -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'a', 'b'; - -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C - AS 'nosuchfile'; - --- To produce stable regression test output, we have to filter the name --- of the regresslib file out of the error message in this test. 
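-- (Editor's note, not part of the original script: the .source-to-.sql
-- conversion rests on three psql features used throughout these hunks.
-- \getenv copies an environment variable into a psql variable, adjacent
-- values in a \set command are concatenated, and :'var' interpolates a
-- variable as a properly quoted SQL literal.  A minimal sketch, assuming
-- PG_LIBDIR and PG_DLSUFFIX are exported for the session as they are by
-- pg_regress:
--   \getenv libdir PG_LIBDIR
--   \getenv dlsuffix PG_DLSUFFIX
--   \set regresslib :libdir '/regress' :dlsuffix
--   SELECT :'regresslib';    -- e.g. '/path/to/lib/regress.so'
-- )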
-\set VERBOSITY sqlstate -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C - AS :'regresslib', 'nosuchsymbol'; -\set VERBOSITY default -SELECT regexp_replace(:'LAST_ERROR_MESSAGE', 'file ".*"', 'file "..."'); - -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE internal - AS 'nosuch'; diff --git a/src/test/regress/input/create_function_1.source b/src/test/regress/input/create_function_1.source deleted file mode 100644 index 4170b16fe6b..00000000000 --- a/src/test/regress/input/create_function_1.source +++ /dev/null @@ -1,31 +0,0 @@ --- --- CREATE_FUNCTION_1 --- - --- directory path and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX - -\set regresslib :libdir '/regress' :dlsuffix - --- Create C functions needed by create_type.sql - -CREATE FUNCTION widget_in(cstring) - RETURNS widget - AS :'regresslib' - LANGUAGE C STRICT IMMUTABLE; - -CREATE FUNCTION widget_out(widget) - RETURNS cstring - AS :'regresslib' - LANGUAGE C STRICT IMMUTABLE; - -CREATE FUNCTION int44in(cstring) - RETURNS city_budget - AS :'regresslib' - LANGUAGE C STRICT IMMUTABLE; - -CREATE FUNCTION int44out(city_budget) - RETURNS cstring - AS :'regresslib' - LANGUAGE C STRICT IMMUTABLE; diff --git a/src/test/regress/input/create_function_2.source b/src/test/regress/input/create_function_2.source deleted file mode 100644 index 67510aed239..00000000000 --- a/src/test/regress/input/create_function_2.source +++ /dev/null @@ -1,96 +0,0 @@ --- --- CREATE_FUNCTION_2 --- - --- directory path and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX - -\set regresslib :libdir '/regress' :dlsuffix - - -CREATE FUNCTION hobbies(person) - RETURNS setof hobbies_r - AS 'select * from hobbies_r where person = $1.name' - LANGUAGE SQL; - - -CREATE FUNCTION hobby_construct(text, text) - RETURNS hobbies_r - AS 'select $1 as name, $2 as hobby' - LANGUAGE SQL; - - -CREATE FUNCTION hobby_construct_named(name text, hobby text) - RETURNS hobbies_r - AS 'select name, hobby' - LANGUAGE SQL; - - -CREATE FUNCTION hobbies_by_name(hobbies_r.name%TYPE) - RETURNS hobbies_r.person%TYPE - AS 'select person from hobbies_r where name = $1' - LANGUAGE SQL; - - -CREATE FUNCTION equipment(hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = $1.name' - LANGUAGE SQL; - - -CREATE FUNCTION equipment_named(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = equipment_named.hobby.name' - LANGUAGE SQL; - -CREATE FUNCTION equipment_named_ambiguous_1a(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_1a.hobby.name' - LANGUAGE SQL; - -CREATE FUNCTION equipment_named_ambiguous_1b(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby.name' - LANGUAGE SQL; - -CREATE FUNCTION equipment_named_ambiguous_1c(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = hobby.name' - LANGUAGE SQL; - -CREATE FUNCTION equipment_named_ambiguous_2a(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_2a.hobby' - LANGUAGE SQL; - -CREATE FUNCTION equipment_named_ambiguous_2b(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby' - LANGUAGE SQL; - - -CREATE FUNCTION pt_in_widget(point, widget) - RETURNS bool - AS :'regresslib' - LANGUAGE C 
STRICT; - -CREATE FUNCTION overpaid(emp) - RETURNS bool - AS :'regresslib' - LANGUAGE C STRICT; - -CREATE FUNCTION interpt_pp(path, path) - RETURNS point - AS :'regresslib' - LANGUAGE C STRICT; - -CREATE FUNCTION reverse_name(name) - RETURNS name - AS :'regresslib' - LANGUAGE C STRICT; - --- --- Function dynamic loading --- -LOAD :'regresslib'; diff --git a/src/test/regress/input/largeobject.source b/src/test/regress/input/largeobject.source deleted file mode 100644 index 16da077f3a2..00000000000 --- a/src/test/regress/input/largeobject.source +++ /dev/null @@ -1,279 +0,0 @@ --- --- Test large object support --- - --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR - --- ensure consistent test output regardless of the default bytea format -SET bytea_output TO escape; - --- Load a file -CREATE TABLE lotest_stash_values (loid oid, fd integer); --- lo_creat(mode integer) returns oid --- The mode arg to lo_creat is unused, some vestigial holdover from ancient times --- returns the large object id -INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); - --- Test ALTER LARGE OBJECT -CREATE ROLE regress_lo_user; -DO $$ - BEGIN - EXECUTE 'ALTER LARGE OBJECT ' || (select loid from lotest_stash_values) - || ' OWNER TO regress_lo_user'; - END -$$; -SELECT - rol.rolname -FROM - lotest_stash_values s - JOIN pg_largeobject_metadata lo ON s.loid = lo.oid - JOIN pg_authid rol ON lo.lomowner = rol.oid; - --- NOTE: large objects require transactions -BEGIN; - --- lo_open(lobjId oid, mode integer) returns integer --- The mode parameter to lo_open uses two constants: --- INV_READ = 0x20000 --- INV_WRITE = 0x40000 --- The return value is a file descriptor-like value which remains valid for the --- transaction. -UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); - --- loread/lowrite names are wonky, different from other functions which are lo_* --- lowrite(fd integer, data bytea) returns integer --- the integer is the number of bytes written -SELECT lowrite(fd, ' -I wandered lonely as a cloud -That floats on high o''er vales and hills, -When all at once I saw a crowd, -A host, of golden daffodils; -Beside the lake, beneath the trees, -Fluttering and dancing in the breeze. - -Continuous as the stars that shine -And twinkle on the milky way, -They stretched in never-ending line -Along the margin of a bay: -Ten thousand saw I at a glance, -Tossing their heads in sprightly dance. - -The waves beside them danced; but they -Out-did the sparkling waves in glee: -A poet could not but be gay, -In such a jocund company: -I gazed--and gazed--but little thought -What wealth the show to me had brought: - -For oft, when on my couch I lie -In vacant or in pensive mood, -They flash upon that inward eye -Which is the bliss of solitude; -And then my heart with pleasure fills, -And dances with the daffodils. - - -- William Wordsworth -') FROM lotest_stash_values; - --- lo_close(fd integer) returns integer --- return value is 0 for success, or <0 for error (actually only -1, but...) -SELECT lo_close(fd) FROM lotest_stash_values; - -END; - --- Copy to another large object. --- Note: we intentionally don't remove the object created here; --- it's left behind to help test pg_dump. - -SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values -\gset - --- Add a comment to it, as well, for pg_dump/pg_upgrade testing.
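-- (Editor's note, not part of the original script: \gset above stores each
-- output column of the preceding query into a psql variable named after the
-- column, so the :newloid reference below expands to the copied large
-- object's OID before the COMMENT command is sent to the server.)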
-COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud'; - --- Read out a portion -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); - --- lo_lseek(fd integer, offset integer, whence integer) returns integer --- offset is in bytes, whence is one of three values: --- SEEK_SET (= 0) meaning relative to beginning --- SEEK_CUR (= 1) meaning relative to current position --- SEEK_END (= 2) meaning relative to end (offset better be negative) --- returns current position in file -SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values; - --- loread/lowrite names are wonky, different from other functions which are lo_* --- loread(fd integer, len integer) returns bytea -SELECT loread(fd, 28) FROM lotest_stash_values; - -SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; - -SELECT lowrite(fd, 'n') FROM lotest_stash_values; - -SELECT lo_tell(fd) FROM lotest_stash_values; - -SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values; - -SELECT loread(fd, 28) FROM lotest_stash_values; - -SELECT lo_close(fd) FROM lotest_stash_values; - -END; - --- Test resource management -BEGIN; -SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; -ABORT; - -\set filename :abs_builddir '/results/invalid/path' -\set dobody 'DECLARE loid oid; BEGIN ' -\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' -\set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); ' -\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' -\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' -DO :'dobody'; - --- Test truncation. -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); - -SELECT lo_truncate(fd, 11) FROM lotest_stash_values; -SELECT loread(fd, 15) FROM lotest_stash_values; - -SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; -SELECT loread(fd, 10) FROM lotest_stash_values; -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; -SELECT lo_tell(fd) FROM lotest_stash_values; - -SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; -SELECT lo_tell(fd) FROM lotest_stash_values; - -SELECT lo_close(fd) FROM lotest_stash_values; -END; - --- Test 64-bit large object functions. 
-BEGIN; -UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); - -SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values; -SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values; -SELECT lo_tell64(fd) FROM lotest_stash_values; - -SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values; -SELECT lo_tell64(fd) FROM lotest_stash_values; -SELECT loread(fd, 10) FROM lotest_stash_values; - -SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values; -SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; -SELECT lo_tell64(fd) FROM lotest_stash_values; - -SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values; -SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; -SELECT lo_tell64(fd) FROM lotest_stash_values; - -SELECT lo_close(fd) FROM lotest_stash_values; -END; - --- lo_unlink(lobjId oid) returns integer --- return value appears to always be 1 -SELECT lo_unlink(loid) from lotest_stash_values; - -TRUNCATE lotest_stash_values; - -\set filename :abs_srcdir '/data/tenk.data' -INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); - -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); - --- verify length of large object -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - --- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block --- edge case -SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; - --- this should get half of the value from page 0 and half from page 1 of the --- large object -SELECT loread(fd, 36) FROM lotest_stash_values; - -SELECT lo_tell(fd) FROM lotest_stash_values; - -SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; - -SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; - -SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; - -SELECT loread(fd, 36) FROM lotest_stash_values; - -SELECT lo_close(fd) FROM lotest_stash_values; -END; - -\set filename :abs_builddir '/results/lotest.txt' -SELECT lo_export(loid, :'filename') FROM lotest_stash_values; - -\lo_import :filename - -\set newloid :LASTOID - --- just make sure \lo_export does not barf -\set filename :abs_builddir '/results/lotest2.txt' -\lo_export :newloid :filename - --- This is a hack to test that export/import are reversible --- This uses knowledge about the inner workings of large object mechanism --- which should not be used outside it. 
This makes it a HACK -SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) -EXCEPT -SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; - -SELECT lo_unlink(loid) FROM lotest_stash_values; - -TRUNCATE lotest_stash_values; - -\lo_unlink :newloid - -\set filename :abs_builddir '/results/lotest.txt' -\lo_import :filename - -\set newloid_1 :LASTOID - -SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2 -\gset - -SELECT md5(lo_get(:newloid_1)) = md5(lo_get(:newloid_2)); - -SELECT lo_get(:newloid_1, 0, 20); -SELECT lo_get(:newloid_1, 10, 20); -SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex')); -SELECT lo_get(:newloid_1, 0, 20); - -SELECT lo_put(:newloid_1, 4294967310, 'foo'); -SELECT lo_get(:newloid_1); -SELECT lo_get(:newloid_1, 4294967294, 100); - -\lo_unlink :newloid_1 -\lo_unlink :newloid_2 - --- This object is left in the database for pg_dump test purposes -SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid -\gset - -SET bytea_output TO hex; -SELECT lo_get(:newloid); - --- Create one more object that we leave behind for testing pg_dump/pg_upgrade; --- this one intentionally has an OID in the system range -SELECT lo_create(2121); - -COMMENT ON LARGE OBJECT 2121 IS 'testing comments'; - --- Clean up -DROP TABLE lotest_stash_values; - -DROP ROLE regress_lo_user; diff --git a/src/test/regress/input/misc.source b/src/test/regress/input/misc.source deleted file mode 100644 index a1e2f779ba7..00000000000 --- a/src/test/regress/input/misc.source +++ /dev/null @@ -1,268 +0,0 @@ --- --- MISC --- - --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR - --- --- BTREE --- -UPDATE onek - SET unique1 = onek.unique1 + 1; - -UPDATE onek - SET unique1 = onek.unique1 - 1; - --- --- BTREE partial --- --- UPDATE onek2 --- SET unique1 = onek2.unique1 + 1; - ---UPDATE onek2 --- SET unique1 = onek2.unique1 - 1; - --- --- BTREE shutting out non-functional updates --- --- the following two tests seem to take a long time on some --- systems. This non-func update stuff needs to be examined --- more closely. - jolly (2/22/96) --- -UPDATE tmp - SET stringu1 = reverse_name(onek.stringu1) - FROM onek - WHERE onek.stringu1 = 'JBAAAA' and - onek.stringu1 = tmp.stringu1; - -UPDATE tmp - SET stringu1 = reverse_name(onek2.stringu1) - FROM onek2 - WHERE onek2.stringu1 = 'JCAAAA' and - onek2.stringu1 = tmp.stringu1; - -DROP TABLE tmp; - ---UPDATE person* --- SET age = age + 1; - ---UPDATE person* --- SET age = age + 3 --- WHERE name = 'linda'; - --- --- copy --- -\set filename :abs_builddir '/results/onek.data' -COPY onek TO :'filename'; - -DELETE FROM onek; - -COPY onek FROM :'filename'; - -SELECT unique1 FROM onek WHERE unique1 < 2 ORDER BY unique1; - -DELETE FROM onek2; - -COPY onek2 FROM :'filename'; - -SELECT unique1 FROM onek2 WHERE unique1 < 2 ORDER BY unique1; - -\set filename :abs_builddir '/results/stud_emp.data' -COPY BINARY stud_emp TO :'filename'; - -DELETE FROM stud_emp; - -COPY BINARY stud_emp FROM :'filename'; - -SELECT * FROM stud_emp; - --- COPY aggtest FROM stdin; --- 56 7.8 --- 100 99.097 --- 0 0.09561 --- 42 324.78 --- . 
--- COPY aggtest TO stdout; - - --- --- inheritance stress test --- -SELECT * FROM a_star*; - -SELECT * - FROM b_star* x - WHERE x.b = text 'bumble' or x.a < 3; - -SELECT class, a - FROM c_star* x - WHERE x.c ~ text 'hi'; - -SELECT class, b, c - FROM d_star* x - WHERE x.a < 100; - -SELECT class, c FROM e_star* x WHERE x.c NOTNULL; - -SELECT * FROM f_star* x WHERE x.c ISNULL; - --- grouping and aggregation on inherited sets have been busted in the past... - -SELECT sum(a) FROM a_star*; - -SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class; - - -ALTER TABLE f_star RENAME COLUMN f TO ff; - -ALTER TABLE e_star* RENAME COLUMN e TO ee; - -ALTER TABLE d_star* RENAME COLUMN d TO dd; - -ALTER TABLE c_star* RENAME COLUMN c TO cc; - -ALTER TABLE b_star* RENAME COLUMN b TO bb; - -ALTER TABLE a_star* RENAME COLUMN a TO aa; - -SELECT class, aa - FROM a_star* x - WHERE aa ISNULL; - --- As of Postgres 7.1, ALTER implicitly recurses, --- so this should be same as ALTER a_star* - -ALTER TABLE a_star RENAME COLUMN aa TO foo; - -SELECT class, foo - FROM a_star* x - WHERE x.foo >= 2; - -ALTER TABLE a_star RENAME COLUMN foo TO aa; - -SELECT * - from a_star* - WHERE aa < 1000; - -ALTER TABLE f_star ADD COLUMN f int4; - -UPDATE f_star SET f = 10; - -ALTER TABLE e_star* ADD COLUMN e int4; - ---UPDATE e_star* SET e = 42; - -SELECT * FROM e_star*; - -ALTER TABLE a_star* ADD COLUMN a text; - --- That ALTER TABLE should have added TOAST tables. -SELECT relname, reltoastrelid <> 0 AS has_toast_table - FROM pg_class - WHERE oid::regclass IN ('a_star', 'c_star') - ORDER BY 1; - ---UPDATE b_star* --- SET a = text 'gazpacho' --- WHERE aa > 4; - -SELECT class, aa, a FROM a_star*; - - --- --- versions --- - --- --- postquel functions --- --- --- mike does post_hacking, --- joe and sally play basketball, and --- everyone else does nothing. --- -SELECT p.name, name(p.hobbies) FROM ONLY person p; - --- --- as above, but jeff also does post_hacking. --- -SELECT p.name, name(p.hobbies) FROM person* p; - --- --- the next two queries demonstrate how functions generate bogus duplicates. --- this is a "feature" .. --- -SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r - ORDER BY 1,2; - -SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r; - --- --- mike needs advil and peet's coffee, --- joe and sally need hightops, and --- everyone else is fine. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p; - --- --- as above, but jeff needs advil and peet's coffee as well. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p; - --- --- just like the last two, but make sure that the target list fixup and --- unflattening is being done correctly. 
--- -SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p; - -SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p; - -SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p; - -SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p; - -SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); - -SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer'))); - -SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer'))); - -SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer'))); - -SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer'))); - -SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer'))); - -SELECT name(equipment_named_ambiguous_2a(text 'skywalking')); - -SELECT name(equipment_named_ambiguous_2b(text 'skywalking')); - -SELECT hobbies_by_name('basketball'); - -SELECT name, overpaid(emp.*) FROM emp; - --- --- Try a few cases with SQL-spec row constructor expressions --- -SELECT * FROM equipment(ROW('skywalking', 'mer')); - -SELECT name(equipment(ROW('skywalking', 'mer'))); - -SELECT *, name(equipment(h.*)) FROM hobbies_r h; - -SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; - --- --- functional joins --- - --- --- instance rules --- - --- --- rewrite rules --- diff --git a/src/test/regress/input/tablespace.source b/src/test/regress/input/tablespace.source deleted file mode 100644 index 92076db9a13..00000000000 --- a/src/test/regress/input/tablespace.source +++ /dev/null @@ -1,416 +0,0 @@ --- directory paths are passed to us in environment variables -\getenv abs_builddir PG_ABS_BUILDDIR - -\set testtablespace :abs_builddir '/testtablespace' - --- create a tablespace using WITH clause -CREATE TABLESPACE regress_tblspacewith LOCATION :'testtablespace' WITH (some_nonexistent_parameter = true); -- fail -CREATE TABLESPACE regress_tblspacewith LOCATION :'testtablespace' WITH (random_page_cost = 3.0); -- ok - --- check to see the parameter was used -SELECT spcoptions FROM pg_tablespace WHERE spcname = 'regress_tblspacewith'; - --- drop the tablespace so we can re-use the location -DROP TABLESPACE regress_tblspacewith; - --- create a tablespace we can use -CREATE TABLESPACE regress_tblspace LOCATION :'testtablespace'; - --- try setting and resetting some properties for the new tablespace -ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1); -ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail -ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail -ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok - --- REINDEX (TABLESPACE) --- catalogs and system tablespaces --- system catalog, fail -REINDEX (TABLESPACE regress_tblspace) TABLE pg_am; -REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_am; --- shared catalog, fail -REINDEX (TABLESPACE regress_tblspace) TABLE pg_authid; -REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_authid; --- toast relations, fail -REINDEX (TABLESPACE regress_tblspace) INDEX pg_toast.pg_toast_1260_index; -REINDEX (TABLESPACE regress_tblspace) INDEX CONCURRENTLY pg_toast.pg_toast_1260_index; -REINDEX (TABLESPACE regress_tblspace) TABLE pg_toast.pg_toast_1260; -REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_toast.pg_toast_1260; --- system 
catalog, fail -REINDEX (TABLESPACE pg_global) TABLE pg_authid; -REINDEX (TABLESPACE pg_global) TABLE CONCURRENTLY pg_authid; - --- table with toast relation -CREATE TABLE regress_tblspace_test_tbl (num1 bigint, num2 double precision, t text); -INSERT INTO regress_tblspace_test_tbl (num1, num2, t) - SELECT round(random()*100), random(), 'text' - FROM generate_series(1, 10) s(i); -CREATE INDEX regress_tblspace_test_tbl_idx ON regress_tblspace_test_tbl (num1); --- move to global tablespace, fail -REINDEX (TABLESPACE pg_global) INDEX regress_tblspace_test_tbl_idx; -REINDEX (TABLESPACE pg_global) INDEX CONCURRENTLY regress_tblspace_test_tbl_idx; - --- check transactional behavior of REINDEX (TABLESPACE) -BEGIN; -REINDEX (TABLESPACE regress_tblspace) INDEX regress_tblspace_test_tbl_idx; -REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; -ROLLBACK; --- no relation moved to the new tablespace -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'; - --- check that all indexes are moved to a new tablespace with different --- relfilenode. --- Save first the existing relfilenode for the toast and main relations. -SELECT relfilenode as main_filenode FROM pg_class - WHERE relname = 'regress_tblspace_test_tbl_idx' \gset -SELECT relfilenode as toast_filenode FROM pg_class - WHERE oid = - (SELECT i.indexrelid - FROM pg_class c, - pg_index i - WHERE i.indrelid = c.reltoastrelid AND - c.relname = 'regress_tblspace_test_tbl') \gset -REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; -ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE regress_tblspace; -ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE pg_default; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; --- Move back to the default tablespace. -ALTER INDEX regress_tblspace_test_tbl_idx SET TABLESPACE pg_default; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; -REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE regress_tblspace_test_tbl; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; -SELECT relfilenode = :main_filenode AS main_same FROM pg_class - WHERE relname = 'regress_tblspace_test_tbl_idx'; -SELECT relfilenode = :toast_filenode as toast_same FROM pg_class - WHERE oid = - (SELECT i.indexrelid - FROM pg_class c, - pg_index i - WHERE i.indrelid = c.reltoastrelid AND - c.relname = 'regress_tblspace_test_tbl'); -DROP TABLE regress_tblspace_test_tbl; - --- REINDEX (TABLESPACE) with partitions --- Create a partition tree and check the set of relations reindexed --- with their new tablespace. -CREATE TABLE tbspace_reindex_part (c1 int, c2 int) PARTITION BY RANGE (c1); -CREATE TABLE tbspace_reindex_part_0 PARTITION OF tbspace_reindex_part - FOR VALUES FROM (0) TO (10) PARTITION BY list (c2); -CREATE TABLE tbspace_reindex_part_0_1 PARTITION OF tbspace_reindex_part_0 - FOR VALUES IN (1); -CREATE TABLE tbspace_reindex_part_0_2 PARTITION OF tbspace_reindex_part_0 - FOR VALUES IN (2); --- This partitioned table will have no partitions. 
-CREATE TABLE tbspace_reindex_part_10 PARTITION OF tbspace_reindex_part - FOR VALUES FROM (10) TO (20) PARTITION BY list (c2); --- Create some partitioned indexes -CREATE INDEX tbspace_reindex_part_index ON ONLY tbspace_reindex_part (c1); -CREATE INDEX tbspace_reindex_part_index_0 ON ONLY tbspace_reindex_part_0 (c1); -ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_0; --- This partitioned index will have no partitions. -CREATE INDEX tbspace_reindex_part_index_10 ON ONLY tbspace_reindex_part_10 (c1); -ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_10; -CREATE INDEX tbspace_reindex_part_index_0_1 ON ONLY tbspace_reindex_part_0_1 (c1); -ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_1; -CREATE INDEX tbspace_reindex_part_index_0_2 ON ONLY tbspace_reindex_part_0_2 (c1); -ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_2; -SELECT relid, parentrelid, level FROM pg_partition_tree('tbspace_reindex_part_index') - ORDER BY relid, level; --- Track the original tablespace, relfilenode and OID of each index --- in the tree. -CREATE TEMP TABLE reindex_temp_before AS - SELECT oid, relname, relfilenode, reltablespace - FROM pg_class - WHERE relname ~ 'tbspace_reindex_part_index'; -REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tbspace_reindex_part; --- REINDEX CONCURRENTLY changes the OID of the old relation, hence a check --- based on the relation name below. -SELECT b.relname, - CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged' - ELSE 'relfilenode has changed' END AS filenode, - CASE WHEN a.reltablespace = b.reltablespace THEN 'reltablespace is unchanged' - ELSE 'reltablespace has changed' END AS tbspace - FROM reindex_temp_before b JOIN pg_class a ON b.relname = a.relname - ORDER BY 1; -DROP TABLE tbspace_reindex_part; - --- create a schema we can use -CREATE SCHEMA testschema; - --- try a table -CREATE TABLE testschema.foo (i int) TABLESPACE regress_tblspace; -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'foo'; - -INSERT INTO testschema.foo VALUES(1); -INSERT INTO testschema.foo VALUES(2); - --- tables from dynamic sources -CREATE TABLE testschema.asselect TABLESPACE regress_tblspace AS SELECT 1; -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'asselect'; - -PREPARE selectsource(int) AS SELECT $1; -CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace - AS EXECUTE selectsource(2); -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'asexecute'; - --- index -CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace; -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'foo_idx'; - --- check \d output -\d testschema.foo -\d testschema.foo_idx - --- --- partitioned table --- -CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); -SET default_tablespace TO pg_global; -CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); -RESET default_tablespace; -CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); -SET default_tablespace TO regress_tblspace; -CREATE TABLE testschema.part_2 PARTITION OF testschema.part FOR VALUES IN (2); -SET 
default_tablespace TO pg_global; -CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); -ALTER TABLE testschema.part SET TABLESPACE regress_tblspace; -CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); -CREATE TABLE testschema.part_4 PARTITION OF testschema.part FOR VALUES IN (4) - TABLESPACE pg_default; -CREATE TABLE testschema.part_56 PARTITION OF testschema.part FOR VALUES IN (5, 6) - PARTITION BY LIST (a); -ALTER TABLE testschema.part SET TABLESPACE pg_default; -CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) - PARTITION BY LIST (a); -CREATE TABLE testschema.part_910 PARTITION OF testschema.part FOR VALUES IN (9, 10) - PARTITION BY LIST (a) TABLESPACE regress_tblspace; -RESET default_tablespace; -CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) - PARTITION BY LIST (a); - -SELECT relname, spcname FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) - LEFT JOIN pg_catalog.pg_tablespace t ON c.reltablespace = t.oid - where c.relname LIKE 'part%' AND n.nspname = 'testschema' order by relname; -RESET default_tablespace; -DROP TABLE testschema.part; - --- partitioned index -CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); -CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1); -CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace; -CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2); -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx'; -\d testschema.part -\d+ testschema.part -\d testschema.part1 -\d+ testschema.part1 -\d testschema.part_a_idx -\d+ testschema.part_a_idx - --- partitioned rels cannot specify the default tablespace. 
These fail: -CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE pg_default; -CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE pg_default) PARTITION BY LIST (a); -SET default_tablespace TO 'pg_default'; -CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE regress_tblspace; -CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a); --- but these work: -CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a) TABLESPACE regress_tblspace; -SET default_tablespace TO ''; -CREATE TABLE testschema.dflt2 (a int PRIMARY KEY) PARTITION BY LIST (a); -DROP TABLE testschema.dflt, testschema.dflt2; - --- check that default_tablespace doesn't affect ALTER TABLE index rebuilds -CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace; -INSERT INTO testschema.test_default_tab VALUES (1); -CREATE INDEX test_index1 on testschema.test_default_tab (id); -CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_tblspace; -ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index3 PRIMARY KEY (id); -ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; - -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d testschema.test_index4 --- use a custom tablespace for default_tablespace -SET default_tablespace TO regress_tblspace; --- tablespace should not change if no rewrite -ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d testschema.test_index4 -SELECT * FROM testschema.test_default_tab; --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab ALTER id TYPE int; -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d testschema.test_index4 -SELECT * FROM testschema.test_default_tab; --- now use the default tablespace for default_tablespace -SET default_tablespace TO ''; --- tablespace should not change if no rewrite -ALTER TABLE testschema.test_default_tab ALTER id TYPE int; -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d testschema.test_index4 --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d testschema.test_index4 -DROP TABLE testschema.test_default_tab; - --- check that default_tablespace doesn't affect ALTER TABLE index rebuilds --- (this time with a partitioned table) -CREATE TABLE testschema.test_default_tab_p(id bigint, val bigint) - PARTITION BY LIST (id) TABLESPACE regress_tblspace; -CREATE TABLE testschema.test_default_tab_p1 PARTITION OF testschema.test_default_tab_p - FOR VALUES IN (1); -INSERT INTO testschema.test_default_tab_p VALUES (1); -CREATE INDEX test_index1 on testschema.test_default_tab_p (val); -CREATE INDEX test_index2 on testschema.test_default_tab_p (val) TABLESPACE regress_tblspace; -ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index3 PRIMARY KEY (id); -ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; - -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d 
testschema.test_index4 --- use a custom tablespace for default_tablespace -SET default_tablespace TO regress_tblspace; --- tablespace should not change if no rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d testschema.test_index4 -SELECT * FROM testschema.test_default_tab_p; --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d testschema.test_index4 -SELECT * FROM testschema.test_default_tab_p; --- now use the default tablespace for default_tablespace -SET default_tablespace TO ''; --- tablespace should not change if no rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d testschema.test_index4 --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; -\d testschema.test_index1 -\d testschema.test_index2 -\d testschema.test_index3 -\d testschema.test_index4 -DROP TABLE testschema.test_default_tab_p; - --- check that default_tablespace affects index additions in ALTER TABLE -CREATE TABLE testschema.test_tab(id int) TABLESPACE regress_tblspace; -INSERT INTO testschema.test_tab VALUES (1); -SET default_tablespace TO regress_tblspace; -ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id); -SET default_tablespace TO ''; -ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id); -\d testschema.test_tab_unique -\d testschema.test_tab_pkey -SELECT * FROM testschema.test_tab; -DROP TABLE testschema.test_tab; - --- check that default_tablespace is handled correctly by multi-command --- ALTER TABLE that includes a tablespace-preserving rewrite -CREATE TABLE testschema.test_tab(a int, b int, c int); -SET default_tablespace TO regress_tblspace; -ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (a); -CREATE INDEX test_tab_a_idx ON testschema.test_tab (a); -SET default_tablespace TO ''; -CREATE INDEX test_tab_b_idx ON testschema.test_tab (b); -\d testschema.test_tab_unique -\d testschema.test_tab_a_idx -\d testschema.test_tab_b_idx -ALTER TABLE testschema.test_tab ALTER b TYPE bigint, ADD UNIQUE (c); -\d testschema.test_tab_unique -\d testschema.test_tab_a_idx -\d testschema.test_tab_b_idx -DROP TABLE testschema.test_tab; - --- let's try moving a table from one place to another -CREATE TABLE testschema.atable AS VALUES (1), (2); -CREATE UNIQUE INDEX anindex ON testschema.atable(column1); - -ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace; -ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace; -ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_global; -ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; -ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace; - -INSERT INTO testschema.atable VALUES(3); -- ok -INSERT INTO testschema.atable VALUES(1); -- fail (checks index) -SELECT COUNT(*) FROM testschema.atable; -- checks heap - --- Will fail with bad path -CREATE TABLESPACE regress_badspace LOCATION '/no/such/location'; - --- No such tablespace -CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace; - --- Fail, in use for some partitioned object -DROP TABLESPACE regress_tblspace; -ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; --- Fail, 
not empty -DROP TABLESPACE regress_tblspace; - -CREATE ROLE regress_tablespace_user1 login; -CREATE ROLE regress_tablespace_user2 login; -GRANT USAGE ON SCHEMA testschema TO regress_tablespace_user2; - -ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1; - -CREATE TABLE testschema.tablespace_acl (c int); --- new owner lacks permission to create this index from scratch -CREATE INDEX k ON testschema.tablespace_acl (c) TABLESPACE regress_tblspace; -ALTER TABLE testschema.tablespace_acl OWNER TO regress_tablespace_user2; - -SET SESSION ROLE regress_tablespace_user2; -CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail -ALTER TABLE testschema.tablespace_acl ALTER c TYPE bigint; -REINDEX (TABLESPACE regress_tblspace) TABLE tablespace_table; -- fail -REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tablespace_table; -- fail -RESET ROLE; - -ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed; - -ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; -ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; - --- Should show notice that nothing was done -ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; - --- Should succeed -DROP TABLESPACE regress_tblspace_renamed; - -DROP SCHEMA testschema CASCADE; - -DROP ROLE regress_tablespace_user1; -DROP ROLE regress_tablespace_user2; diff --git a/src/test/regress/output/constraints.source b/src/test/regress/output/constraints.source deleted file mode 100644 index e32cf8bb574..00000000000 --- a/src/test/regress/output/constraints.source +++ /dev/null @@ -1,742 +0,0 @@ --- --- CONSTRAINTS --- Constraints can be specified with: --- - DEFAULT clause --- - CHECK clauses --- - PRIMARY KEY clauses --- - UNIQUE clauses --- - EXCLUDE clauses --- --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR --- --- DEFAULT syntax --- -CREATE TABLE DEFAULT_TBL (i int DEFAULT 100, - x text DEFAULT 'vadim', f float8 DEFAULT 123.456); -INSERT INTO DEFAULT_TBL VALUES (1, 'thomas', 57.0613); -INSERT INTO DEFAULT_TBL VALUES (1, 'bruce'); -INSERT INTO DEFAULT_TBL (i, f) VALUES (2, 987.654); -INSERT INTO DEFAULT_TBL (x) VALUES ('marc'); -INSERT INTO DEFAULT_TBL VALUES (3, null, 1.0); -SELECT * FROM DEFAULT_TBL; - i | x | f ------+--------+--------- - 1 | thomas | 57.0613 - 1 | bruce | 123.456 - 2 | vadim | 987.654 - 100 | marc | 123.456 - 3 | | 1 -(5 rows) - -CREATE SEQUENCE DEFAULT_SEQ; -CREATE TABLE DEFAULTEXPR_TBL (i1 int DEFAULT 100 + (200-199) * 2, - i2 int DEFAULT nextval('default_seq')); -INSERT INTO DEFAULTEXPR_TBL VALUES (-1, -2); -INSERT INTO DEFAULTEXPR_TBL (i1) VALUES (-3); -INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (-4); -INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (NULL); -SELECT * FROM DEFAULTEXPR_TBL; - i1 | i2 ------+---- - -1 | -2 - -3 | 1 - 102 | -4 - 102 | -(4 rows) - --- syntax errors --- test for extraneous comma -CREATE TABLE error_tbl (i int DEFAULT (100, )); -ERROR: syntax error at or near ")" -LINE 1: CREATE TABLE error_tbl (i int DEFAULT (100, )); - ^ --- this will fail because gram.y uses b_expr not a_expr for defaults, --- to avoid a shift/reduce conflict that arises from NOT NULL being --- part of the column definition syntax: -CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); -ERROR: syntax error at or near "IN" -LINE 1: CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); - ^ --- this should work, however: -CREATE TABLE error_tbl (b1 bool DEFAULT (1 IN (1, 2))); 
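The b_expr limitation illustrated above extends beyond IN: bare AND/OR are likewise excluded from the DEFAULT grammar, so a boolean default built from them needs the same extra parentheses. A minimal sketch under that assumption (the demo table name is hypothetical, not part of the test suite):

-- fails with a syntax error at "AND", for the same shift/reduce reason:
-- CREATE TABLE b_expr_demo (b1 bool DEFAULT true AND false);
-- parenthesizing promotes the expression to a_expr, which is accepted:
CREATE TABLE b_expr_demo (b1 bool DEFAULT (true AND false));
DROP TABLE b_expr_demo;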
-DROP TABLE error_tbl; --- --- CHECK syntax --- -CREATE TABLE CHECK_TBL (x int, - CONSTRAINT CHECK_CON CHECK (x > 3)); -INSERT INTO CHECK_TBL VALUES (5); -INSERT INTO CHECK_TBL VALUES (4); -INSERT INTO CHECK_TBL VALUES (3); -ERROR: new row for relation "check_tbl" violates check constraint "check_con" -DETAIL: Failing row contains (3). -INSERT INTO CHECK_TBL VALUES (2); -ERROR: new row for relation "check_tbl" violates check constraint "check_con" -DETAIL: Failing row contains (2). -INSERT INTO CHECK_TBL VALUES (6); -INSERT INTO CHECK_TBL VALUES (1); -ERROR: new row for relation "check_tbl" violates check constraint "check_con" -DETAIL: Failing row contains (1). -SELECT * FROM CHECK_TBL; - x ---- - 5 - 4 - 6 -(3 rows) - -CREATE SEQUENCE CHECK_SEQ; -CREATE TABLE CHECK2_TBL (x int, y text, z int, - CONSTRAINT SEQUENCE_CON - CHECK (x > 3 and y <> 'check failed' and z < 8)); -INSERT INTO CHECK2_TBL VALUES (4, 'check ok', -2); -INSERT INTO CHECK2_TBL VALUES (1, 'x check failed', -2); -ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" -DETAIL: Failing row contains (1, x check failed, -2). -INSERT INTO CHECK2_TBL VALUES (5, 'z check failed', 10); -ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" -DETAIL: Failing row contains (5, z check failed, 10). -INSERT INTO CHECK2_TBL VALUES (0, 'check failed', -2); -ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" -DETAIL: Failing row contains (0, check failed, -2). -INSERT INTO CHECK2_TBL VALUES (6, 'check failed', 11); -ERROR: new row for relation "check2_tbl" violates check constraint "sequence_con" -DETAIL: Failing row contains (6, check failed, 11). -INSERT INTO CHECK2_TBL VALUES (7, 'check ok', 7); -SELECT * from CHECK2_TBL; - x | y | z ----+----------+---- - 4 | check ok | -2 - 7 | check ok | 7 -(2 rows) - --- --- Check constraints on INSERT --- -CREATE SEQUENCE INSERT_SEQ; -CREATE TABLE INSERT_TBL (x INT DEFAULT nextval('insert_seq'), - y TEXT DEFAULT '-NULL-', - z INT DEFAULT -1 * currval('insert_seq'), - CONSTRAINT INSERT_TBL_CON CHECK (x >= 3 AND y <> 'check failed' AND x < 8), - CHECK (x + z = 0)); -INSERT INTO INSERT_TBL(x,z) VALUES (2, -2); -ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" -DETAIL: Failing row contains (2, -NULL-, -2). -SELECT * FROM INSERT_TBL; - x | y | z ----+---+--- -(0 rows) - -SELECT 'one' AS one, nextval('insert_seq'); - one | nextval ------+--------- - one | 1 -(1 row) - -INSERT INTO INSERT_TBL(y) VALUES ('Y'); -ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" -DETAIL: Failing row contains (2, Y, -2). -INSERT INTO INSERT_TBL(y) VALUES ('Y'); -INSERT INTO INSERT_TBL(x,z) VALUES (1, -2); -ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_check" -DETAIL: Failing row contains (1, -NULL-, -2). -INSERT INTO INSERT_TBL(z,x) VALUES (-7, 7); -INSERT INTO INSERT_TBL VALUES (5, 'check failed', -5); -ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" -DETAIL: Failing row contains (5, check failed, -5). 
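The INSERT_TBL failures above interleave with nextval()/currval() in a way that only works because sequence advances are non-transactional: an INSERT rejected by a CHECK constraint still consumes its sequence value. A reduced sketch of that behavior (object names hypothetical):

CREATE SEQUENCE demo_seq;
CREATE TABLE demo_tbl (x int DEFAULT nextval('demo_seq') CHECK (x < 0));
INSERT INTO demo_tbl DEFAULT VALUES;  -- rejected by the CHECK ...
SELECT currval('demo_seq');           -- ... but sequence value 1 is gone for good
DROP TABLE demo_tbl;
DROP SEQUENCE demo_seq;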
-INSERT INTO INSERT_TBL VALUES (7, '!check failed', -7); -INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); -SELECT * FROM INSERT_TBL; - x | y | z ----+---------------+---- - 3 | Y | -3 - 7 | -NULL- | -7 - 7 | !check failed | -7 - 4 | -!NULL- | -4 -(4 rows) - -INSERT INTO INSERT_TBL(y,z) VALUES ('check failed', 4); -ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_check" -DETAIL: Failing row contains (5, check failed, 4). -INSERT INTO INSERT_TBL(x,y) VALUES (5, 'check failed'); -ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" -DETAIL: Failing row contains (5, check failed, -5). -INSERT INTO INSERT_TBL(x,y) VALUES (5, '!check failed'); -INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); -SELECT * FROM INSERT_TBL; - x | y | z ----+---------------+---- - 3 | Y | -3 - 7 | -NULL- | -7 - 7 | !check failed | -7 - 4 | -!NULL- | -4 - 5 | !check failed | -5 - 6 | -!NULL- | -6 -(6 rows) - -SELECT 'seven' AS one, nextval('insert_seq'); - one | nextval --------+--------- - seven | 7 -(1 row) - -INSERT INTO INSERT_TBL(y) VALUES ('Y'); -ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" -DETAIL: Failing row contains (8, Y, -8). -SELECT 'eight' AS one, currval('insert_seq'); - one | currval --------+--------- - eight | 8 -(1 row) - --- According to SQL, it is OK to insert a record that gives rise to NULL --- constraint-condition results. Postgres used to reject this, but it --- was wrong: -INSERT INTO INSERT_TBL VALUES (null, null, null); -SELECT * FROM INSERT_TBL; - x | y | z ----+---------------+---- - 3 | Y | -3 - 7 | -NULL- | -7 - 7 | !check failed | -7 - 4 | -!NULL- | -4 - 5 | !check failed | -5 - 6 | -!NULL- | -6 - | | -(7 rows) - --- --- Check constraints on system columns --- -CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, - altitude int, - CHECK (NOT (is_capital AND tableoid::regclass::text = 'sys_col_check_tbl'))); -INSERT INTO SYS_COL_CHECK_TBL VALUES ('Seattle', 'Washington', false, 100); -INSERT INTO SYS_COL_CHECK_TBL VALUES ('Olympia', 'Washington', true, 100); -ERROR: new row for relation "sys_col_check_tbl" violates check constraint "sys_col_check_tbl_check" -DETAIL: Failing row contains (Olympia, Washington, t, 100). -SELECT *, tableoid::regclass::text FROM SYS_COL_CHECK_TBL; - city | state | is_capital | altitude | tableoid ----------+------------+------------+----------+------------------- - Seattle | Washington | f | 100 | sys_col_check_tbl -(1 row) - -DROP TABLE SYS_COL_CHECK_TBL; --- --- Check constraints on system columns other then TableOid should return error --- -CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, - altitude int, - CHECK (NOT (is_capital AND ctid::text = 'sys_col_check_tbl'))); -ERROR: system column "ctid" reference in check constraint is invalid -LINE 3: CHECK (NOT (is_capital AND ctid::text = 'sys_col_check... - ^ --- --- Check inheritance of defaults and constraints --- -CREATE TABLE INSERT_CHILD (cx INT default 42, - cy INT CHECK (cy > x)) - INHERITS (INSERT_TBL); -INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,11); -INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,6); -ERROR: new row for relation "insert_child" violates check constraint "insert_child_check" -DETAIL: Failing row contains (7, -NULL-, -7, 42, 6). -INSERT INTO INSERT_CHILD(x,z,cy) VALUES (6,-7,7); -ERROR: new row for relation "insert_child" violates check constraint "insert_tbl_check" -DETAIL: Failing row contains (6, -NULL-, -7, 42, 7). 
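Note that INSERT_CHILD's CHECK (cy > x) constrains a child column against an inherited one; that cross-column reference is exactly what the failing rows above exercise. A reduced sketch (names hypothetical):

CREATE TABLE parent_demo (x int);
CREATE TABLE child_demo (y int CHECK (y > x)) INHERITS (parent_demo);
INSERT INTO child_demo VALUES (1, 2);  -- ok: inherited x = 1, y = 2
INSERT INTO child_demo VALUES (2, 1);  -- fails child_demo_check
DROP TABLE child_demo, parent_demo;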
-INSERT INTO INSERT_CHILD(x,y,z,cy) VALUES (6,'check failed',-6,7); -ERROR: new row for relation "insert_child" violates check constraint "insert_tbl_con" -DETAIL: Failing row contains (6, check failed, -6, 42, 7). -SELECT * FROM INSERT_CHILD; - x | y | z | cx | cy ----+--------+----+----+---- - 7 | -NULL- | -7 | 42 | 11 -(1 row) - -DROP TABLE INSERT_CHILD; --- --- Check NO INHERIT type of constraints and inheritance --- -CREATE TABLE ATACC1 (TEST INT - CHECK (TEST > 0) NO INHERIT); -CREATE TABLE ATACC2 (TEST2 INT) INHERITS (ATACC1); --- check constraint is not there on child -INSERT INTO ATACC2 (TEST) VALUES (-3); --- check constraint is there on parent -INSERT INTO ATACC1 (TEST) VALUES (-3); -ERROR: new row for relation "atacc1" violates check constraint "atacc1_test_check" -DETAIL: Failing row contains (-3). -DROP TABLE ATACC1 CASCADE; -NOTICE: drop cascades to table atacc2 -CREATE TABLE ATACC1 (TEST INT, TEST2 INT - CHECK (TEST > 0), CHECK (TEST2 > 10) NO INHERIT); -CREATE TABLE ATACC2 () INHERITS (ATACC1); --- check constraint is there on child -INSERT INTO ATACC2 (TEST) VALUES (-3); -ERROR: new row for relation "atacc2" violates check constraint "atacc1_test_check" -DETAIL: Failing row contains (-3, null). --- check constraint is there on parent -INSERT INTO ATACC1 (TEST) VALUES (-3); -ERROR: new row for relation "atacc1" violates check constraint "atacc1_test_check" -DETAIL: Failing row contains (-3, null). --- check constraint is not there on child -INSERT INTO ATACC2 (TEST2) VALUES (3); --- check constraint is there on parent -INSERT INTO ATACC1 (TEST2) VALUES (3); -ERROR: new row for relation "atacc1" violates check constraint "atacc1_test2_check" -DETAIL: Failing row contains (null, 3). -DROP TABLE ATACC1 CASCADE; -NOTICE: drop cascades to table atacc2 --- --- Check constraints on INSERT INTO --- -DELETE FROM INSERT_TBL; -ALTER SEQUENCE INSERT_SEQ RESTART WITH 4; -CREATE TEMP TABLE tmp (xd INT, yd TEXT, zd INT); -INSERT INTO tmp VALUES (null, 'Y', null); -INSERT INTO tmp VALUES (5, '!check failed', null); -INSERT INTO tmp VALUES (null, 'try again', null); -INSERT INTO INSERT_TBL(y) select yd from tmp; -SELECT * FROM INSERT_TBL; - x | y | z ----+---------------+---- - 4 | Y | -4 - 5 | !check failed | -5 - 6 | try again | -6 -(3 rows) - -INSERT INTO INSERT_TBL SELECT * FROM tmp WHERE yd = 'try again'; -INSERT INTO INSERT_TBL(y,z) SELECT yd, -7 FROM tmp WHERE yd = 'try again'; -INSERT INTO INSERT_TBL(y,z) SELECT yd, -8 FROM tmp WHERE yd = 'try again'; -ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" -DETAIL: Failing row contains (8, try again, -8). -SELECT * FROM INSERT_TBL; - x | y | z ----+---------------+---- - 4 | Y | -4 - 5 | !check failed | -5 - 6 | try again | -6 - | try again | - 7 | try again | -7 -(5 rows) - -DROP TABLE tmp; --- --- Check constraints on UPDATE --- -UPDATE INSERT_TBL SET x = NULL WHERE x = 5; -UPDATE INSERT_TBL SET x = 6 WHERE x = 6; -UPDATE INSERT_TBL SET x = -z, z = -x; -UPDATE INSERT_TBL SET x = z, z = x; -ERROR: new row for relation "insert_tbl" violates check constraint "insert_tbl_con" -DETAIL: Failing row contains (-4, Y, 4). 
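The pair of UPDATEs above is a useful reminder that every assignment in a single UPDATE is evaluated against the old row: SET x = -z, z = -x performs a clean negate-and-swap, after which SET x = z, z = x moves the negative z values into x and trips the x >= 3 range check. A standalone sketch (names hypothetical):

CREATE TABLE swap_demo (a int, b int);
INSERT INTO swap_demo VALUES (1, 2);
UPDATE swap_demo SET a = b, b = a;  -- both right-hand sides see the old row
SELECT * FROM swap_demo;            -- returns (2, 1): a true swap, not (2, 2)
DROP TABLE swap_demo;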
-SELECT * FROM INSERT_TBL; - x | y | z ----+---------------+---- - 4 | Y | -4 - | try again | - 7 | try again | -7 - 5 | !check failed | - 6 | try again | -6 -(5 rows) - --- DROP TABLE INSERT_TBL; --- --- Check constraints on COPY FROM --- -CREATE TABLE COPY_TBL (x INT, y TEXT, z INT, - CONSTRAINT COPY_CON - CHECK (x > 3 AND y <> 'check failed' AND x < 7 )); -\set filename :abs_srcdir '/data/constro.data' -COPY COPY_TBL FROM :'filename'; -SELECT * FROM COPY_TBL; - x | y | z ----+---------------+--- - 4 | !check failed | 5 - 6 | OK | 4 -(2 rows) - -\set filename :abs_srcdir '/data/constrf.data' -COPY COPY_TBL FROM :'filename'; -ERROR: new row for relation "copy_tbl" violates check constraint "copy_con" -DETAIL: Failing row contains (7, check failed, 6). -CONTEXT: COPY copy_tbl, line 2: "7 check failed 6" -SELECT * FROM COPY_TBL; - x | y | z ----+---------------+--- - 4 | !check failed | 5 - 6 | OK | 4 -(2 rows) - --- --- Primary keys --- -CREATE TABLE PRIMARY_TBL (i int PRIMARY KEY, t text); -INSERT INTO PRIMARY_TBL VALUES (1, 'one'); -INSERT INTO PRIMARY_TBL VALUES (2, 'two'); -INSERT INTO PRIMARY_TBL VALUES (1, 'three'); -ERROR: duplicate key value violates unique constraint "primary_tbl_pkey" -DETAIL: Key (i)=(1) already exists. -INSERT INTO PRIMARY_TBL VALUES (4, 'three'); -INSERT INTO PRIMARY_TBL VALUES (5, 'one'); -INSERT INTO PRIMARY_TBL (t) VALUES ('six'); -ERROR: null value in column "i" of relation "primary_tbl" violates not-null constraint -DETAIL: Failing row contains (null, six). -SELECT * FROM PRIMARY_TBL; - i | t ----+------- - 1 | one - 2 | two - 4 | three - 5 | one -(4 rows) - -DROP TABLE PRIMARY_TBL; -CREATE TABLE PRIMARY_TBL (i int, t text, - PRIMARY KEY(i,t)); -INSERT INTO PRIMARY_TBL VALUES (1, 'one'); -INSERT INTO PRIMARY_TBL VALUES (2, 'two'); -INSERT INTO PRIMARY_TBL VALUES (1, 'three'); -INSERT INTO PRIMARY_TBL VALUES (4, 'three'); -INSERT INTO PRIMARY_TBL VALUES (5, 'one'); -INSERT INTO PRIMARY_TBL (t) VALUES ('six'); -ERROR: null value in column "i" of relation "primary_tbl" violates not-null constraint -DETAIL: Failing row contains (null, six). -SELECT * FROM PRIMARY_TBL; - i | t ----+------- - 1 | one - 2 | two - 1 | three - 4 | three - 5 | one -(5 rows) - -DROP TABLE PRIMARY_TBL; --- --- Unique keys --- -CREATE TABLE UNIQUE_TBL (i int UNIQUE, t text); -INSERT INTO UNIQUE_TBL VALUES (1, 'one'); -INSERT INTO UNIQUE_TBL VALUES (2, 'two'); -INSERT INTO UNIQUE_TBL VALUES (1, 'three'); -ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" -DETAIL: Key (i)=(1) already exists. -INSERT INTO UNIQUE_TBL VALUES (4, 'four'); -INSERT INTO UNIQUE_TBL VALUES (5, 'one'); -INSERT INTO UNIQUE_TBL (t) VALUES ('six'); -INSERT INTO UNIQUE_TBL (t) VALUES ('seven'); -INSERT INTO UNIQUE_TBL VALUES (5, 'five-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'five-upsert-update'; -INSERT INTO UNIQUE_TBL VALUES (6, 'six-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'six-upsert-update'; --- should fail -INSERT INTO UNIQUE_TBL VALUES (1, 'a'), (2, 'b'), (2, 'b') ON CONFLICT (i) DO UPDATE SET t = 'fails'; -ERROR: ON CONFLICT DO UPDATE command cannot affect row a second time -HINT: Ensure that no rows proposed for insertion within the same command have duplicate constrained values. 
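The cardinality violation above reflects a fixed rule: one INSERT ... ON CONFLICT DO UPDATE may affect any given existing row at most once, so duplicate keys must be removed from the proposed rows first. One conventional workaround, sketched against a hypothetical table upsert_demo with a unique column i:

INSERT INTO upsert_demo (i, t)
SELECT DISTINCT ON (i) i, t
  FROM (VALUES (2, 'b'), (2, 'b')) AS v(i, t)
ON CONFLICT (i) DO UPDATE SET t = EXCLUDED.t;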
-SELECT * FROM UNIQUE_TBL; - i | t ----+-------------------- - 1 | one - 2 | two - 4 | four - | six - | seven - 5 | five-upsert-update - 6 | six-upsert-insert -(7 rows) - -DROP TABLE UNIQUE_TBL; -CREATE TABLE UNIQUE_TBL (i int, t text, - UNIQUE(i,t)); -INSERT INTO UNIQUE_TBL VALUES (1, 'one'); -INSERT INTO UNIQUE_TBL VALUES (2, 'two'); -INSERT INTO UNIQUE_TBL VALUES (1, 'three'); -INSERT INTO UNIQUE_TBL VALUES (1, 'one'); -ERROR: duplicate key value violates unique constraint "unique_tbl_i_t_key" -DETAIL: Key (i, t)=(1, one) already exists. -INSERT INTO UNIQUE_TBL VALUES (5, 'one'); -INSERT INTO UNIQUE_TBL (t) VALUES ('six'); -SELECT * FROM UNIQUE_TBL; - i | t ----+------- - 1 | one - 2 | two - 1 | three - 5 | one - | six -(5 rows) - -DROP TABLE UNIQUE_TBL; --- --- Deferrable unique constraints --- -CREATE TABLE unique_tbl (i int UNIQUE DEFERRABLE, t text); -INSERT INTO unique_tbl VALUES (0, 'one'); -INSERT INTO unique_tbl VALUES (1, 'two'); -INSERT INTO unique_tbl VALUES (2, 'tree'); -INSERT INTO unique_tbl VALUES (3, 'four'); -INSERT INTO unique_tbl VALUES (4, 'five'); -BEGIN; --- default is immediate so this should fail right away -UPDATE unique_tbl SET i = 1 WHERE i = 0; -ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" -DETAIL: Key (i)=(1) already exists. -ROLLBACK; --- check is done at end of statement, so this should succeed -UPDATE unique_tbl SET i = i+1; -SELECT * FROM unique_tbl; - i | t ----+------ - 1 | one - 2 | two - 3 | tree - 4 | four - 5 | five -(5 rows) - --- explicitly defer the constraint -BEGIN; -SET CONSTRAINTS unique_tbl_i_key DEFERRED; -INSERT INTO unique_tbl VALUES (3, 'three'); -DELETE FROM unique_tbl WHERE t = 'tree'; -- makes constraint valid again -COMMIT; -- should succeed -SELECT * FROM unique_tbl; - i | t ----+------- - 1 | one - 2 | two - 4 | four - 5 | five - 3 | three -(5 rows) - --- try adding an initially deferred constraint -ALTER TABLE unique_tbl DROP CONSTRAINT unique_tbl_i_key; -ALTER TABLE unique_tbl ADD CONSTRAINT unique_tbl_i_key - UNIQUE (i) DEFERRABLE INITIALLY DEFERRED; -BEGIN; -INSERT INTO unique_tbl VALUES (1, 'five'); -INSERT INTO unique_tbl VALUES (5, 'one'); -UPDATE unique_tbl SET i = 4 WHERE i = 2; -UPDATE unique_tbl SET i = 2 WHERE i = 4 AND t = 'four'; -DELETE FROM unique_tbl WHERE i = 1 AND t = 'one'; -DELETE FROM unique_tbl WHERE i = 5 AND t = 'five'; -COMMIT; -SELECT * FROM unique_tbl; - i | t ----+------- - 3 | three - 1 | five - 5 | one - 4 | two - 2 | four -(5 rows) - --- should fail at commit-time -BEGIN; -INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now -COMMIT; -- should fail -ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" -DETAIL: Key (i)=(3) already exists. --- make constraint check immediate -BEGIN; -SET CONSTRAINTS ALL IMMEDIATE; -INSERT INTO unique_tbl VALUES (3, 'Three'); -- should fail -ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" -DETAIL: Key (i)=(3) already exists. -COMMIT; --- forced check when SET CONSTRAINTS is called -BEGIN; -SET CONSTRAINTS ALL DEFERRED; -INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now -SET CONSTRAINTS ALL IMMEDIATE; -- should fail -ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" -DETAIL: Key (i)=(3) already exists. 
-COMMIT; --- test deferrable UNIQUE with a partitioned table -CREATE TABLE parted_uniq_tbl (i int UNIQUE DEFERRABLE) partition by range (i); -CREATE TABLE parted_uniq_tbl_1 PARTITION OF parted_uniq_tbl FOR VALUES FROM (0) TO (10); -CREATE TABLE parted_uniq_tbl_2 PARTITION OF parted_uniq_tbl FOR VALUES FROM (20) TO (30); -SELECT conname, conrelid::regclass FROM pg_constraint - WHERE conname LIKE 'parted_uniq%' ORDER BY conname; - conname | conrelid --------------------------+------------------- - parted_uniq_tbl_1_i_key | parted_uniq_tbl_1 - parted_uniq_tbl_2_i_key | parted_uniq_tbl_2 - parted_uniq_tbl_i_key | parted_uniq_tbl -(3 rows) - -BEGIN; -INSERT INTO parted_uniq_tbl VALUES (1); -SAVEPOINT f; -INSERT INTO parted_uniq_tbl VALUES (1); -- unique violation -ERROR: duplicate key value violates unique constraint "parted_uniq_tbl_1_i_key" -DETAIL: Key (i)=(1) already exists. -ROLLBACK TO f; -SET CONSTRAINTS parted_uniq_tbl_i_key DEFERRED; -INSERT INTO parted_uniq_tbl VALUES (1); -- OK now, fail at commit -COMMIT; -ERROR: duplicate key value violates unique constraint "parted_uniq_tbl_1_i_key" -DETAIL: Key (i)=(1) already exists. -DROP TABLE parted_uniq_tbl; --- test a HOT update that invalidates the conflicting tuple. --- the trigger should still fire and catch the violation -BEGIN; -INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now -UPDATE unique_tbl SET t = 'THREE' WHERE i = 3 AND t = 'Three'; -COMMIT; -- should fail -ERROR: duplicate key value violates unique constraint "unique_tbl_i_key" -DETAIL: Key (i)=(3) already exists. -SELECT * FROM unique_tbl; - i | t ----+------- - 3 | three - 1 | five - 5 | one - 4 | two - 2 | four -(5 rows) - --- test a HOT update that modifies the newly inserted tuple, --- but should succeed because we then remove the other conflicting tuple. -BEGIN; -INSERT INTO unique_tbl VALUES(3, 'tree'); -- should succeed for now -UPDATE unique_tbl SET t = 'threex' WHERE t = 'tree'; -DELETE FROM unique_tbl WHERE t = 'three'; -SELECT * FROM unique_tbl; - i | t ----+-------- - 1 | five - 5 | one - 4 | two - 2 | four - 3 | threex -(5 rows) - -COMMIT; -SELECT * FROM unique_tbl; - i | t ----+-------- - 1 | five - 5 | one - 4 | two - 2 | four - 3 | threex -(5 rows) - -DROP TABLE unique_tbl; --- --- EXCLUDE constraints --- -CREATE TABLE circles ( - c1 CIRCLE, - c2 TEXT, - EXCLUDE USING gist - (c1 WITH &&, (c2::circle) WITH &&) - WHERE (circle_center(c1) <> '(0,0)') -); --- these should succeed because they don't match the index predicate -INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 5>'); -INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 4>'); --- succeed -INSERT INTO circles VALUES('<(10,10), 10>', '<(0,0), 5>'); --- fail, overlaps -INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>'); -ERROR: conflicting key value violates exclusion constraint "circles_c1_c2_excl" -DETAIL: Key (c1, (c2::circle))=(<(20,20),10>, <(0,0),4>) conflicts with existing key (c1, (c2::circle))=(<(10,10),10>, <(0,0),5>). 
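For orientation, an exclusion constraint rejects a new row whenever some existing row satisfies every listed operator clause at once; with the equality operator it degenerates to UNIQUE-like behavior, which makes for a compact illustration (names hypothetical):

CREATE TABLE excl_demo (room int, EXCLUDE USING btree (room WITH =));
INSERT INTO excl_demo VALUES (1);
INSERT INTO excl_demo VALUES (1);  -- conflicts on room WITH =, rejected
DROP TABLE excl_demo;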
--- succeed, because violation is ignored -INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') - ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO NOTHING; --- fail, because DO UPDATE variant requires unique index -INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') - ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO UPDATE SET c2 = EXCLUDED.c2; -ERROR: ON CONFLICT DO UPDATE not supported with exclusion constraints --- succeed because c1 doesn't overlap -INSERT INTO circles VALUES('<(20,20), 1>', '<(0,0), 5>'); --- succeed because c2 doesn't overlap -INSERT INTO circles VALUES('<(20,20), 10>', '<(10,10), 5>'); --- should fail on existing data without the WHERE clause -ALTER TABLE circles ADD EXCLUDE USING gist - (c1 WITH &&, (c2::circle) WITH &&); -ERROR: could not create exclusion constraint "circles_c1_c2_excl1" -DETAIL: Key (c1, (c2::circle))=(<(0,0),5>, <(0,0),5>) conflicts with key (c1, (c2::circle))=(<(0,0),5>, <(0,0),4>). --- try reindexing an existing constraint -REINDEX INDEX circles_c1_c2_excl; -DROP TABLE circles; --- Check deferred exclusion constraint -CREATE TABLE deferred_excl ( - f1 int, - f2 int, - CONSTRAINT deferred_excl_con EXCLUDE (f1 WITH =) INITIALLY DEFERRED -); -INSERT INTO deferred_excl VALUES(1); -INSERT INTO deferred_excl VALUES(2); -INSERT INTO deferred_excl VALUES(1); -- fail -ERROR: conflicting key value violates exclusion constraint "deferred_excl_con" -DETAIL: Key (f1)=(1) conflicts with existing key (f1)=(1). -INSERT INTO deferred_excl VALUES(1) ON CONFLICT ON CONSTRAINT deferred_excl_con DO NOTHING; -- fail -ERROR: ON CONFLICT does not support deferrable unique constraints/exclusion constraints as arbiters -BEGIN; -INSERT INTO deferred_excl VALUES(2); -- no fail here -COMMIT; -- should fail here -ERROR: conflicting key value violates exclusion constraint "deferred_excl_con" -DETAIL: Key (f1)=(2) conflicts with existing key (f1)=(2). -BEGIN; -INSERT INTO deferred_excl VALUES(3); -INSERT INTO deferred_excl VALUES(3); -- no fail here -COMMIT; -- should fail here -ERROR: conflicting key value violates exclusion constraint "deferred_excl_con" -DETAIL: Key (f1)=(3) conflicts with existing key (f1)=(3). --- bug #13148: deferred constraint versus HOT update -BEGIN; -INSERT INTO deferred_excl VALUES(2, 1); -- no fail here -DELETE FROM deferred_excl WHERE f1 = 2 AND f2 IS NULL; -- remove old row -UPDATE deferred_excl SET f2 = 2 WHERE f1 = 2; -COMMIT; -- should not fail -SELECT * FROM deferred_excl; - f1 | f2 -----+---- - 1 | - 2 | 2 -(2 rows) - -ALTER TABLE deferred_excl DROP CONSTRAINT deferred_excl_con; --- This should fail, but worth testing because of HOT updates -UPDATE deferred_excl SET f1 = 3; -ALTER TABLE deferred_excl ADD EXCLUDE (f1 WITH =); -ERROR: could not create exclusion constraint "deferred_excl_f1_excl" -DETAIL: Key (f1)=(3) conflicts with key (f1)=(3). -DROP TABLE deferred_excl; --- Comments --- Setup a low-level role to enforce non-superuser checks. 
-CREATE ROLE regress_constraint_comments; -SET SESSION AUTHORIZATION regress_constraint_comments; -CREATE TABLE constraint_comments_tbl (a int CONSTRAINT the_constraint CHECK (a > 0)); -CREATE DOMAIN constraint_comments_dom AS int CONSTRAINT the_constraint CHECK (value > 0); -COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'yes, the comment'; -COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; --- no such constraint -COMMENT ON CONSTRAINT no_constraint ON constraint_comments_tbl IS 'yes, the comment'; -ERROR: constraint "no_constraint" for table "constraint_comments_tbl" does not exist -COMMENT ON CONSTRAINT no_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; -ERROR: constraint "no_constraint" for domain constraint_comments_dom does not exist --- no such table/domain -COMMENT ON CONSTRAINT the_constraint ON no_comments_tbl IS 'bad comment'; -ERROR: relation "no_comments_tbl" does not exist -COMMENT ON CONSTRAINT the_constraint ON DOMAIN no_comments_dom IS 'another bad comment'; -ERROR: type "no_comments_dom" does not exist -COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS NULL; -COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS NULL; --- unauthorized user -RESET SESSION AUTHORIZATION; -CREATE ROLE regress_constraint_comments_noaccess; -SET SESSION AUTHORIZATION regress_constraint_comments_noaccess; -COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'no, the comment'; -ERROR: must be owner of relation constraint_comments_tbl -COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'no, another comment'; -ERROR: must be owner of type constraint_comments_dom -RESET SESSION AUTHORIZATION; -DROP TABLE constraint_comments_tbl; -DROP DOMAIN constraint_comments_dom; -DROP ROLE regress_constraint_comments; -DROP ROLE regress_constraint_comments_noaccess; diff --git a/src/test/regress/output/copy.source b/src/test/regress/output/copy.source deleted file mode 100644 index 931e7b2e699..00000000000 --- a/src/test/regress/output/copy.source +++ /dev/null @@ -1,248 +0,0 @@ --- --- COPY --- --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR --- CLASS POPULATION --- (any resemblance to real life is purely coincidental) --- -\set filename :abs_srcdir '/data/agg.data' -COPY aggtest FROM :'filename'; -\set filename :abs_srcdir '/data/onek.data' -COPY onek FROM :'filename'; -\set filename :abs_builddir '/results/onek.data' -COPY onek TO :'filename'; -DELETE FROM onek; -COPY onek FROM :'filename'; -\set filename :abs_srcdir '/data/tenk.data' -COPY tenk1 FROM :'filename'; -\set filename :abs_srcdir '/data/rect.data' -COPY slow_emp4000 FROM :'filename'; -\set filename :abs_srcdir '/data/person.data' -COPY person FROM :'filename'; -\set filename :abs_srcdir '/data/emp.data' -COPY emp FROM :'filename'; -\set filename :abs_srcdir '/data/student.data' -COPY student FROM :'filename'; -\set filename :abs_srcdir '/data/stud_emp.data' -COPY stud_emp FROM :'filename'; -\set filename :abs_srcdir '/data/streets.data' -COPY road FROM :'filename'; -\set filename :abs_srcdir '/data/real_city.data' -COPY real_city FROM :'filename'; -\set filename :abs_srcdir '/data/hash.data' -COPY hash_i4_heap FROM :'filename'; -COPY hash_name_heap FROM :'filename'; -COPY hash_txt_heap FROM :'filename'; -COPY hash_f8_heap FROM :'filename'; -\set filename :abs_srcdir '/data/tsearch.data' -COPY 
test_tsvector FROM :'filename'; -\set filename :abs_srcdir '/data/jsonb.data' -COPY testjsonb FROM :'filename'; --- the data in this file has a lot of duplicates in the index key --- fields, leading to long bucket chains and lots of table expansion. --- this is therefore a stress test of the bucket overflow code (unlike --- the data in hash.data, which has unique index keys). --- --- \set filename :abs_srcdir '/data/hashovfl.data' --- COPY hash_ovfl_heap FROM :'filename'; -\set filename :abs_srcdir '/data/desc.data' -COPY bt_i4_heap FROM :'filename'; -\set filename :abs_srcdir '/data/hash.data' -COPY bt_name_heap FROM :'filename'; -\set filename :abs_srcdir '/data/desc.data' -COPY bt_txt_heap FROM :'filename'; -\set filename :abs_srcdir '/data/hash.data' -COPY bt_f8_heap FROM :'filename'; -\set filename :abs_srcdir '/data/array.data' -COPY array_op_test FROM :'filename'; -\set filename :abs_srcdir '/data/array.data' -COPY array_index_op_test FROM :'filename'; --- analyze all the data we just loaded, to ensure plan consistency --- in later tests -ANALYZE aggtest; -ANALYZE onek; -ANALYZE tenk1; -ANALYZE slow_emp4000; -ANALYZE person; -ANALYZE emp; -ANALYZE student; -ANALYZE stud_emp; -ANALYZE road; -ANALYZE real_city; -ANALYZE hash_i4_heap; -ANALYZE hash_name_heap; -ANALYZE hash_txt_heap; -ANALYZE hash_f8_heap; -ANALYZE test_tsvector; -ANALYZE bt_i4_heap; -ANALYZE bt_name_heap; -ANALYZE bt_txt_heap; -ANALYZE bt_f8_heap; -ANALYZE array_op_test; -ANALYZE array_index_op_test; ---- test copying in CSV mode with various styles ---- of embedded line ending characters -create temp table copytest ( - style text, - test text, - filler int); -insert into copytest values('DOS',E'abc\r\ndef',1); -insert into copytest values('Unix',E'abc\ndef',2); -insert into copytest values('Mac',E'abc\rdef',3); -insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4); -\set filename :abs_builddir '/results/copytest.csv' -copy copytest to :'filename' csv; -create temp table copytest2 (like copytest); -copy copytest2 from :'filename' csv; -select * from copytest except select * from copytest2; - style | test | filler --------+------+-------- -(0 rows) - -truncate copytest2; ---- same test but with an escape char different from quote char -copy copytest to :'filename' csv quote '''' escape E'\\'; -copy copytest2 from :'filename' csv quote '''' escape E'\\'; -select * from copytest except select * from copytest2; - style | test | filler --------+------+-------- -(0 rows) - --- test header line feature -create temp table copytest3 ( - c1 int, - "col with , comma" text, - "col with "" quote" int); -copy copytest3 from stdin csv header; -copy copytest3 to stdout csv header; -c1,"col with , comma","col with "" quote" -1,a,1 -2,b,2 --- test copy from with a partitioned table -create table parted_copytest ( - a int, - b int, - c text -) partition by list (b); -create table parted_copytest_a1 (c text, b int, a int); -create table parted_copytest_a2 (a int, c text, b int); -alter table parted_copytest attach partition parted_copytest_a1 for values in(1); -alter table parted_copytest attach partition parted_copytest_a2 for values in(2); --- We must insert enough rows to trigger multi-inserts. These are only --- enabled adaptively when there are few enough partition changes. 
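An aside before the bulk loads that follow: the copytest round trip above generalizes to any quote/escape pair, and the EXCEPT probe is the entire correctness check. As a standalone sketch (the file path is illustrative; copytest and copytest2 are the temp tables created above):

    \set filename '/tmp/copytest.csv'
    copy copytest to :'filename' csv quote '''' escape E'\\';
    copy copytest2 from :'filename' csv quote '''' escape E'\\';
    -- an empty result means every value survived the round trip
    select * from copytest except select * from copytest2;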
-insert into parted_copytest select x,1,'One' from generate_series(1,1000) x; -insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x; -insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x; -\set filename :abs_builddir '/results/parted_copytest.csv' -copy (select * from parted_copytest order by a) to :'filename'; -truncate parted_copytest; -copy parted_copytest from :'filename'; --- Ensure COPY FREEZE errors for partitioned tables. -begin; -truncate parted_copytest; -copy parted_copytest from :'filename' (freeze); -ERROR: cannot perform COPY FREEZE on a partitioned table -rollback; -select tableoid::regclass,count(*),sum(a) from parted_copytest -group by tableoid order by tableoid::regclass::name; - tableoid | count | sum ---------------------+-------+-------- - parted_copytest_a1 | 1010 | 510655 - parted_copytest_a2 | 10 | 10055 -(2 rows) - -truncate parted_copytest; --- create before insert row trigger on parted_copytest_a2 -create function part_ins_func() returns trigger language plpgsql as $$ -begin - return new; -end; -$$; -create trigger part_ins_trig - before insert on parted_copytest_a2 - for each row - execute procedure part_ins_func(); -copy parted_copytest from :'filename'; -select tableoid::regclass,count(*),sum(a) from parted_copytest -group by tableoid order by tableoid::regclass::name; - tableoid | count | sum ---------------------+-------+-------- - parted_copytest_a1 | 1010 | 510655 - parted_copytest_a2 | 10 | 10055 -(2 rows) - -truncate table parted_copytest; -create index on parted_copytest (b); -drop trigger part_ins_trig on parted_copytest_a2; -copy parted_copytest from stdin; --- Ensure index entries were properly added during the copy. -select * from parted_copytest where b = 1; - a | b | c ----+---+------ - 1 | 1 | str1 -(1 row) - -select * from parted_copytest where b = 2; - a | b | c ----+---+------ - 2 | 2 | str2 -(1 row) - -drop table parted_copytest; --- --- Progress reporting for COPY --- -create table tab_progress_reporting ( - name text, - age int4, - location point, - salary int4, - manager name -); --- Add a trigger to catch and print the contents of the catalog view --- pg_stat_progress_copy during data insertion. This allows to test --- the validation of some progress reports for COPY FROM where the trigger --- would fire. -create function notice_after_tab_progress_reporting() returns trigger AS -$$ -declare report record; -begin - -- The fields ignored here are the ones that may not remain - -- consistent across multiple runs. The sizes reported may differ - -- across platforms, so just check if these are strictly positive. - with progress_data as ( - select - relid::regclass::text as relname, - command, - type, - bytes_processed > 0 as has_bytes_processed, - bytes_total > 0 as has_bytes_total, - tuples_processed, - tuples_excluded - from pg_stat_progress_copy - where pid = pg_backend_pid()) - select into report (to_jsonb(r)) as value - from progress_data r; - - raise info 'progress: %', report.value::text; - return new; -end; -$$ language plpgsql; -create trigger check_after_tab_progress_reporting - after insert on tab_progress_reporting - for each statement - execute function notice_after_tab_progress_reporting(); --- Generate COPY FROM report with PIPE. 
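The FILE variant a few lines below builds its input path with the psql-variable idiom that these converted scripts now use in place of the old @abs_srcdir@ substitution: \getenv imports an environment variable, adjacent values in \set concatenate, and :'var' interpolates the result as a quoted literal. In isolation:

    \getenv abs_srcdir PG_ABS_SRCDIR
    \set filename :abs_srcdir '/data/emp.data'
    -- :'filename' expands to a properly quoted SQL string literal
    copy tab_progress_reporting from :'filename' where (salary < 2000);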
-copy tab_progress_reporting from stdin; -INFO: progress: {"type": "PIPE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": false, "tuples_excluded": 0, "tuples_processed": 3, "has_bytes_processed": true} --- Generate COPY FROM report with FILE, with some excluded tuples. -truncate tab_progress_reporting; -\set filename :abs_srcdir '/data/emp.data' -copy tab_progress_reporting from :'filename' - where (salary < 2000); -INFO: progress: {"type": "FILE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": true, "tuples_excluded": 1, "tuples_processed": 2, "has_bytes_processed": true} -drop trigger check_after_tab_progress_reporting on tab_progress_reporting; -drop function notice_after_tab_progress_reporting(); -drop table tab_progress_reporting; diff --git a/src/test/regress/output/create_function_0.source b/src/test/regress/output/create_function_0.source deleted file mode 100644 index 6e96d6c5d66..00000000000 --- a/src/test/regress/output/create_function_0.source +++ /dev/null @@ -1,104 +0,0 @@ --- --- CREATE_FUNCTION_0 --- --- directory path and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set autoinclib :libdir '/autoinc' :dlsuffix -\set refintlib :libdir '/refint' :dlsuffix -\set regresslib :libdir '/regress' :dlsuffix --- Create a bunch of C functions that will be used by later tests: -CREATE FUNCTION check_primary_key () - RETURNS trigger - AS :'refintlib' - LANGUAGE C; -CREATE FUNCTION check_foreign_key () - RETURNS trigger - AS :'refintlib' - LANGUAGE C; -CREATE FUNCTION autoinc () - RETURNS trigger - AS :'autoinclib' - LANGUAGE C; -CREATE FUNCTION trigger_return_old () - RETURNS trigger - AS :'regresslib' - LANGUAGE C; -CREATE FUNCTION ttdummy () - RETURNS trigger - AS :'regresslib' - LANGUAGE C; -CREATE FUNCTION set_ttdummy (int4) - RETURNS int4 - AS :'regresslib' - LANGUAGE C STRICT; -CREATE FUNCTION make_tuple_indirect (record) - RETURNS record - AS :'regresslib' - LANGUAGE C STRICT; -CREATE FUNCTION test_atomic_ops() - RETURNS bool - AS :'regresslib' - LANGUAGE C; -CREATE FUNCTION test_fdw_handler() - RETURNS fdw_handler - AS :'regresslib', 'test_fdw_handler' - LANGUAGE C; -CREATE FUNCTION test_support_func(internal) - RETURNS internal - AS :'regresslib', 'test_support_func' - LANGUAGE C STRICT; -CREATE FUNCTION test_opclass_options_func(internal) - RETURNS void - AS :'regresslib', 'test_opclass_options_func' - LANGUAGE C; -CREATE FUNCTION test_enc_conversion(bytea, name, name, bool, validlen OUT int, result OUT bytea) - AS :'regresslib', 'test_enc_conversion' - LANGUAGE C STRICT; -CREATE FUNCTION binary_coercible(oid, oid) - RETURNS bool - AS :'regresslib', 'binary_coercible' - LANGUAGE C STRICT STABLE PARALLEL SAFE; --- Things that shouldn't work: -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'SELECT ''not an integer'';'; -ERROR: return type mismatch in function declared to return integer -DETAIL: Actual return type is text. -CONTEXT: SQL function "test1" -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'not even SQL'; -ERROR: syntax error at or near "not" -LINE 2: AS 'not even SQL'; - ^ -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'SELECT 1, 2, 3;'; -ERROR: return type mismatch in function declared to return integer -DETAIL: Final statement must return exactly one column. 
-CONTEXT: SQL function "test1" -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'SELECT $2;'; -ERROR: there is no parameter $2 -LINE 2: AS 'SELECT $2;'; - ^ -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL - AS 'a', 'b'; -ERROR: only one AS item needed for language "sql" -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C - AS 'nosuchfile'; -ERROR: could not access file "nosuchfile": No such file or directory --- To produce stable regression test output, we have to filter the name --- of the regresslib file out of the error message in this test. -\set VERBOSITY sqlstate -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C - AS :'regresslib', 'nosuchsymbol'; -ERROR: 42883 -\set VERBOSITY default -SELECT regexp_replace(:'LAST_ERROR_MESSAGE', 'file ".*"', 'file "..."'); - regexp_replace ------------------------------------------------------- - could not find function "nosuchsymbol" in file "..." -(1 row) - -CREATE FUNCTION test1 (int) RETURNS int LANGUAGE internal - AS 'nosuch'; -ERROR: there is no built-in function named "nosuch" diff --git a/src/test/regress/output/create_function_1.source b/src/test/regress/output/create_function_1.source deleted file mode 100644 index 5345ed08400..00000000000 --- a/src/test/regress/output/create_function_1.source +++ /dev/null @@ -1,30 +0,0 @@ --- --- CREATE_FUNCTION_1 --- --- directory path and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix --- Create C functions needed by create_type.sql -CREATE FUNCTION widget_in(cstring) - RETURNS widget - AS :'regresslib' - LANGUAGE C STRICT IMMUTABLE; -NOTICE: type "widget" is not yet defined -DETAIL: Creating a shell type definition. -CREATE FUNCTION widget_out(widget) - RETURNS cstring - AS :'regresslib' - LANGUAGE C STRICT IMMUTABLE; -NOTICE: argument type widget is only a shell -CREATE FUNCTION int44in(cstring) - RETURNS city_budget - AS :'regresslib' - LANGUAGE C STRICT IMMUTABLE; -NOTICE: type "city_budget" is not yet defined -DETAIL: Creating a shell type definition. 
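Each NOTICE above is the expected half of the shell-type dance: referencing a not-yet-defined type from an I/O function creates a shell type, and a later CREATE TYPE fills it in. The completing step looks roughly like this; treat the parameters as illustrative (the length must match the C-side struct, 24 bytes for widget's point plus float8):

    CREATE TYPE widget (
        internallength = 24,   -- illustrative; must match the C struct
        input = widget_in,
        output = widget_out,
        alignment = double
    );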
-CREATE FUNCTION int44out(city_budget) - RETURNS cstring - AS :'regresslib' - LANGUAGE C STRICT IMMUTABLE; -NOTICE: argument type city_budget is only a shell diff --git a/src/test/regress/output/create_function_2.source b/src/test/regress/output/create_function_2.source deleted file mode 100644 index a366294add7..00000000000 --- a/src/test/regress/output/create_function_2.source +++ /dev/null @@ -1,73 +0,0 @@ --- --- CREATE_FUNCTION_2 --- --- directory path and dlsuffix are passed to us in environment variables -\getenv libdir PG_LIBDIR -\getenv dlsuffix PG_DLSUFFIX -\set regresslib :libdir '/regress' :dlsuffix -CREATE FUNCTION hobbies(person) - RETURNS setof hobbies_r - AS 'select * from hobbies_r where person = $1.name' - LANGUAGE SQL; -CREATE FUNCTION hobby_construct(text, text) - RETURNS hobbies_r - AS 'select $1 as name, $2 as hobby' - LANGUAGE SQL; -CREATE FUNCTION hobby_construct_named(name text, hobby text) - RETURNS hobbies_r - AS 'select name, hobby' - LANGUAGE SQL; -CREATE FUNCTION hobbies_by_name(hobbies_r.name%TYPE) - RETURNS hobbies_r.person%TYPE - AS 'select person from hobbies_r where name = $1' - LANGUAGE SQL; -NOTICE: type reference hobbies_r.name%TYPE converted to text -NOTICE: type reference hobbies_r.person%TYPE converted to text -CREATE FUNCTION equipment(hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = $1.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = equipment_named.hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1a(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_1a.hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1b(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_1c(hobby hobbies_r) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = hobby.name' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_2a(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where hobby = equipment_named_ambiguous_2a.hobby' - LANGUAGE SQL; -CREATE FUNCTION equipment_named_ambiguous_2b(hobby text) - RETURNS setof equipment_r - AS 'select * from equipment_r where equipment_r.hobby = hobby' - LANGUAGE SQL; -CREATE FUNCTION pt_in_widget(point, widget) - RETURNS bool - AS :'regresslib' - LANGUAGE C STRICT; -CREATE FUNCTION overpaid(emp) - RETURNS bool - AS :'regresslib' - LANGUAGE C STRICT; -CREATE FUNCTION interpt_pp(path, path) - RETURNS point - AS :'regresslib' - LANGUAGE C STRICT; -CREATE FUNCTION reverse_name(name) - RETURNS name - AS :'regresslib' - LANGUAGE C STRICT; --- --- Function dynamic loading --- -LOAD :'regresslib'; diff --git a/src/test/regress/output/largeobject.source b/src/test/regress/output/largeobject.source deleted file mode 100644 index f461ca3b9f4..00000000000 --- a/src/test/regress/output/largeobject.source +++ /dev/null @@ -1,496 +0,0 @@ --- --- Test large object support --- --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR --- ensure consistent test output regardless of the default bytea format -SET bytea_output TO escape; --- Load a file -CREATE TABLE lotest_stash_values (loid oid, fd integer); --- lo_creat(mode integer) returns oid --- The mode arg to 
lo_creat is unused, some vestigal holdover from ancient times --- returns the large object id -INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); --- Test ALTER LARGE OBJECT -CREATE ROLE regress_lo_user; -DO $$ - BEGIN - EXECUTE 'ALTER LARGE OBJECT ' || (select loid from lotest_stash_values) - || ' OWNER TO regress_lo_user'; - END -$$; -SELECT - rol.rolname -FROM - lotest_stash_values s - JOIN pg_largeobject_metadata lo ON s.loid = lo.oid - JOIN pg_authid rol ON lo.lomowner = rol.oid; - rolname ------------------ - regress_lo_user -(1 row) - --- NOTE: large objects require transactions -BEGIN; --- lo_open(lobjId oid, mode integer) returns integer --- The mode parameter to lo_open uses two constants: --- INV_READ = 0x20000 --- INV_WRITE = 0x40000 --- The return value is a file descriptor-like value which remains valid for the --- transaction. -UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); --- loread/lowrite names are wonky, different from other functions which are lo_* --- lowrite(fd integer, data bytea) returns integer --- the integer is the number of bytes written -SELECT lowrite(fd, ' -I wandered lonely as a cloud -That floats on high o''er vales and hills, -When all at once I saw a crowd, -A host, of golden daffodils; -Beside the lake, beneath the trees, -Fluttering and dancing in the breeze. - -Continuous as the stars that shine -And twinkle on the milky way, -They stretched in never-ending line -Along the margin of a bay: -Ten thousand saw I at a glance, -Tossing their heads in sprightly dance. - -The waves beside them danced; but they -Out-did the sparkling waves in glee: -A poet could not but be gay, -In such a jocund company: -I gazed--and gazed--but little thought -What wealth the show to me had brought: - -For oft, when on my couch I lie -In vacant or in pensive mood, -They flash upon that inward eye -Which is the bliss of solitude; -And then my heart with pleasure fills, -And dances with the daffodils. - - -- William Wordsworth -') FROM lotest_stash_values; - lowrite ---------- - 848 -(1 row) - --- lo_close(fd integer) returns integer --- return value is 0 for success, or <0 for error (actually only -1, but...) -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- Copy to another large object. --- Note: we intentionally don't remove the object created here; --- it's left behind to help test pg_dump. -SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values -\gset --- Add a comment to it, as well, for pg_dump/pg_upgrade testing. 
-COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud'; --- Read out a portion -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); --- lo_lseek(fd integer, offset integer, whence integer) returns integer --- offset is in bytes, whence is one of three values: --- SEEK_SET (= 0) meaning relative to beginning --- SEEK_CUR (= 1) meaning relative to current position --- SEEK_END (= 2) meaning relative to end (offset better be negative) --- returns current position in file -SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values; - lo_lseek ----------- - 104 -(1 row) - --- loread/lowrite names are wonky, different from other functions which are lo_* --- loread(fd integer, len integer) returns bytea -SELECT loread(fd, 28) FROM lotest_stash_values; - loread ------------------------------- - A host, of golden daffodils; -(1 row) - -SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; - lo_lseek ----------- - 113 -(1 row) - -SELECT lowrite(fd, 'n') FROM lotest_stash_values; - lowrite ---------- - 1 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 114 -(1 row) - -SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values; - lo_lseek ----------- - 104 -(1 row) - -SELECT loread(fd, 28) FROM lotest_stash_values; - loread ------------------------------- - A host, on golden daffodils; -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- Test resource management -BEGIN; -SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; - lo_open ---------- - 0 -(1 row) - -ABORT; -\set filename :abs_builddir '/results/invalid/path' -\set dobody 'DECLARE loid oid; BEGIN ' -\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' -\set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); ' -\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' -\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' -DO :'dobody'; -NOTICE: could not open file, as expected --- Test truncation. -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); -SELECT lo_truncate(fd, 11) FROM lotest_stash_values; - lo_truncate -------------- - 0 -(1 row) - -SELECT loread(fd, 15) FROM lotest_stash_values; - loread ----------------- - \012I wandered -(1 row) - -SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; - lo_truncate -------------- - 0 -(1 row) - -SELECT loread(fd, 10) FROM lotest_stash_values; - loread ------------------------------------------- - \000\000\000\000\000\000\000\000\000\000 -(1 row) - -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - lo_lseek ----------- - 10000 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 10000 -(1 row) - -SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; - lo_truncate -------------- - 0 -(1 row) - -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - lo_lseek ----------- - 5000 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 5000 -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- Test 64-bit large object functions. 
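The block that follows exercises the 64-bit variants of the same calls, using the mode and whence constants spelled out above. Collected as a sketch, with :loid and :fd standing in for the values the test stashes in lotest_stash_values:

    BEGIN;  -- descriptors are only valid within a transaction
    -- INV_READ | INV_WRITE, as above
    SELECT lo_open(:loid, CAST(x'20000' | x'40000' AS integer));
    -- whence: 0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END
    SELECT lo_lseek64(:fd, 4294967296, 0);  -- position at the 4GB mark
    SELECT lo_tell64(:fd);                  -- reports 4294967296
    SELECT lo_close(:fd);
    COMMIT;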
-BEGIN; -UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); -SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values; - lo_lseek64 ------------- - 4294967296 -(1 row) - -SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values; - lowrite ---------- - 10 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 4294967306 -(1 row) - -SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values; - lo_lseek64 ------------- - 4294967296 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 4294967296 -(1 row) - -SELECT loread(fd, 10) FROM lotest_stash_values; - loread ------------- - offset:4GB -(1 row) - -SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values; - lo_truncate64 ---------------- - 0 -(1 row) - -SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; - lo_lseek64 ------------- - 5000000000 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 5000000000 -(1 row) - -SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values; - lo_truncate64 ---------------- - 0 -(1 row) - -SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; - lo_lseek64 ------------- - 3000000000 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 3000000000 -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- lo_unlink(lobjId oid) returns integer --- return value appears to always be 1 -SELECT lo_unlink(loid) from lotest_stash_values; - lo_unlink ------------ - 1 -(1 row) - -TRUNCATE lotest_stash_values; -\set filename :abs_srcdir '/data/tenk.data' -INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); --- verify length of large object -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - lo_lseek ----------- - 670800 -(1 row) - --- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block --- edge case -SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; - lo_lseek ----------- - 2030 -(1 row) - --- this should get half of the value from page 0 and half from page 1 of the --- large object -SELECT loread(fd, 36) FROM lotest_stash_values; - loread ------------------------------------------------------------------ - AAA\011FBAAAA\011VVVVxx\0122513\01132\0111\0111\0113\01113\0111 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 2066 -(1 row) - -SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; - lo_lseek ----------- - 2040 -(1 row) - -SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; - lowrite ---------- - 16 -(1 row) - -SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; - lo_lseek ----------- - 2030 -(1 row) - -SELECT loread(fd, 36) FROM lotest_stash_values; - loread ------------------------------------------------------ - AAA\011FBAAAAabcdefghijklmnop1\0111\0113\01113\0111 -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; -\set filename :abs_builddir '/results/lotest.txt' -SELECT lo_export(loid, :'filename') FROM lotest_stash_values; - lo_export ------------ - 1 -(1 row) - -\lo_import :filename -\set newloid :LASTOID --- just make sure \lo_export does not barf -\set filename :abs_builddir '/results/lotest2.txt' -\lo_export :newloid :filename --- This is a hack to test that export/import are reversible --- This uses knowledge about the inner workings of 
large object mechanism --- which should not be used outside it. This makes it a HACK -SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) -EXCEPT -SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; - pageno | data ---------+------ -(0 rows) - -SELECT lo_unlink(loid) FROM lotest_stash_values; - lo_unlink ------------ - 1 -(1 row) - -TRUNCATE lotest_stash_values; -\lo_unlink :newloid -\set filename :abs_builddir '/results/lotest.txt' -\lo_import :filename -\set newloid_1 :LASTOID -SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2 -\gset -SELECT md5(lo_get(:newloid_1)) = md5(lo_get(:newloid_2)); - ?column? ----------- - t -(1 row) - -SELECT lo_get(:newloid_1, 0, 20); - lo_get -------------------------------------------- - 8800\0110\0110\0110\0110\0110\0110\011800 -(1 row) - -SELECT lo_get(:newloid_1, 10, 20); - lo_get -------------------------------------------- - \0110\0110\0110\011800\011800\0113800\011 -(1 row) - -SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex')); - lo_put --------- - -(1 row) - -SELECT lo_get(:newloid_1, 0, 20); - lo_get -------------------------------------------------- - 8800\011\257\257\257\2570\0110\0110\0110\011800 -(1 row) - -SELECT lo_put(:newloid_1, 4294967310, 'foo'); - lo_put --------- - -(1 row) - -SELECT lo_get(:newloid_1); -ERROR: large object read request is too large -SELECT lo_get(:newloid_1, 4294967294, 100); - lo_get ---------------------------------------------------------------------- - \000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000foo -(1 row) - -\lo_unlink :newloid_1 -\lo_unlink :newloid_2 --- This object is left in the database for pg_dump test purposes -SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid -\gset -SET bytea_output TO hex; -SELECT lo_get(:newloid); - lo_get ------------- - \xdeadbeef -(1 row) - --- Create one more object that we leave behind for testing pg_dump/pg_upgrade; --- this one intentionally has an OID in the system range -SELECT lo_create(2121); - lo_create ------------ - 2121 -(1 row) - -COMMENT ON LARGE OBJECT 2121 IS 'testing comments'; --- Clean up -DROP TABLE lotest_stash_values; -DROP ROLE regress_lo_user; diff --git a/src/test/regress/output/largeobject_1.source b/src/test/regress/output/largeobject_1.source deleted file mode 100644 index a9725c375d5..00000000000 --- a/src/test/regress/output/largeobject_1.source +++ /dev/null @@ -1,496 +0,0 @@ --- --- Test large object support --- --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR --- ensure consistent test output regardless of the default bytea format -SET bytea_output TO escape; --- Load a file -CREATE TABLE lotest_stash_values (loid oid, fd integer); --- lo_creat(mode integer) returns oid --- The mode arg to lo_creat is unused, some vestigal holdover from ancient times --- returns the large object id -INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); --- Test ALTER LARGE OBJECT -CREATE ROLE regress_lo_user; -DO $$ - BEGIN - EXECUTE 'ALTER LARGE OBJECT ' || (select loid from lotest_stash_values) - || ' OWNER TO regress_lo_user'; - END -$$; -SELECT - rol.rolname -FROM - lotest_stash_values s - JOIN pg_largeobject_metadata lo ON s.loid = lo.oid - JOIN pg_authid rol ON lo.lomowner = rol.oid; - rolname ------------------ - regress_lo_user -(1 row) - --- NOTE: large objects require transactions -BEGIN; --- lo_open(lobjId oid, mode integer) returns integer --- The mode parameter to lo_open uses two 
constants: --- INV_READ = 0x20000 --- INV_WRITE = 0x40000 --- The return value is a file descriptor-like value which remains valid for the --- transaction. -UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); --- loread/lowrite names are wonky, different from other functions which are lo_* --- lowrite(fd integer, data bytea) returns integer --- the integer is the number of bytes written -SELECT lowrite(fd, ' -I wandered lonely as a cloud -That floats on high o''er vales and hills, -When all at once I saw a crowd, -A host, of golden daffodils; -Beside the lake, beneath the trees, -Fluttering and dancing in the breeze. - -Continuous as the stars that shine -And twinkle on the milky way, -They stretched in never-ending line -Along the margin of a bay: -Ten thousand saw I at a glance, -Tossing their heads in sprightly dance. - -The waves beside them danced; but they -Out-did the sparkling waves in glee: -A poet could not but be gay, -In such a jocund company: -I gazed--and gazed--but little thought -What wealth the show to me had brought: - -For oft, when on my couch I lie -In vacant or in pensive mood, -They flash upon that inward eye -Which is the bliss of solitude; -And then my heart with pleasure fills, -And dances with the daffodils. - - -- William Wordsworth -') FROM lotest_stash_values; - lowrite ---------- - 848 -(1 row) - --- lo_close(fd integer) returns integer --- return value is 0 for success, or <0 for error (actually only -1, but...) -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- Copy to another large object. --- Note: we intentionally don't remove the object created here; --- it's left behind to help test pg_dump. -SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values -\gset --- Add a comment to it, as well, for pg_dump/pg_upgrade testing. 
-COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud'; --- Read out a portion -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); --- lo_lseek(fd integer, offset integer, whence integer) returns integer --- offset is in bytes, whence is one of three values: --- SEEK_SET (= 0) meaning relative to beginning --- SEEK_CUR (= 1) meaning relative to current position --- SEEK_END (= 2) meaning relative to end (offset better be negative) --- returns current position in file -SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values; - lo_lseek ----------- - 104 -(1 row) - --- loread/lowrite names are wonky, different from other functions which are lo_* --- loread(fd integer, len integer) returns bytea -SELECT loread(fd, 28) FROM lotest_stash_values; - loread ------------------------------- - A host, of golden daffodils; -(1 row) - -SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; - lo_lseek ----------- - 113 -(1 row) - -SELECT lowrite(fd, 'n') FROM lotest_stash_values; - lowrite ---------- - 1 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 114 -(1 row) - -SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values; - lo_lseek ----------- - 104 -(1 row) - -SELECT loread(fd, 28) FROM lotest_stash_values; - loread ------------------------------- - A host, on golden daffodils; -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- Test resource management -BEGIN; -SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; - lo_open ---------- - 0 -(1 row) - -ABORT; -\set filename :abs_builddir '/results/invalid/path' -\set dobody 'DECLARE loid oid; BEGIN ' -\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' -\set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); ' -\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' -\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' -DO :'dobody'; -NOTICE: could not open file, as expected --- Test truncation. -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); -SELECT lo_truncate(fd, 11) FROM lotest_stash_values; - lo_truncate -------------- - 0 -(1 row) - -SELECT loread(fd, 15) FROM lotest_stash_values; - loread ----------------- - \012I wandered -(1 row) - -SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; - lo_truncate -------------- - 0 -(1 row) - -SELECT loread(fd, 10) FROM lotest_stash_values; - loread ------------------------------------------- - \000\000\000\000\000\000\000\000\000\000 -(1 row) - -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - lo_lseek ----------- - 10000 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 10000 -(1 row) - -SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; - lo_truncate -------------- - 0 -(1 row) - -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - lo_lseek ----------- - 5000 -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 5000 -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- Test 64-bit large object functions. 
-BEGIN; -UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); -SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values; - lo_lseek64 ------------- - 4294967296 -(1 row) - -SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values; - lowrite ---------- - 10 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 4294967306 -(1 row) - -SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values; - lo_lseek64 ------------- - 4294967296 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 4294967296 -(1 row) - -SELECT loread(fd, 10) FROM lotest_stash_values; - loread ------------- - offset:4GB -(1 row) - -SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values; - lo_truncate64 ---------------- - 0 -(1 row) - -SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; - lo_lseek64 ------------- - 5000000000 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 5000000000 -(1 row) - -SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values; - lo_truncate64 ---------------- - 0 -(1 row) - -SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; - lo_lseek64 ------------- - 3000000000 -(1 row) - -SELECT lo_tell64(fd) FROM lotest_stash_values; - lo_tell64 ------------- - 3000000000 -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; --- lo_unlink(lobjId oid) returns integer --- return value appears to always be 1 -SELECT lo_unlink(loid) from lotest_stash_values; - lo_unlink ------------ - 1 -(1 row) - -TRUNCATE lotest_stash_values; -\set filename :abs_srcdir '/data/tenk.data' -INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); -BEGIN; -UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); --- verify length of large object -SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; - lo_lseek ----------- - 680800 -(1 row) - --- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block --- edge case -SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; - lo_lseek ----------- - 2030 -(1 row) - --- this should get half of the value from page 0 and half from page 1 of the --- large object -SELECT loread(fd, 36) FROM lotest_stash_values; - loread --------------------------------------------------------------- - 44\011144\0111144\0114144\0119144\01188\01189\011SNAAAA\011F -(1 row) - -SELECT lo_tell(fd) FROM lotest_stash_values; - lo_tell ---------- - 2066 -(1 row) - -SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; - lo_lseek ----------- - 2040 -(1 row) - -SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; - lowrite ---------- - 16 -(1 row) - -SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; - lo_lseek ----------- - 2030 -(1 row) - -SELECT loread(fd, 36) FROM lotest_stash_values; - loread --------------------------------------------------- - 44\011144\011114abcdefghijklmnop9\011SNAAAA\011F -(1 row) - -SELECT lo_close(fd) FROM lotest_stash_values; - lo_close ----------- - 0 -(1 row) - -END; -\set filename :abs_builddir '/results/lotest.txt' -SELECT lo_export(loid, :'filename') FROM lotest_stash_values; - lo_export ------------ - 1 -(1 row) - -\lo_import :filename -\set newloid :LASTOID --- just make sure \lo_export does not barf -\set filename :abs_builddir '/results/lotest2.txt' -\lo_export :newloid :filename --- This is a hack to test that export/import are reversible --- This uses knowledge about the inner workings of large 
object mechanism --- which should not be used outside it. This makes it a HACK -SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) -EXCEPT -SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; - pageno | data ---------+------ -(0 rows) - -SELECT lo_unlink(loid) FROM lotest_stash_values; - lo_unlink ------------ - 1 -(1 row) - -TRUNCATE lotest_stash_values; -\lo_unlink :newloid -\set filename :abs_builddir '/results/lotest.txt' -\lo_import :filename -\set newloid_1 :LASTOID -SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2 -\gset -SELECT md5(lo_get(:newloid_1)) = md5(lo_get(:newloid_2)); - ?column? ----------- - t -(1 row) - -SELECT lo_get(:newloid_1, 0, 20); - lo_get -------------------------------------------- - 8800\0110\0110\0110\0110\0110\0110\011800 -(1 row) - -SELECT lo_get(:newloid_1, 10, 20); - lo_get -------------------------------------------- - \0110\0110\0110\011800\011800\0113800\011 -(1 row) - -SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex')); - lo_put --------- - -(1 row) - -SELECT lo_get(:newloid_1, 0, 20); - lo_get -------------------------------------------------- - 8800\011\257\257\257\2570\0110\0110\0110\011800 -(1 row) - -SELECT lo_put(:newloid_1, 4294967310, 'foo'); - lo_put --------- - -(1 row) - -SELECT lo_get(:newloid_1); -ERROR: large object read request is too large -SELECT lo_get(:newloid_1, 4294967294, 100); - lo_get ---------------------------------------------------------------------- - \000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000foo -(1 row) - -\lo_unlink :newloid_1 -\lo_unlink :newloid_2 --- This object is left in the database for pg_dump test purposes -SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid -\gset -SET bytea_output TO hex; -SELECT lo_get(:newloid); - lo_get ------------- - \xdeadbeef -(1 row) - --- Create one more object that we leave behind for testing pg_dump/pg_upgrade; --- this one intentionally has an OID in the system range -SELECT lo_create(2121); - lo_create ------------ - 2121 -(1 row) - -COMMENT ON LARGE OBJECT 2121 IS 'testing comments'; --- Clean up -DROP TABLE lotest_stash_values; -DROP ROLE regress_lo_user; diff --git a/src/test/regress/output/misc.source b/src/test/regress/output/misc.source deleted file mode 100644 index dd65e6ed62f..00000000000 --- a/src/test/regress/output/misc.source +++ /dev/null @@ -1,697 +0,0 @@ --- --- MISC --- --- directory paths are passed to us in environment variables -\getenv abs_srcdir PG_ABS_SRCDIR -\getenv abs_builddir PG_ABS_BUILDDIR --- --- BTREE --- -UPDATE onek - SET unique1 = onek.unique1 + 1; -UPDATE onek - SET unique1 = onek.unique1 - 1; --- --- BTREE partial --- --- UPDATE onek2 --- SET unique1 = onek2.unique1 + 1; ---UPDATE onek2 --- SET unique1 = onek2.unique1 - 1; --- --- BTREE shutting out non-functional updates --- --- the following two tests seem to take a long time on some --- systems. This non-func update stuff needs to be examined --- more closely. 
- jolly (2/22/96) --- -UPDATE tmp - SET stringu1 = reverse_name(onek.stringu1) - FROM onek - WHERE onek.stringu1 = 'JBAAAA' and - onek.stringu1 = tmp.stringu1; -UPDATE tmp - SET stringu1 = reverse_name(onek2.stringu1) - FROM onek2 - WHERE onek2.stringu1 = 'JCAAAA' and - onek2.stringu1 = tmp.stringu1; -DROP TABLE tmp; ---UPDATE person* --- SET age = age + 1; ---UPDATE person* --- SET age = age + 3 --- WHERE name = 'linda'; --- --- copy --- -\set filename :abs_builddir '/results/onek.data' -COPY onek TO :'filename'; -DELETE FROM onek; -COPY onek FROM :'filename'; -SELECT unique1 FROM onek WHERE unique1 < 2 ORDER BY unique1; - unique1 ---------- - 0 - 1 -(2 rows) - -DELETE FROM onek2; -COPY onek2 FROM :'filename'; -SELECT unique1 FROM onek2 WHERE unique1 < 2 ORDER BY unique1; - unique1 ---------- - 0 - 1 -(2 rows) - -\set filename :abs_builddir '/results/stud_emp.data' -COPY BINARY stud_emp TO :'filename'; -DELETE FROM stud_emp; -COPY BINARY stud_emp FROM :'filename'; -SELECT * FROM stud_emp; - name | age | location | salary | manager | gpa | percent --------+-----+------------+--------+---------+-----+--------- - jeff | 23 | (8,7.7) | 600 | sharon | 3.5 | - cim | 30 | (10.5,4.7) | 400 | | 3.4 | - linda | 19 | (0.9,6.1) | 100 | | 2.9 | -(3 rows) - --- COPY aggtest FROM stdin; --- 56 7.8 --- 100 99.097 --- 0 0.09561 --- 42 324.78 --- . --- COPY aggtest TO stdout; --- --- inheritance stress test --- -SELECT * FROM a_star*; - class | a --------+---- - a | 1 - a | 2 - a | - b | 3 - b | 4 - b | - b | - c | 5 - c | 6 - c | - c | - d | 7 - d | 8 - d | 9 - d | 10 - d | - d | 11 - d | 12 - d | 13 - d | - d | - d | - d | 14 - d | - d | - d | - d | - e | 15 - e | 16 - e | 17 - e | - e | 18 - e | - e | - f | 19 - f | 20 - f | 21 - f | 22 - f | - f | 24 - f | 25 - f | 26 - f | - f | - f | - f | 27 - f | - f | - f | - f | -(50 rows) - -SELECT * - FROM b_star* x - WHERE x.b = text 'bumble' or x.a < 3; - class | a | b --------+---+-------- - b | | bumble -(1 row) - -SELECT class, a - FROM c_star* x - WHERE x.c ~ text 'hi'; - class | a --------+---- - c | 5 - c | - d | 7 - d | 8 - d | 10 - d | - d | 12 - d | - d | - d | - e | 15 - e | 16 - e | - e | - f | 19 - f | 20 - f | 21 - f | - f | 24 - f | - f | - f | -(22 rows) - -SELECT class, b, c - FROM d_star* x - WHERE x.a < 100; - class | b | c --------+---------+------------ - d | grumble | hi sunita - d | stumble | hi koko - d | rumble | - d | | hi kristin - d | fumble | - d | | hi avi - d | | - d | | -(8 rows) - -SELECT class, c FROM e_star* x WHERE x.c NOTNULL; - class | c --------+------------- - e | hi carol - e | hi bob - e | hi michelle - e | hi elisa - f | hi claire - f | hi mike - f | hi marcel - f | hi keith - f | hi marc - f | hi allison - f | hi jeff - f | hi carl -(12 rows) - -SELECT * FROM f_star* x WHERE x.c ISNULL; - class | a | c | e | f --------+----+---+-----+------------------------------------------- - f | 22 | | -7 | ((111,555),(222,666),(333,777),(444,888)) - f | 25 | | -9 | - f | 26 | | | ((11111,33333),(22222,44444)) - f | | | -11 | ((1111111,3333333),(2222222,4444444)) - f | 27 | | | - f | | | -12 | - f | | | | ((11111111,33333333),(22222222,44444444)) - f | | | | -(8 rows) - --- grouping and aggregation on inherited sets have been busted in the past... 
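Hence the aggregate probes below; the semantics they rely on is that a plain table reference (or an explicit trailing *) includes every inheritance child, while ONLY restricts the scan to the parent. A self-contained sketch with illustrative names:

    CREATE TABLE base_t (a int);
    CREATE TABLE child_t (extra text) INHERITS (base_t);
    INSERT INTO base_t VALUES (1);
    INSERT INTO child_t VALUES (2, 'x');
    SELECT sum(a) FROM base_t*;      -- 3: child rows are included
    SELECT sum(a) FROM ONLY base_t;  -- 1: parent alone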
-SELECT sum(a) FROM a_star*; - sum ------ - 355 -(1 row) - -SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class; - class | sum --------+----- - a | 3 - b | 7 - c | 11 - d | 84 - e | 66 - f | 184 -(6 rows) - -ALTER TABLE f_star RENAME COLUMN f TO ff; -ALTER TABLE e_star* RENAME COLUMN e TO ee; -ALTER TABLE d_star* RENAME COLUMN d TO dd; -ALTER TABLE c_star* RENAME COLUMN c TO cc; -ALTER TABLE b_star* RENAME COLUMN b TO bb; -ALTER TABLE a_star* RENAME COLUMN a TO aa; -SELECT class, aa - FROM a_star* x - WHERE aa ISNULL; - class | aa --------+---- - a | - b | - b | - c | - c | - d | - d | - d | - d | - d | - d | - d | - d | - e | - e | - e | - f | - f | - f | - f | - f | - f | - f | - f | -(24 rows) - --- As of Postgres 7.1, ALTER implicitly recurses, --- so this should be same as ALTER a_star* -ALTER TABLE a_star RENAME COLUMN aa TO foo; -SELECT class, foo - FROM a_star* x - WHERE x.foo >= 2; - class | foo --------+----- - a | 2 - b | 3 - b | 4 - c | 5 - c | 6 - d | 7 - d | 8 - d | 9 - d | 10 - d | 11 - d | 12 - d | 13 - d | 14 - e | 15 - e | 16 - e | 17 - e | 18 - f | 19 - f | 20 - f | 21 - f | 22 - f | 24 - f | 25 - f | 26 - f | 27 -(25 rows) - -ALTER TABLE a_star RENAME COLUMN foo TO aa; -SELECT * - from a_star* - WHERE aa < 1000; - class | aa --------+---- - a | 1 - a | 2 - b | 3 - b | 4 - c | 5 - c | 6 - d | 7 - d | 8 - d | 9 - d | 10 - d | 11 - d | 12 - d | 13 - d | 14 - e | 15 - e | 16 - e | 17 - e | 18 - f | 19 - f | 20 - f | 21 - f | 22 - f | 24 - f | 25 - f | 26 - f | 27 -(26 rows) - -ALTER TABLE f_star ADD COLUMN f int4; -UPDATE f_star SET f = 10; -ALTER TABLE e_star* ADD COLUMN e int4; ---UPDATE e_star* SET e = 42; -SELECT * FROM e_star*; - class | aa | cc | ee | e --------+----+-------------+-----+--- - e | 15 | hi carol | -1 | - e | 16 | hi bob | | - e | 17 | | -2 | - e | | hi michelle | -3 | - e | 18 | | | - e | | hi elisa | | - e | | | -4 | - f | 19 | hi claire | -5 | - f | 20 | hi mike | -6 | - f | 21 | hi marcel | | - f | 22 | | -7 | - f | | hi keith | -8 | - f | 24 | hi marc | | - f | 25 | | -9 | - f | 26 | | | - f | | hi allison | -10 | - f | | hi jeff | | - f | | | -11 | - f | 27 | | | - f | | hi carl | | - f | | | -12 | - f | | | | - f | | | | -(23 rows) - -ALTER TABLE a_star* ADD COLUMN a text; -NOTICE: merging definition of column "a" for child "d_star" --- That ALTER TABLE should have added TOAST tables. -SELECT relname, reltoastrelid <> 0 AS has_toast_table - FROM pg_class - WHERE oid::regclass IN ('a_star', 'c_star') - ORDER BY 1; - relname | has_toast_table ----------+----------------- - a_star | t - c_star | t -(2 rows) - ---UPDATE b_star* --- SET a = text 'gazpacho' --- WHERE aa > 4; -SELECT class, aa, a FROM a_star*; - class | aa | a --------+----+--- - a | 1 | - a | 2 | - a | | - b | 3 | - b | 4 | - b | | - b | | - c | 5 | - c | 6 | - c | | - c | | - d | 7 | - d | 8 | - d | 9 | - d | 10 | - d | | - d | 11 | - d | 12 | - d | 13 | - d | | - d | | - d | | - d | 14 | - d | | - d | | - d | | - d | | - e | 15 | - e | 16 | - e | 17 | - e | | - e | 18 | - e | | - e | | - f | 19 | - f | 20 | - f | 21 | - f | 22 | - f | | - f | 24 | - f | 25 | - f | 26 | - f | | - f | | - f | | - f | 27 | - f | | - f | | - f | | - f | | -(50 rows) - --- --- versions --- --- --- postquel functions --- --- --- mike does post_hacking, --- joe and sally play basketball, and --- everyone else does nothing. 
--- -SELECT p.name, name(p.hobbies) FROM ONLY person p; - name | name --------+------------- - mike | posthacking - joe | basketball - sally | basketball -(3 rows) - --- --- as above, but jeff also does post_hacking. --- -SELECT p.name, name(p.hobbies) FROM person* p; - name | name --------+------------- - mike | posthacking - joe | basketball - sally | basketball - jeff | posthacking -(4 rows) - --- --- the next two queries demonstrate how functions generate bogus duplicates. --- this is a "feature" .. --- -SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r - ORDER BY 1,2; - name | name --------------+--------------- - basketball | hightops - posthacking | advil - posthacking | peet's coffee - skywalking | guts -(4 rows) - -SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r; - name | name --------------+--------------- - posthacking | advil - posthacking | peet's coffee - posthacking | advil - posthacking | peet's coffee - basketball | hightops - basketball | hightops - skywalking | guts -(7 rows) - --- --- mike needs advil and peet's coffee, --- joe and sally need hightops, and --- everyone else is fine. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p; - name | name | name --------+-------------+--------------- - mike | posthacking | advil - mike | posthacking | peet's coffee - joe | basketball | hightops - sally | basketball | hightops -(4 rows) - --- --- as above, but jeff needs advil and peet's coffee as well. --- -SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p; - name | name | name --------+-------------+--------------- - mike | posthacking | advil - mike | posthacking | peet's coffee - joe | basketball | hightops - sally | basketball | hightops - jeff | posthacking | advil - jeff | posthacking | peet's coffee -(6 rows) - --- --- just like the last two, but make sure that the target list fixup and --- unflattening is being done correctly. 
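These permutations lean on the interchangeability of field and functional notation for composite values: for a composite x with attribute name, x.name and name(x) resolve to the same thing, which is what lets name(p.hobbies) extract a column from a function result. The simplest form of the equivalence:

    SELECT hr.name FROM hobbies_r hr;   -- field notation
    SELECT name(hr) FROM hobbies_r hr;  -- functional notation, same result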
--- -SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p; - name | name | name ----------------+-------+------------- - advil | mike | posthacking - peet's coffee | mike | posthacking - hightops | joe | basketball - hightops | sally | basketball -(4 rows) - -SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p; - name | name | name ----------------+-------+------------- - advil | mike | posthacking - peet's coffee | mike | posthacking - hightops | joe | basketball - hightops | sally | basketball - advil | jeff | posthacking - peet's coffee | jeff | posthacking -(6 rows) - -SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p; - name | name | name ----------------+-------------+------- - advil | posthacking | mike - peet's coffee | posthacking | mike - hightops | basketball | joe - hightops | basketball | sally -(4 rows) - -SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p; - name | name | name ----------------+-------------+------- - advil | posthacking | mike - peet's coffee | posthacking | mike - hightops | basketball | joe - hightops | basketball | sally - advil | posthacking | jeff - peet's coffee | posthacking | jeff -(6 rows) - -SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer'))); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_2a(text 'skywalking')); - name ------- - guts -(1 row) - -SELECT name(equipment_named_ambiguous_2b(text 'skywalking')); - name ---------------- - advil - peet's coffee - hightops - guts -(4 rows) - -SELECT hobbies_by_name('basketball'); - hobbies_by_name ------------------ - joe -(1 row) - -SELECT name, overpaid(emp.*) FROM emp; - name | overpaid ---------+---------- - sharon | t - sam | t - bill | t - jeff | f - cim | f - linda | f -(6 rows) - --- --- Try a few cases with SQL-spec row constructor expressions --- -SELECT * FROM equipment(ROW('skywalking', 'mer')); - name | hobby -------+------------ - guts | skywalking -(1 row) - -SELECT name(equipment(ROW('skywalking', 'mer'))); - name ------- - guts -(1 row) - -SELECT *, name(equipment(h.*)) FROM hobbies_r h; - name | person | name --------------+--------+--------------- - posthacking | mike | advil - posthacking | mike | peet's coffee - posthacking | jeff | advil - posthacking | jeff | peet's coffee - basketball | joe | hightops - basketball | sally | hightops - skywalking | | guts -(7 rows) - -SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; - name | person | name --------------+--------+--------------- - posthacking | mike | advil - posthacking | mike | peet's coffee - posthacking | jeff | advil - posthacking | jeff | peet's coffee - basketball | joe | hightops - basketball | sally | hightops - skywalking | | guts -(7 rows) - --- --- functional joins --- --- --- instance rules --- --- --- rewrite rules --- diff --git 
a/src/test/regress/output/tablespace.source b/src/test/regress/output/tablespace.source deleted file mode 100644 index 864f4b6e208..00000000000 --- a/src/test/regress/output/tablespace.source +++ /dev/null @@ -1,939 +0,0 @@ --- directory paths are passed to us in environment variables -\getenv abs_builddir PG_ABS_BUILDDIR -\set testtablespace :abs_builddir '/testtablespace' --- create a tablespace using WITH clause -CREATE TABLESPACE regress_tblspacewith LOCATION :'testtablespace' WITH (some_nonexistent_parameter = true); -- fail -ERROR: unrecognized parameter "some_nonexistent_parameter" -CREATE TABLESPACE regress_tblspacewith LOCATION :'testtablespace' WITH (random_page_cost = 3.0); -- ok --- check to see the parameter was used -SELECT spcoptions FROM pg_tablespace WHERE spcname = 'regress_tblspacewith'; - spcoptions ------------------------- - {random_page_cost=3.0} -(1 row) - --- drop the tablespace so we can re-use the location -DROP TABLESPACE regress_tblspacewith; --- create a tablespace we can use -CREATE TABLESPACE regress_tblspace LOCATION :'testtablespace'; --- try setting and resetting some properties for the new tablespace -ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1); -ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail -ERROR: unrecognized parameter "some_nonexistent_parameter" -ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail -ERROR: RESET must not include values for parameters -ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok --- REINDEX (TABLESPACE) --- catalogs and system tablespaces --- system catalog, fail -REINDEX (TABLESPACE regress_tblspace) TABLE pg_am; -ERROR: cannot move system relation "pg_am_name_index" -REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_am; -ERROR: cannot reindex system catalogs concurrently --- shared catalog, fail -REINDEX (TABLESPACE regress_tblspace) TABLE pg_authid; -ERROR: cannot move system relation "pg_authid_rolname_index" -REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_authid; -ERROR: cannot reindex system catalogs concurrently --- toast relations, fail -REINDEX (TABLESPACE regress_tblspace) INDEX pg_toast.pg_toast_1260_index; -ERROR: cannot move system relation "pg_toast_1260_index" -REINDEX (TABLESPACE regress_tblspace) INDEX CONCURRENTLY pg_toast.pg_toast_1260_index; -ERROR: cannot reindex system catalogs concurrently -REINDEX (TABLESPACE regress_tblspace) TABLE pg_toast.pg_toast_1260; -ERROR: cannot move system relation "pg_toast_1260_index" -REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_toast.pg_toast_1260; -ERROR: cannot reindex system catalogs concurrently --- system catalog, fail -REINDEX (TABLESPACE pg_global) TABLE pg_authid; -ERROR: cannot move system relation "pg_authid_rolname_index" -REINDEX (TABLESPACE pg_global) TABLE CONCURRENTLY pg_authid; -ERROR: cannot reindex system catalogs concurrently --- table with toast relation -CREATE TABLE regress_tblspace_test_tbl (num1 bigint, num2 double precision, t text); -INSERT INTO regress_tblspace_test_tbl (num1, num2, t) - SELECT round(random()*100), random(), 'text' - FROM generate_series(1, 10) s(i); -CREATE INDEX regress_tblspace_test_tbl_idx ON regress_tblspace_test_tbl (num1); --- move to global tablespace, fail -REINDEX (TABLESPACE pg_global) INDEX regress_tblspace_test_tbl_idx; -ERROR: only shared relations can be placed in pg_global tablespace -REINDEX (TABLESPACE pg_global) INDEX CONCURRENTLY 
regress_tblspace_test_tbl_idx; -ERROR: cannot move non-shared relation to tablespace "pg_global" --- check transactional behavior of REINDEX (TABLESPACE) -BEGIN; -REINDEX (TABLESPACE regress_tblspace) INDEX regress_tblspace_test_tbl_idx; -REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; -ROLLBACK; --- no relation moved to the new tablespace -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'; - relname ---------- -(0 rows) - --- check that all indexes are moved to a new tablespace with different --- relfilenode. --- Save first the existing relfilenode for the toast and main relations. -SELECT relfilenode as main_filenode FROM pg_class - WHERE relname = 'regress_tblspace_test_tbl_idx' \gset -SELECT relfilenode as toast_filenode FROM pg_class - WHERE oid = - (SELECT i.indexrelid - FROM pg_class c, - pg_index i - WHERE i.indrelid = c.reltoastrelid AND - c.relname = 'regress_tblspace_test_tbl') \gset -REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; - relname -------------------------------- - regress_tblspace_test_tbl_idx -(1 row) - -ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE regress_tblspace; -ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE pg_default; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; - relname -------------------------------- - regress_tblspace_test_tbl_idx -(1 row) - --- Move back to the default tablespace. -ALTER INDEX regress_tblspace_test_tbl_idx SET TABLESPACE pg_default; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; - relname ---------- -(0 rows) - -REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE regress_tblspace_test_tbl; -SELECT c.relname FROM pg_class c, pg_tablespace s - WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' - ORDER BY c.relname; - relname -------------------------------- - regress_tblspace_test_tbl_idx -(1 row) - -SELECT relfilenode = :main_filenode AS main_same FROM pg_class - WHERE relname = 'regress_tblspace_test_tbl_idx'; - main_same ------------ - f -(1 row) - -SELECT relfilenode = :toast_filenode as toast_same FROM pg_class - WHERE oid = - (SELECT i.indexrelid - FROM pg_class c, - pg_index i - WHERE i.indrelid = c.reltoastrelid AND - c.relname = 'regress_tblspace_test_tbl'); - toast_same ------------- - f -(1 row) - -DROP TABLE regress_tblspace_test_tbl; --- REINDEX (TABLESPACE) with partitions --- Create a partition tree and check the set of relations reindexed --- with their new tablespace. -CREATE TABLE tbspace_reindex_part (c1 int, c2 int) PARTITION BY RANGE (c1); -CREATE TABLE tbspace_reindex_part_0 PARTITION OF tbspace_reindex_part - FOR VALUES FROM (0) TO (10) PARTITION BY list (c2); -CREATE TABLE tbspace_reindex_part_0_1 PARTITION OF tbspace_reindex_part_0 - FOR VALUES IN (1); -CREATE TABLE tbspace_reindex_part_0_2 PARTITION OF tbspace_reindex_part_0 - FOR VALUES IN (2); --- This partitioned table will have no partitions. 
-CREATE TABLE tbspace_reindex_part_10 PARTITION OF tbspace_reindex_part - FOR VALUES FROM (10) TO (20) PARTITION BY list (c2); --- Create some partitioned indexes -CREATE INDEX tbspace_reindex_part_index ON ONLY tbspace_reindex_part (c1); -CREATE INDEX tbspace_reindex_part_index_0 ON ONLY tbspace_reindex_part_0 (c1); -ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_0; --- This partitioned index will have no partitions. -CREATE INDEX tbspace_reindex_part_index_10 ON ONLY tbspace_reindex_part_10 (c1); -ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_10; -CREATE INDEX tbspace_reindex_part_index_0_1 ON ONLY tbspace_reindex_part_0_1 (c1); -ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_1; -CREATE INDEX tbspace_reindex_part_index_0_2 ON ONLY tbspace_reindex_part_0_2 (c1); -ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_2; -SELECT relid, parentrelid, level FROM pg_partition_tree('tbspace_reindex_part_index') - ORDER BY relid, level; - relid | parentrelid | level ---------------------------------+------------------------------+------- - tbspace_reindex_part_index | | 0 - tbspace_reindex_part_index_0 | tbspace_reindex_part_index | 1 - tbspace_reindex_part_index_10 | tbspace_reindex_part_index | 1 - tbspace_reindex_part_index_0_1 | tbspace_reindex_part_index_0 | 2 - tbspace_reindex_part_index_0_2 | tbspace_reindex_part_index_0 | 2 -(5 rows) - --- Track the original tablespace, relfilenode and OID of each index --- in the tree. -CREATE TEMP TABLE reindex_temp_before AS - SELECT oid, relname, relfilenode, reltablespace - FROM pg_class - WHERE relname ~ 'tbspace_reindex_part_index'; -REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tbspace_reindex_part; --- REINDEX CONCURRENTLY changes the OID of the old relation, hence a check --- based on the relation name below. 
-SELECT b.relname, - CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged' - ELSE 'relfilenode has changed' END AS filenode, - CASE WHEN a.reltablespace = b.reltablespace THEN 'reltablespace is unchanged' - ELSE 'reltablespace has changed' END AS tbspace - FROM reindex_temp_before b JOIN pg_class a ON b.relname = a.relname - ORDER BY 1; - relname | filenode | tbspace ---------------------------------+--------------------------+---------------------------- - tbspace_reindex_part_index | relfilenode is unchanged | reltablespace is unchanged - tbspace_reindex_part_index_0 | relfilenode is unchanged | reltablespace is unchanged - tbspace_reindex_part_index_0_1 | relfilenode has changed | reltablespace has changed - tbspace_reindex_part_index_0_2 | relfilenode has changed | reltablespace has changed - tbspace_reindex_part_index_10 | relfilenode is unchanged | reltablespace is unchanged -(5 rows) - -DROP TABLE tbspace_reindex_part; --- create a schema we can use -CREATE SCHEMA testschema; --- try a table -CREATE TABLE testschema.foo (i int) TABLESPACE regress_tblspace; -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'foo'; - relname | spcname ----------+------------------ - foo | regress_tblspace -(1 row) - -INSERT INTO testschema.foo VALUES(1); -INSERT INTO testschema.foo VALUES(2); --- tables from dynamic sources -CREATE TABLE testschema.asselect TABLESPACE regress_tblspace AS SELECT 1; -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'asselect'; - relname | spcname -----------+------------------ - asselect | regress_tblspace -(1 row) - -PREPARE selectsource(int) AS SELECT $1; -CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace - AS EXECUTE selectsource(2); -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'asexecute'; - relname | spcname ------------+------------------ - asexecute | regress_tblspace -(1 row) - --- index -CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace; -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname = 'foo_idx'; - relname | spcname ----------+------------------ - foo_idx | regress_tblspace -(1 row) - --- check \d output -\d testschema.foo - Table "testschema.foo" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - i | integer | | | -Indexes: - "foo_idx" btree (i), tablespace "regress_tblspace" -Tablespace: "regress_tblspace" - -\d testschema.foo_idx - Index "testschema.foo_idx" - Column | Type | Key? 
| Definition ---------+---------+------+------------ - i | integer | yes | i -btree, for table "testschema.foo" -Tablespace: "regress_tblspace" - --- --- partitioned table --- -CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); -SET default_tablespace TO pg_global; -CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); -ERROR: only shared relations can be placed in pg_global tablespace -RESET default_tablespace; -CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); -SET default_tablespace TO regress_tblspace; -CREATE TABLE testschema.part_2 PARTITION OF testschema.part FOR VALUES IN (2); -SET default_tablespace TO pg_global; -CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); -ERROR: only shared relations can be placed in pg_global tablespace -ALTER TABLE testschema.part SET TABLESPACE regress_tblspace; -CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); -CREATE TABLE testschema.part_4 PARTITION OF testschema.part FOR VALUES IN (4) - TABLESPACE pg_default; -CREATE TABLE testschema.part_56 PARTITION OF testschema.part FOR VALUES IN (5, 6) - PARTITION BY LIST (a); -ALTER TABLE testschema.part SET TABLESPACE pg_default; -CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) - PARTITION BY LIST (a); -ERROR: only shared relations can be placed in pg_global tablespace -CREATE TABLE testschema.part_910 PARTITION OF testschema.part FOR VALUES IN (9, 10) - PARTITION BY LIST (a) TABLESPACE regress_tblspace; -RESET default_tablespace; -CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) - PARTITION BY LIST (a); -SELECT relname, spcname FROM pg_catalog.pg_class c - JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) - LEFT JOIN pg_catalog.pg_tablespace t ON c.reltablespace = t.oid - where c.relname LIKE 'part%' AND n.nspname = 'testschema' order by relname; - relname | spcname -----------+------------------ - part | - part_1 | - part_2 | regress_tblspace - part_3 | regress_tblspace - part_4 | - part_56 | regress_tblspace - part_78 | - part_910 | regress_tblspace -(8 rows) - -RESET default_tablespace; -DROP TABLE testschema.part; --- partitioned index -CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); -CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1); -CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace; -CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2); -SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c - where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx'; - relname | spcname --------------+------------------ - part1_a_idx | regress_tblspace - part2_a_idx | regress_tblspace - part_a_idx | regress_tblspace -(3 rows) - -\d testschema.part - Partitioned table "testschema.part" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition key: LIST (a) -Indexes: - "part_a_idx" btree (a), tablespace "regress_tblspace" -Number of partitions: 2 (Use \d+ to list them.) 
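The part_1 through part_910 cases above turn on a precedence rule that is easy to miss: once a partitioned table has been given a tablespace of its own with ALTER TABLE ... SET TABLESPACE, that tablespace, rather than the default_tablespace setting, becomes the default for partitions created afterwards (which is why the second attempt at part_3 succeeds and lands in regress_tblspace even though default_tablespace is still pg_global). A minimal sketch of just that rule, using hypothetical demo_* names and assuming the regress_tblspace tablespace exists:

CREATE TABLE demo_parted (a int) PARTITION BY LIST (a);
ALTER TABLE demo_parted SET TABLESPACE regress_tblspace;
-- no TABLESPACE clause here: the new partition picks up the parent's
-- tablespace, not default_tablespace and not pg_default
CREATE TABLE demo_parted_1 PARTITION OF demo_parted FOR VALUES IN (1);
SELECT spcname FROM pg_class c JOIN pg_tablespace t ON c.reltablespace = t.oid
  WHERE c.relname = 'demo_parted_1';  -- regress_tblspace
DROP TABLE demo_parted;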
- -\d+ testschema.part - Partitioned table "testschema.part" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | | | plain | | -Partition key: LIST (a) -Indexes: - "part_a_idx" btree (a), tablespace "regress_tblspace" -Partitions: testschema.part1 FOR VALUES IN (1), - testschema.part2 FOR VALUES IN (2) - -\d testschema.part1 - Table "testschema.part1" - Column | Type | Collation | Nullable | Default ---------+---------+-----------+----------+--------- - a | integer | | | -Partition of: testschema.part FOR VALUES IN (1) -Indexes: - "part1_a_idx" btree (a), tablespace "regress_tblspace" - -\d+ testschema.part1 - Table "testschema.part1" - Column | Type | Collation | Nullable | Default | Storage | Stats target | Description ---------+---------+-----------+----------+---------+---------+--------------+------------- - a | integer | | | | plain | | -Partition of: testschema.part FOR VALUES IN (1) -Partition constraint: ((a IS NOT NULL) AND (a = 1)) -Indexes: - "part1_a_idx" btree (a), tablespace "regress_tblspace" - -\d testschema.part_a_idx -Partitioned index "testschema.part_a_idx" - Column | Type | Key? | Definition ---------+---------+------+------------ - a | integer | yes | a -btree, for table "testschema.part" -Number of partitions: 2 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -\d+ testschema.part_a_idx - Partitioned index "testschema.part_a_idx" - Column | Type | Key? | Definition | Storage | Stats target ---------+---------+------+------------+---------+-------------- - a | integer | yes | a | plain | -btree, for table "testschema.part" -Partitions: testschema.part1_a_idx, - testschema.part2_a_idx -Tablespace: "regress_tblspace" - --- partitioned rels cannot specify the default tablespace. 
These fail: -CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE pg_default; -ERROR: cannot specify default tablespace for partitioned relations -CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE pg_default) PARTITION BY LIST (a); -ERROR: cannot specify default tablespace for partitioned relations -SET default_tablespace TO 'pg_default'; -CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE regress_tblspace; -ERROR: cannot specify default tablespace for partitioned relations -CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a); -ERROR: cannot specify default tablespace for partitioned relations --- but these work: -CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a) TABLESPACE regress_tblspace; -SET default_tablespace TO ''; -CREATE TABLE testschema.dflt2 (a int PRIMARY KEY) PARTITION BY LIST (a); -DROP TABLE testschema.dflt, testschema.dflt2; --- check that default_tablespace doesn't affect ALTER TABLE index rebuilds -CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace; -INSERT INTO testschema.test_default_tab VALUES (1); -CREATE INDEX test_index1 on testschema.test_default_tab (id); -CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_tblspace; -ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index3 PRIMARY KEY (id); -ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; -\d testschema.test_index1 - Index "testschema.test_index1" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -btree, for table "testschema.test_default_tab" - -\d testschema.test_index2 - Index "testschema.test_index2" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_index3 - Index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab" - -\d testschema.test_index4 - Index "testschema.test_index4" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - --- use a custom tablespace for default_tablespace -SET default_tablespace TO regress_tblspace; --- tablespace should not change if no rewrite -ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; -\d testschema.test_index1 - Index "testschema.test_index1" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -btree, for table "testschema.test_default_tab" - -\d testschema.test_index2 - Index "testschema.test_index2" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_index3 - Index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab" - -\d testschema.test_index4 - Index "testschema.test_index4" - Column | Type | Key? 
| Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - -SELECT * FROM testschema.test_default_tab; - id ----- - 1 -(1 row) - --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab ALTER id TYPE int; -\d testschema.test_index1 - Index "testschema.test_index1" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -btree, for table "testschema.test_default_tab" - -\d testschema.test_index2 - Index "testschema.test_index2" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_index3 - Index "testschema.test_index3" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -primary key, btree, for table "testschema.test_default_tab" - -\d testschema.test_index4 - Index "testschema.test_index4" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -unique, btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - -SELECT * FROM testschema.test_default_tab; - id ----- - 1 -(1 row) - --- now use the default tablespace for default_tablespace -SET default_tablespace TO ''; --- tablespace should not change if no rewrite -ALTER TABLE testschema.test_default_tab ALTER id TYPE int; -\d testschema.test_index1 - Index "testschema.test_index1" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -btree, for table "testschema.test_default_tab" - -\d testschema.test_index2 - Index "testschema.test_index2" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_index3 - Index "testschema.test_index3" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -primary key, btree, for table "testschema.test_default_tab" - -\d testschema.test_index4 - Index "testschema.test_index4" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -unique, btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; -\d testschema.test_index1 - Index "testschema.test_index1" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -btree, for table "testschema.test_default_tab" - -\d testschema.test_index2 - Index "testschema.test_index2" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_index3 - Index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab" - -\d testschema.test_index4 - Index "testschema.test_index4" - Column | Type | Key? 
| Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab" -Tablespace: "regress_tblspace" - -DROP TABLE testschema.test_default_tab; --- check that default_tablespace doesn't affect ALTER TABLE index rebuilds --- (this time with a partitioned table) -CREATE TABLE testschema.test_default_tab_p(id bigint, val bigint) - PARTITION BY LIST (id) TABLESPACE regress_tblspace; -CREATE TABLE testschema.test_default_tab_p1 PARTITION OF testschema.test_default_tab_p - FOR VALUES IN (1); -INSERT INTO testschema.test_default_tab_p VALUES (1); -CREATE INDEX test_index1 on testschema.test_default_tab_p (val); -CREATE INDEX test_index2 on testschema.test_default_tab_p (val) TABLESPACE regress_tblspace; -ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index3 PRIMARY KEY (id); -ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; -\d testschema.test_index1 -Partitioned index "testschema.test_index1" - Column | Type | Key? | Definition ---------+--------+------+------------ - val | bigint | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index2 -Partitioned index "testschema.test_index2" - Column | Type | Key? | Definition ---------+--------+------+------------ - val | bigint | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -\d testschema.test_index3 -Partitioned index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index4 -Partitioned index "testschema.test_index4" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - --- use a custom tablespace for default_tablespace -SET default_tablespace TO regress_tblspace; --- tablespace should not change if no rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; -\d testschema.test_index1 -Partitioned index "testschema.test_index1" - Column | Type | Key? | Definition ---------+--------+------+------------ - val | bigint | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index2 -Partitioned index "testschema.test_index2" - Column | Type | Key? | Definition ---------+--------+------+------------ - val | bigint | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -\d testschema.test_index3 -Partitioned index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index4 -Partitioned index "testschema.test_index4" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) 
-Tablespace: "regress_tblspace" - -SELECT * FROM testschema.test_default_tab_p; - id | val -----+----- - 1 | -(1 row) - --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; -\d testschema.test_index1 -Partitioned index "testschema.test_index1" - Column | Type | Key? | Definition ---------+---------+------+------------ - val | integer | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index2 -Partitioned index "testschema.test_index2" - Column | Type | Key? | Definition ---------+---------+------+------------ - val | integer | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -\d testschema.test_index3 -Partitioned index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index4 -Partitioned index "testschema.test_index4" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -SELECT * FROM testschema.test_default_tab_p; - id | val -----+----- - 1 | -(1 row) - --- now use the default tablespace for default_tablespace -SET default_tablespace TO ''; --- tablespace should not change if no rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; -\d testschema.test_index1 -Partitioned index "testschema.test_index1" - Column | Type | Key? | Definition ---------+---------+------+------------ - val | integer | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index2 -Partitioned index "testschema.test_index2" - Column | Type | Key? | Definition ---------+---------+------+------------ - val | integer | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -\d testschema.test_index3 -Partitioned index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index4 -Partitioned index "testschema.test_index4" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - --- tablespace should not change even if there is an index rewrite -ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; -\d testschema.test_index1 -Partitioned index "testschema.test_index1" - Column | Type | Key? | Definition ---------+--------+------+------------ - val | bigint | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index2 -Partitioned index "testschema.test_index2" - Column | Type | Key? 
| Definition ---------+--------+------+------------ - val | bigint | yes | val -btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -\d testschema.test_index3 -Partitioned index "testschema.test_index3" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -primary key, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) - -\d testschema.test_index4 -Partitioned index "testschema.test_index4" - Column | Type | Key? | Definition ---------+--------+------+------------ - id | bigint | yes | id -unique, btree, for table "testschema.test_default_tab_p" -Number of partitions: 1 (Use \d+ to list them.) -Tablespace: "regress_tblspace" - -DROP TABLE testschema.test_default_tab_p; --- check that default_tablespace affects index additions in ALTER TABLE -CREATE TABLE testschema.test_tab(id int) TABLESPACE regress_tblspace; -INSERT INTO testschema.test_tab VALUES (1); -SET default_tablespace TO regress_tblspace; -ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id); -SET default_tablespace TO ''; -ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id); -\d testschema.test_tab_unique - Index "testschema.test_tab_unique" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -unique, btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_pkey - Index "testschema.test_tab_pkey" - Column | Type | Key? | Definition ---------+---------+------+------------ - id | integer | yes | id -primary key, btree, for table "testschema.test_tab" - -SELECT * FROM testschema.test_tab; - id ----- - 1 -(1 row) - -DROP TABLE testschema.test_tab; --- check that default_tablespace is handled correctly by multi-command --- ALTER TABLE that includes a tablespace-preserving rewrite -CREATE TABLE testschema.test_tab(a int, b int, c int); -SET default_tablespace TO regress_tblspace; -ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (a); -CREATE INDEX test_tab_a_idx ON testschema.test_tab (a); -SET default_tablespace TO ''; -CREATE INDEX test_tab_b_idx ON testschema.test_tab (b); -\d testschema.test_tab_unique - Index "testschema.test_tab_unique" - Column | Type | Key? | Definition ---------+---------+------+------------ - a | integer | yes | a -unique, btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_a_idx - Index "testschema.test_tab_a_idx" - Column | Type | Key? | Definition ---------+---------+------+------------ - a | integer | yes | a -btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_b_idx - Index "testschema.test_tab_b_idx" - Column | Type | Key? | Definition ---------+---------+------+------------ - b | integer | yes | b -btree, for table "testschema.test_tab" - -ALTER TABLE testschema.test_tab ALTER b TYPE bigint, ADD UNIQUE (c); -\d testschema.test_tab_unique - Index "testschema.test_tab_unique" - Column | Type | Key? | Definition ---------+---------+------+------------ - a | integer | yes | a -unique, btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_a_idx - Index "testschema.test_tab_a_idx" - Column | Type | Key? 
| Definition ---------+---------+------+------------ - a | integer | yes | a -btree, for table "testschema.test_tab" -Tablespace: "regress_tblspace" - -\d testschema.test_tab_b_idx - Index "testschema.test_tab_b_idx" - Column | Type | Key? | Definition ---------+--------+------+------------ - b | bigint | yes | b -btree, for table "testschema.test_tab" - -DROP TABLE testschema.test_tab; --- let's try moving a table from one place to another -CREATE TABLE testschema.atable AS VALUES (1), (2); -CREATE UNIQUE INDEX anindex ON testschema.atable(column1); -ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace; -ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace; -ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_global; -ERROR: only shared relations can be placed in pg_global tablespace -ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; -ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace; -INSERT INTO testschema.atable VALUES(3); -- ok -INSERT INTO testschema.atable VALUES(1); -- fail (checks index) -ERROR: duplicate key value violates unique constraint "anindex" -DETAIL: Key (column1)=(1) already exists. -SELECT COUNT(*) FROM testschema.atable; -- checks heap - count -------- - 3 -(1 row) - --- Will fail with bad path -CREATE TABLESPACE regress_badspace LOCATION '/no/such/location'; -ERROR: directory "/no/such/location" does not exist --- No such tablespace -CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace; -ERROR: tablespace "regress_nosuchspace" does not exist --- Fail, in use for some partitioned object -DROP TABLESPACE regress_tblspace; -ERROR: tablespace "regress_tblspace" cannot be dropped because some objects depend on it -DETAIL: tablespace for index testschema.part_a_idx -ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; --- Fail, not empty -DROP TABLESPACE regress_tblspace; -ERROR: tablespace "regress_tblspace" is not empty -CREATE ROLE regress_tablespace_user1 login; -CREATE ROLE regress_tablespace_user2 login; -GRANT USAGE ON SCHEMA testschema TO regress_tablespace_user2; -ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1; -CREATE TABLE testschema.tablespace_acl (c int); --- new owner lacks permission to create this index from scratch -CREATE INDEX k ON testschema.tablespace_acl (c) TABLESPACE regress_tblspace; -ALTER TABLE testschema.tablespace_acl OWNER TO regress_tablespace_user2; -SET SESSION ROLE regress_tablespace_user2; -CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail -ERROR: permission denied for tablespace regress_tblspace -ALTER TABLE testschema.tablespace_acl ALTER c TYPE bigint; -REINDEX (TABLESPACE regress_tblspace) TABLE tablespace_table; -- fail -ERROR: permission denied for tablespace regress_tblspace -REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tablespace_table; -- fail -ERROR: permission denied for tablespace regress_tblspace -RESET ROLE; -ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed; -ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; -ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; --- Should show notice that nothing was done -ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default; -NOTICE: no matching relations in tablespace "regress_tblspace_renamed" found --- Should succeed -DROP TABLESPACE regress_tblspace_renamed; -DROP SCHEMA testschema CASCADE; -NOTICE: drop cascades to 6 other objects -DETAIL: drop cascades to table 
testschema.foo -drop cascades to table testschema.asselect -drop cascades to table testschema.asexecute -drop cascades to table testschema.part -drop cascades to table testschema.atable -drop cascades to table testschema.tablespace_acl -DROP ROLE regress_tablespace_user1; -DROP ROLE regress_tablespace_user2; diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index c2fcff55bfb..589357ba59c 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -438,155 +438,6 @@ string_matches_pattern(const char *str, const char *pattern) return false; } -/* - * Replace all occurrences of "replace" in "string" with "replacement". - * The StringInfo will be suitably enlarged if necessary. - * - * Note: this is optimized on the assumption that most calls will find - * no more than one occurrence of "replace", and quite likely none. - */ -void -replace_string(StringInfo string, const char *replace, const char *replacement) -{ - int pos = 0; - char *ptr; - - while ((ptr = strstr(string->data + pos, replace)) != NULL) - { - /* Must copy the remainder of the string out of the StringInfo */ - char *suffix = pg_strdup(ptr + strlen(replace)); - - /* Truncate StringInfo at start of found string ... */ - string->len = ptr - string->data; - /* ... and append the replacement (this restores the trailing '\0') */ - appendStringInfoString(string, replacement); - /* Next search should start after the replacement */ - pos = string->len; - /* Put back the remainder of the string */ - appendStringInfoString(string, suffix); - free(suffix); - } -} - -/* - * Convert *.source found in the "source" directory, replacing certain tokens - * in the file contents with their intended values, and put the resulting files - * in the "dest" directory, replacing the ".source" prefix in their names with - * the given suffix. - */ -static void -convert_sourcefiles_in(const char *source_subdir, const char *dest_dir, const char *dest_subdir, const char *suffix) -{ - char testtablespace[MAXPGPATH]; - char indir[MAXPGPATH]; - char outdir_sub[MAXPGPATH]; - char **name; - char **names; - int count = 0; - - snprintf(indir, MAXPGPATH, "%s/%s", inputdir, source_subdir); - - /* Check that indir actually exists and is a directory */ - if (!directory_exists(indir)) - { - /* - * No warning, to avoid noise in tests that do not have these - * directories; for example, ecpg, contrib and src/pl. 
- */ - return; - } - - names = pgfnames(indir); - if (!names) - /* Error logged in pgfnames */ - exit(2); - - /* Create the "dest" subdirectory if not present */ - snprintf(outdir_sub, MAXPGPATH, "%s/%s", dest_dir, dest_subdir); - if (!directory_exists(outdir_sub)) - make_directory(outdir_sub); - - /* We might need to replace @testtablespace@ */ - snprintf(testtablespace, MAXPGPATH, "%s/testtablespace", outputdir); - - /* finally loop on each file and do the replacement */ - for (name = names; *name; name++) - { - char srcfile[MAXPGPATH]; - char destfile[MAXPGPATH]; - char prefix[MAXPGPATH]; - FILE *infile, - *outfile; - StringInfoData line; - - /* reject filenames not finishing in ".source" */ - if (strlen(*name) < 8) - continue; - if (strcmp(*name + strlen(*name) - 7, ".source") != 0) - continue; - - count++; - - /* build the full actual paths to open */ - snprintf(prefix, strlen(*name) - 6, "%s", *name); - snprintf(srcfile, MAXPGPATH, "%s/%s", indir, *name); - snprintf(destfile, MAXPGPATH, "%s/%s/%s.%s", dest_dir, dest_subdir, - prefix, suffix); - - infile = fopen(srcfile, "r"); - if (!infile) - { - fprintf(stderr, _("%s: could not open file \"%s\" for reading: %s\n"), - progname, srcfile, strerror(errno)); - exit(2); - } - outfile = fopen(destfile, "w"); - if (!outfile) - { - fprintf(stderr, _("%s: could not open file \"%s\" for writing: %s\n"), - progname, destfile, strerror(errno)); - exit(2); - } - - initStringInfo(&line); - - while (pg_get_line_buf(infile, &line)) - { - replace_string(&line, "@abs_srcdir@", inputdir); - replace_string(&line, "@abs_builddir@", outputdir); - replace_string(&line, "@testtablespace@", testtablespace); - replace_string(&line, "@libdir@", dlpath); - replace_string(&line, "@DLSUFFIX@", DLSUFFIX); - fputs(line.data, outfile); - } - - pfree(line.data); - fclose(infile); - fclose(outfile); - } - - /* - * If we didn't process any files, complain because it probably means - * somebody neglected to pass the needed --inputdir argument. - */ - if (count <= 0) - { - fprintf(stderr, _("%s: no *.source files found in \"%s\"\n"), - progname, indir); - exit(2); - } - - pgfnames_cleanup(names); -} - -/* Create the .sql and .out files from the .source files, if any */ -static void -convert_sourcefiles(void) -{ - convert_sourcefiles_in("input", outputdir, "sql", "sql"); - convert_sourcefiles_in("output", outputdir, "expected", "out"); -} - /* * Clean out the test tablespace dir, or create it if it doesn't exist. 
* @@ -936,7 +787,6 @@ initialize_environment(void) printf(_("(using postmaster on Unix socket, default port)\n")); } - convert_sourcefiles(); load_resultmap(); } diff --git a/src/test/regress/pg_regress.h b/src/test/regress/pg_regress.h index c6d015c8402..ad91dfb8587 100644 --- a/src/test/regress/pg_regress.h +++ b/src/test/regress/pg_regress.h @@ -65,6 +65,4 @@ int regression_main(int argc, char *argv[], void add_stringlist_item(_stringlist **listhead, const char *str); PID_TYPE spawn_process(const char *cmdline); -void replace_string(struct StringInfoData *string, - const char *replace, const char *replacement); bool file_exists(const char *file); diff --git a/src/test/regress/sql/.gitignore b/src/test/regress/sql/.gitignore deleted file mode 100644 index fe14af6ae7a..00000000000 --- a/src/test/regress/sql/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -/constraints.sql -/copy.sql -/create_function_0.sql -/create_function_1.sql -/create_function_2.sql -/largeobject.sql -/misc.sql -/security_label.sql -/tablespace.sql diff --git a/src/test/regress/sql/constraints.sql b/src/test/regress/sql/constraints.sql new file mode 100644 index 00000000000..458f8057785 --- /dev/null +++ b/src/test/regress/sql/constraints.sql @@ -0,0 +1,559 @@ +-- +-- CONSTRAINTS +-- Constraints can be specified with: +-- - DEFAULT clause +-- - CHECK clauses +-- - PRIMARY KEY clauses +-- - UNIQUE clauses +-- - EXCLUDE clauses +-- + +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR + +-- +-- DEFAULT syntax +-- + +CREATE TABLE DEFAULT_TBL (i int DEFAULT 100, + x text DEFAULT 'vadim', f float8 DEFAULT 123.456); + +INSERT INTO DEFAULT_TBL VALUES (1, 'thomas', 57.0613); +INSERT INTO DEFAULT_TBL VALUES (1, 'bruce'); +INSERT INTO DEFAULT_TBL (i, f) VALUES (2, 987.654); +INSERT INTO DEFAULT_TBL (x) VALUES ('marc'); +INSERT INTO DEFAULT_TBL VALUES (3, null, 1.0); + +SELECT * FROM DEFAULT_TBL; + +CREATE SEQUENCE DEFAULT_SEQ; + +CREATE TABLE DEFAULTEXPR_TBL (i1 int DEFAULT 100 + (200-199) * 2, + i2 int DEFAULT nextval('default_seq')); + +INSERT INTO DEFAULTEXPR_TBL VALUES (-1, -2); +INSERT INTO DEFAULTEXPR_TBL (i1) VALUES (-3); +INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (-4); +INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (NULL); + +SELECT * FROM DEFAULTEXPR_TBL; + +-- syntax errors +-- test for extraneous comma +CREATE TABLE error_tbl (i int DEFAULT (100, )); +-- this will fail because gram.y uses b_expr not a_expr for defaults, +-- to avoid a shift/reduce conflict that arises from NOT NULL being +-- part of the column definition syntax: +CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2)); +-- this should work, however: +CREATE TABLE error_tbl (b1 bool DEFAULT (1 IN (1, 2))); + +DROP TABLE error_tbl; + +-- +-- CHECK syntax +-- + +CREATE TABLE CHECK_TBL (x int, + CONSTRAINT CHECK_CON CHECK (x > 3)); + +INSERT INTO CHECK_TBL VALUES (5); +INSERT INTO CHECK_TBL VALUES (4); +INSERT INTO CHECK_TBL VALUES (3); +INSERT INTO CHECK_TBL VALUES (2); +INSERT INTO CHECK_TBL VALUES (6); +INSERT INTO CHECK_TBL VALUES (1); + +SELECT * FROM CHECK_TBL; + +CREATE SEQUENCE CHECK_SEQ; + +CREATE TABLE CHECK2_TBL (x int, y text, z int, + CONSTRAINT SEQUENCE_CON + CHECK (x > 3 and y <> 'check failed' and z < 8)); + +INSERT INTO CHECK2_TBL VALUES (4, 'check ok', -2); +INSERT INTO CHECK2_TBL VALUES (1, 'x check failed', -2); +INSERT INTO CHECK2_TBL VALUES (5, 'z check failed', 10); +INSERT INTO CHECK2_TBL VALUES (0, 'check failed', -2); +INSERT INTO CHECK2_TBL VALUES (6, 'check failed', 11); +INSERT INTO CHECK2_TBL VALUES 
(7, 'check ok', 7); + +SELECT * from CHECK2_TBL; + +-- +-- Check constraints on INSERT +-- + +CREATE SEQUENCE INSERT_SEQ; + +CREATE TABLE INSERT_TBL (x INT DEFAULT nextval('insert_seq'), + y TEXT DEFAULT '-NULL-', + z INT DEFAULT -1 * currval('insert_seq'), + CONSTRAINT INSERT_TBL_CON CHECK (x >= 3 AND y <> 'check failed' AND x < 8), + CHECK (x + z = 0)); + +INSERT INTO INSERT_TBL(x,z) VALUES (2, -2); + +SELECT * FROM INSERT_TBL; + +SELECT 'one' AS one, nextval('insert_seq'); + +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +INSERT INTO INSERT_TBL(y) VALUES ('Y'); +INSERT INTO INSERT_TBL(x,z) VALUES (1, -2); +INSERT INTO INSERT_TBL(z,x) VALUES (-7, 7); +INSERT INTO INSERT_TBL VALUES (5, 'check failed', -5); +INSERT INTO INSERT_TBL VALUES (7, '!check failed', -7); +INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); + +SELECT * FROM INSERT_TBL; + +INSERT INTO INSERT_TBL(y,z) VALUES ('check failed', 4); +INSERT INTO INSERT_TBL(x,y) VALUES (5, 'check failed'); +INSERT INTO INSERT_TBL(x,y) VALUES (5, '!check failed'); +INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-'); + +SELECT * FROM INSERT_TBL; + +SELECT 'seven' AS one, nextval('insert_seq'); + +INSERT INTO INSERT_TBL(y) VALUES ('Y'); + +SELECT 'eight' AS one, currval('insert_seq'); + +-- According to SQL, it is OK to insert a record that gives rise to NULL +-- constraint-condition results. Postgres used to reject this, but it +-- was wrong: +INSERT INTO INSERT_TBL VALUES (null, null, null); + +SELECT * FROM INSERT_TBL; + +-- +-- Check constraints on system columns +-- + +CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, + altitude int, + CHECK (NOT (is_capital AND tableoid::regclass::text = 'sys_col_check_tbl'))); + +INSERT INTO SYS_COL_CHECK_TBL VALUES ('Seattle', 'Washington', false, 100); +INSERT INTO SYS_COL_CHECK_TBL VALUES ('Olympia', 'Washington', true, 100); + +SELECT *, tableoid::regclass::text FROM SYS_COL_CHECK_TBL; + +DROP TABLE SYS_COL_CHECK_TBL; + +-- +-- Check constraints on system columns other than TableOid should return an error +-- +CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool, + altitude int, + CHECK (NOT (is_capital AND ctid::text = 'sys_col_check_tbl'))); + +-- +-- Check inheritance of defaults and constraints +-- + +CREATE TABLE INSERT_CHILD (cx INT default 42, + cy INT CHECK (cy > x)) + INHERITS (INSERT_TBL); + +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,11); +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,6); +INSERT INTO INSERT_CHILD(x,z,cy) VALUES (6,-7,7); +INSERT INTO INSERT_CHILD(x,y,z,cy) VALUES (6,'check failed',-6,7); + +SELECT * FROM INSERT_CHILD; + +DROP TABLE INSERT_CHILD; + +-- +-- Check NO INHERIT type of constraints and inheritance +-- + +CREATE TABLE ATACC1 (TEST INT + CHECK (TEST > 0) NO INHERIT); + +CREATE TABLE ATACC2 (TEST2 INT) INHERITS (ATACC1); +-- check constraint is not there on child +INSERT INTO ATACC2 (TEST) VALUES (-3); +-- check constraint is there on parent +INSERT INTO ATACC1 (TEST) VALUES (-3); +DROP TABLE ATACC1 CASCADE; + +CREATE TABLE ATACC1 (TEST INT, TEST2 INT + CHECK (TEST > 0), CHECK (TEST2 > 10) NO INHERIT); + +CREATE TABLE ATACC2 () INHERITS (ATACC1); +-- check constraint is there on child +INSERT INTO ATACC2 (TEST) VALUES (-3); +-- check constraint is there on parent +INSERT INTO ATACC1 (TEST) VALUES (-3); +-- check constraint is not there on child +INSERT INTO ATACC2 (TEST2) VALUES (3); +-- check constraint is there on parent +INSERT INTO ATACC1 (TEST2) VALUES (3); +DROP TABLE ATACC1 CASCADE; + +-- +-- Check constraints on INSERT INTO +-- + 
+DELETE FROM INSERT_TBL; + +ALTER SEQUENCE INSERT_SEQ RESTART WITH 4; + +CREATE TEMP TABLE tmp (xd INT, yd TEXT, zd INT); + +INSERT INTO tmp VALUES (null, 'Y', null); +INSERT INTO tmp VALUES (5, '!check failed', null); +INSERT INTO tmp VALUES (null, 'try again', null); +INSERT INTO INSERT_TBL(y) select yd from tmp; + +SELECT * FROM INSERT_TBL; + +INSERT INTO INSERT_TBL SELECT * FROM tmp WHERE yd = 'try again'; +INSERT INTO INSERT_TBL(y,z) SELECT yd, -7 FROM tmp WHERE yd = 'try again'; +INSERT INTO INSERT_TBL(y,z) SELECT yd, -8 FROM tmp WHERE yd = 'try again'; + +SELECT * FROM INSERT_TBL; + +DROP TABLE tmp; + +-- +-- Check constraints on UPDATE +-- + +UPDATE INSERT_TBL SET x = NULL WHERE x = 5; +UPDATE INSERT_TBL SET x = 6 WHERE x = 6; +UPDATE INSERT_TBL SET x = -z, z = -x; +UPDATE INSERT_TBL SET x = z, z = x; + +SELECT * FROM INSERT_TBL; + +-- DROP TABLE INSERT_TBL; + +-- +-- Check constraints on COPY FROM +-- + +CREATE TABLE COPY_TBL (x INT, y TEXT, z INT, + CONSTRAINT COPY_CON + CHECK (x > 3 AND y <> 'check failed' AND x < 7 )); + +\set filename :abs_srcdir '/data/constro.data' +COPY COPY_TBL FROM :'filename'; + +SELECT * FROM COPY_TBL; + +\set filename :abs_srcdir '/data/constrf.data' +COPY COPY_TBL FROM :'filename'; + +SELECT * FROM COPY_TBL; + +-- +-- Primary keys +-- + +CREATE TABLE PRIMARY_TBL (i int PRIMARY KEY, t text); + +INSERT INTO PRIMARY_TBL VALUES (1, 'one'); +INSERT INTO PRIMARY_TBL VALUES (2, 'two'); +INSERT INTO PRIMARY_TBL VALUES (1, 'three'); +INSERT INTO PRIMARY_TBL VALUES (4, 'three'); +INSERT INTO PRIMARY_TBL VALUES (5, 'one'); +INSERT INTO PRIMARY_TBL (t) VALUES ('six'); + +SELECT * FROM PRIMARY_TBL; + +DROP TABLE PRIMARY_TBL; + +CREATE TABLE PRIMARY_TBL (i int, t text, + PRIMARY KEY(i,t)); + +INSERT INTO PRIMARY_TBL VALUES (1, 'one'); +INSERT INTO PRIMARY_TBL VALUES (2, 'two'); +INSERT INTO PRIMARY_TBL VALUES (1, 'three'); +INSERT INTO PRIMARY_TBL VALUES (4, 'three'); +INSERT INTO PRIMARY_TBL VALUES (5, 'one'); +INSERT INTO PRIMARY_TBL (t) VALUES ('six'); + +SELECT * FROM PRIMARY_TBL; + +DROP TABLE PRIMARY_TBL; + +-- +-- Unique keys +-- + +CREATE TABLE UNIQUE_TBL (i int UNIQUE, t text); + +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (2, 'two'); +INSERT INTO UNIQUE_TBL VALUES (1, 'three'); +INSERT INTO UNIQUE_TBL VALUES (4, 'four'); +INSERT INTO UNIQUE_TBL VALUES (5, 'one'); +INSERT INTO UNIQUE_TBL (t) VALUES ('six'); +INSERT INTO UNIQUE_TBL (t) VALUES ('seven'); + +INSERT INTO UNIQUE_TBL VALUES (5, 'five-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'five-upsert-update'; +INSERT INTO UNIQUE_TBL VALUES (6, 'six-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'six-upsert-update'; +-- should fail +INSERT INTO UNIQUE_TBL VALUES (1, 'a'), (2, 'b'), (2, 'b') ON CONFLICT (i) DO UPDATE SET t = 'fails'; + +SELECT * FROM UNIQUE_TBL; + +DROP TABLE UNIQUE_TBL; + +CREATE TABLE UNIQUE_TBL (i int, t text, + UNIQUE(i,t)); + +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (2, 'two'); +INSERT INTO UNIQUE_TBL VALUES (1, 'three'); +INSERT INTO UNIQUE_TBL VALUES (1, 'one'); +INSERT INTO UNIQUE_TBL VALUES (5, 'one'); +INSERT INTO UNIQUE_TBL (t) VALUES ('six'); + +SELECT * FROM UNIQUE_TBL; + +DROP TABLE UNIQUE_TBL; + +-- +-- Deferrable unique constraints +-- + +CREATE TABLE unique_tbl (i int UNIQUE DEFERRABLE, t text); + +INSERT INTO unique_tbl VALUES (0, 'one'); +INSERT INTO unique_tbl VALUES (1, 'two'); +INSERT INTO unique_tbl VALUES (2, 'tree'); +INSERT INTO unique_tbl VALUES (3, 'four'); +INSERT INTO unique_tbl VALUES (4, 
'five'); + +BEGIN; + +-- default is immediate so this should fail right away +UPDATE unique_tbl SET i = 1 WHERE i = 0; + +ROLLBACK; + +-- check is done at end of statement, so this should succeed +UPDATE unique_tbl SET i = i+1; + +SELECT * FROM unique_tbl; + +-- explicitly defer the constraint +BEGIN; + +SET CONSTRAINTS unique_tbl_i_key DEFERRED; + +INSERT INTO unique_tbl VALUES (3, 'three'); +DELETE FROM unique_tbl WHERE t = 'tree'; -- makes constraint valid again + +COMMIT; -- should succeed + +SELECT * FROM unique_tbl; + +-- try adding an initially deferred constraint +ALTER TABLE unique_tbl DROP CONSTRAINT unique_tbl_i_key; +ALTER TABLE unique_tbl ADD CONSTRAINT unique_tbl_i_key + UNIQUE (i) DEFERRABLE INITIALLY DEFERRED; + +BEGIN; + +INSERT INTO unique_tbl VALUES (1, 'five'); +INSERT INTO unique_tbl VALUES (5, 'one'); +UPDATE unique_tbl SET i = 4 WHERE i = 2; +UPDATE unique_tbl SET i = 2 WHERE i = 4 AND t = 'four'; +DELETE FROM unique_tbl WHERE i = 1 AND t = 'one'; +DELETE FROM unique_tbl WHERE i = 5 AND t = 'five'; + +COMMIT; + +SELECT * FROM unique_tbl; + +-- should fail at commit-time +BEGIN; +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +COMMIT; -- should fail + +-- make constraint check immediate +BEGIN; + +SET CONSTRAINTS ALL IMMEDIATE; + +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should fail + +COMMIT; + +-- forced check when SET CONSTRAINTS is called +BEGIN; + +SET CONSTRAINTS ALL DEFERRED; + +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now + +SET CONSTRAINTS ALL IMMEDIATE; -- should fail + +COMMIT; + +-- test deferrable UNIQUE with a partitioned table +CREATE TABLE parted_uniq_tbl (i int UNIQUE DEFERRABLE) partition by range (i); +CREATE TABLE parted_uniq_tbl_1 PARTITION OF parted_uniq_tbl FOR VALUES FROM (0) TO (10); +CREATE TABLE parted_uniq_tbl_2 PARTITION OF parted_uniq_tbl FOR VALUES FROM (20) TO (30); +SELECT conname, conrelid::regclass FROM pg_constraint + WHERE conname LIKE 'parted_uniq%' ORDER BY conname; +BEGIN; +INSERT INTO parted_uniq_tbl VALUES (1); +SAVEPOINT f; +INSERT INTO parted_uniq_tbl VALUES (1); -- unique violation +ROLLBACK TO f; +SET CONSTRAINTS parted_uniq_tbl_i_key DEFERRED; +INSERT INTO parted_uniq_tbl VALUES (1); -- OK now, fail at commit +COMMIT; +DROP TABLE parted_uniq_tbl; + +-- test a HOT update that invalidates the conflicting tuple. +-- the trigger should still fire and catch the violation + +BEGIN; + +INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now +UPDATE unique_tbl SET t = 'THREE' WHERE i = 3 AND t = 'Three'; + +COMMIT; -- should fail + +SELECT * FROM unique_tbl; + +-- test a HOT update that modifies the newly inserted tuple, +-- but should succeed because we then remove the other conflicting tuple. 
+ +BEGIN; + +INSERT INTO unique_tbl VALUES(3, 'tree'); -- should succeed for now +UPDATE unique_tbl SET t = 'threex' WHERE t = 'tree'; +DELETE FROM unique_tbl WHERE t = 'three'; + +SELECT * FROM unique_tbl; + +COMMIT; + +SELECT * FROM unique_tbl; + +DROP TABLE unique_tbl; + +-- +-- EXCLUDE constraints +-- + +CREATE TABLE circles ( + c1 CIRCLE, + c2 TEXT, + EXCLUDE USING gist + (c1 WITH &&, (c2::circle) WITH &&) + WHERE (circle_center(c1) <> '(0,0)') +); + +-- these should succeed because they don't match the index predicate +INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 5>'); +INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 4>'); + +-- succeed +INSERT INTO circles VALUES('<(10,10), 10>', '<(0,0), 5>'); +-- fail, overlaps +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>'); +-- succeed, because violation is ignored +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') + ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO NOTHING; +-- fail, because DO UPDATE variant requires unique index +INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>') + ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO UPDATE SET c2 = EXCLUDED.c2; +-- succeed because c1 doesn't overlap +INSERT INTO circles VALUES('<(20,20), 1>', '<(0,0), 5>'); +-- succeed because c2 doesn't overlap +INSERT INTO circles VALUES('<(20,20), 10>', '<(10,10), 5>'); + +-- should fail on existing data without the WHERE clause +ALTER TABLE circles ADD EXCLUDE USING gist + (c1 WITH &&, (c2::circle) WITH &&); + +-- try reindexing an existing constraint +REINDEX INDEX circles_c1_c2_excl; + +DROP TABLE circles; + +-- Check deferred exclusion constraint + +CREATE TABLE deferred_excl ( + f1 int, + f2 int, + CONSTRAINT deferred_excl_con EXCLUDE (f1 WITH =) INITIALLY DEFERRED +); + +INSERT INTO deferred_excl VALUES(1); +INSERT INTO deferred_excl VALUES(2); +INSERT INTO deferred_excl VALUES(1); -- fail +INSERT INTO deferred_excl VALUES(1) ON CONFLICT ON CONSTRAINT deferred_excl_con DO NOTHING; -- fail +BEGIN; +INSERT INTO deferred_excl VALUES(2); -- no fail here +COMMIT; -- should fail here +BEGIN; +INSERT INTO deferred_excl VALUES(3); +INSERT INTO deferred_excl VALUES(3); -- no fail here +COMMIT; -- should fail here + +-- bug #13148: deferred constraint versus HOT update +BEGIN; +INSERT INTO deferred_excl VALUES(2, 1); -- no fail here +DELETE FROM deferred_excl WHERE f1 = 2 AND f2 IS NULL; -- remove old row +UPDATE deferred_excl SET f2 = 2 WHERE f1 = 2; +COMMIT; -- should not fail + +SELECT * FROM deferred_excl; + +ALTER TABLE deferred_excl DROP CONSTRAINT deferred_excl_con; + +-- This should fail, but worth testing because of HOT updates +UPDATE deferred_excl SET f1 = 3; + +ALTER TABLE deferred_excl ADD EXCLUDE (f1 WITH =); + +DROP TABLE deferred_excl; + +-- Comments +-- Set up a low-level role to enforce non-superuser checks. 
+CREATE ROLE regress_constraint_comments; +SET SESSION AUTHORIZATION regress_constraint_comments; + +CREATE TABLE constraint_comments_tbl (a int CONSTRAINT the_constraint CHECK (a > 0)); +CREATE DOMAIN constraint_comments_dom AS int CONSTRAINT the_constraint CHECK (value > 0); + +COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'yes, the comment'; +COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; + +-- no such constraint +COMMENT ON CONSTRAINT no_constraint ON constraint_comments_tbl IS 'yes, the comment'; +COMMENT ON CONSTRAINT no_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment'; + +-- no such table/domain +COMMENT ON CONSTRAINT the_constraint ON no_comments_tbl IS 'bad comment'; +COMMENT ON CONSTRAINT the_constraint ON DOMAIN no_comments_dom IS 'another bad comment'; + +COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS NULL; +COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS NULL; + +-- unauthorized user +RESET SESSION AUTHORIZATION; +CREATE ROLE regress_constraint_comments_noaccess; +SET SESSION AUTHORIZATION regress_constraint_comments_noaccess; +COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'no, the comment'; +COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'no, another comment'; +RESET SESSION AUTHORIZATION; + +DROP TABLE constraint_comments_tbl; +DROP DOMAIN constraint_comments_dom; + +DROP ROLE regress_constraint_comments; +DROP ROLE regress_constraint_comments_noaccess; diff --git a/src/test/regress/sql/copy.sql b/src/test/regress/sql/copy.sql new file mode 100644 index 00000000000..15e26517ecb --- /dev/null +++ b/src/test/regress/sql/copy.sql @@ -0,0 +1,293 @@ +-- +-- COPY +-- + +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR + +-- CLASS POPULATION +-- (any resemblance to real life is purely coincidental) +-- +\set filename :abs_srcdir '/data/agg.data' +COPY aggtest FROM :'filename'; + +\set filename :abs_srcdir '/data/onek.data' +COPY onek FROM :'filename'; + +\set filename :abs_builddir '/results/onek.data' +COPY onek TO :'filename'; + +DELETE FROM onek; + +COPY onek FROM :'filename'; + +\set filename :abs_srcdir '/data/tenk.data' +COPY tenk1 FROM :'filename'; + +\set filename :abs_srcdir '/data/rect.data' +COPY slow_emp4000 FROM :'filename'; + +\set filename :abs_srcdir '/data/person.data' +COPY person FROM :'filename'; + +\set filename :abs_srcdir '/data/emp.data' +COPY emp FROM :'filename'; + +\set filename :abs_srcdir '/data/student.data' +COPY student FROM :'filename'; + +\set filename :abs_srcdir '/data/stud_emp.data' +COPY stud_emp FROM :'filename'; + +\set filename :abs_srcdir '/data/streets.data' +COPY road FROM :'filename'; + +\set filename :abs_srcdir '/data/real_city.data' +COPY real_city FROM :'filename'; + +\set filename :abs_srcdir '/data/hash.data' +COPY hash_i4_heap FROM :'filename'; + +COPY hash_name_heap FROM :'filename'; + +COPY hash_txt_heap FROM :'filename'; + +COPY hash_f8_heap FROM :'filename'; + +\set filename :abs_srcdir '/data/tsearch.data' +COPY test_tsvector FROM :'filename'; + +\set filename :abs_srcdir '/data/jsonb.data' +COPY testjsonb FROM :'filename'; + +-- the data in this file has a lot of duplicates in the index key +-- fields, leading to long bucket chains and lots of table expansion. 
+-- this is therefore a stress test of the bucket overflow code (unlike +-- the data in hash.data, which has unique index keys). +-- +-- \set filename :abs_srcdir '/data/hashovfl.data' +-- COPY hash_ovfl_heap FROM :'filename'; + +\set filename :abs_srcdir '/data/desc.data' +COPY bt_i4_heap FROM :'filename'; + +\set filename :abs_srcdir '/data/hash.data' +COPY bt_name_heap FROM :'filename'; + +\set filename :abs_srcdir '/data/desc.data' +COPY bt_txt_heap FROM :'filename'; + +\set filename :abs_srcdir '/data/hash.data' +COPY bt_f8_heap FROM :'filename'; + +\set filename :abs_srcdir '/data/array.data' +COPY array_op_test FROM :'filename'; + +\set filename :abs_srcdir '/data/array.data' +COPY array_index_op_test FROM :'filename'; + +-- analyze all the data we just loaded, to ensure plan consistency +-- in later tests + +ANALYZE aggtest; +ANALYZE onek; +ANALYZE tenk1; +ANALYZE slow_emp4000; +ANALYZE person; +ANALYZE emp; +ANALYZE student; +ANALYZE stud_emp; +ANALYZE road; +ANALYZE real_city; +ANALYZE hash_i4_heap; +ANALYZE hash_name_heap; +ANALYZE hash_txt_heap; +ANALYZE hash_f8_heap; +ANALYZE test_tsvector; +ANALYZE bt_i4_heap; +ANALYZE bt_name_heap; +ANALYZE bt_txt_heap; +ANALYZE bt_f8_heap; +ANALYZE array_op_test; +ANALYZE array_index_op_test; + +--- test copying in CSV mode with various styles +--- of embedded line ending characters + +create temp table copytest ( + style text, + test text, + filler int); + +insert into copytest values('DOS',E'abc\r\ndef',1); +insert into copytest values('Unix',E'abc\ndef',2); +insert into copytest values('Mac',E'abc\rdef',3); +insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4); + +\set filename :abs_builddir '/results/copytest.csv' +copy copytest to :'filename' csv; + +create temp table copytest2 (like copytest); + +copy copytest2 from :'filename' csv; + +select * from copytest except select * from copytest2; + +truncate copytest2; + +--- same test but with an escape char different from quote char + +copy copytest to :'filename' csv quote '''' escape E'\\'; + +copy copytest2 from :'filename' csv quote '''' escape E'\\'; + +select * from copytest except select * from copytest2; + + +-- test header line feature + +create temp table copytest3 ( + c1 int, + "col with , comma" text, + "col with "" quote" int); + +copy copytest3 from stdin csv header; +this is just a line full of junk that would error out if parsed +1,a,1 +2,b,2 +\. + +copy copytest3 to stdout csv header; + +-- test copy from with a partitioned table +create table parted_copytest ( + a int, + b int, + c text +) partition by list (b); + +create table parted_copytest_a1 (c text, b int, a int); +create table parted_copytest_a2 (a int, c text, b int); + +alter table parted_copytest attach partition parted_copytest_a1 for values in(1); +alter table parted_copytest attach partition parted_copytest_a2 for values in(2); + +-- We must insert enough rows to trigger multi-inserts. These are only +-- enabled adaptively when there are few enough partition changes. +insert into parted_copytest select x,1,'One' from generate_series(1,1000) x; +insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x; +insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x; + +\set filename :abs_builddir '/results/parted_copytest.csv' +copy (select * from parted_copytest order by a) to :'filename'; + +truncate parted_copytest; + +copy parted_copytest from :'filename'; + +-- Ensure COPY FREEZE errors for partitioned tables. 
+begin; +truncate parted_copytest; +copy parted_copytest from :'filename' (freeze); +rollback; + +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + +truncate parted_copytest; + +-- create before insert row trigger on parted_copytest_a2 +create function part_ins_func() returns trigger language plpgsql as $$ +begin + return new; +end; +$$; + +create trigger part_ins_trig + before insert on parted_copytest_a2 + for each row + execute procedure part_ins_func(); + +copy parted_copytest from :'filename'; + +select tableoid::regclass,count(*),sum(a) from parted_copytest +group by tableoid order by tableoid::regclass::name; + +truncate table parted_copytest; +create index on parted_copytest (b); +drop trigger part_ins_trig on parted_copytest_a2; + +copy parted_copytest from stdin; +1 1 str1 +2 2 str2 +\. + +-- Ensure index entries were properly added during the copy. +select * from parted_copytest where b = 1; +select * from parted_copytest where b = 2; + +drop table parted_copytest; + +-- +-- Progress reporting for COPY +-- +create table tab_progress_reporting ( + name text, + age int4, + location point, + salary int4, + manager name +); + +-- Add a trigger to catch and print the contents of the catalog view +-- pg_stat_progress_copy during data insertion. This allows to test +-- the validation of some progress reports for COPY FROM where the trigger +-- would fire. +create function notice_after_tab_progress_reporting() returns trigger AS +$$ +declare report record; +begin + -- The fields ignored here are the ones that may not remain + -- consistent across multiple runs. The sizes reported may differ + -- across platforms, so just check if these are strictly positive. + with progress_data as ( + select + relid::regclass::text as relname, + command, + type, + bytes_processed > 0 as has_bytes_processed, + bytes_total > 0 as has_bytes_total, + tuples_processed, + tuples_excluded + from pg_stat_progress_copy + where pid = pg_backend_pid()) + select into report (to_jsonb(r)) as value + from progress_data r; + + raise info 'progress: %', report.value::text; + return new; +end; +$$ language plpgsql; + +create trigger check_after_tab_progress_reporting + after insert on tab_progress_reporting + for each statement + execute function notice_after_tab_progress_reporting(); + +-- Generate COPY FROM report with PIPE. +copy tab_progress_reporting from stdin; +sharon 25 (15,12) 1000 sam +sam 30 (10,5) 2000 bill +bill 20 (11,10) 1000 sharon +\. + +-- Generate COPY FROM report with FILE, with some excluded tuples. 
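+-- Rows filtered out by the WHERE clause should show up in the
+-- tuples_excluded counter of pg_stat_progress_copy, which the trigger
+-- above captures.  A manual check while a COPY runs would look roughly like:
+--   SELECT tuples_processed, tuples_excluded
+--     FROM pg_stat_progress_copy WHERE pid = pg_backend_pid();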
+truncate tab_progress_reporting; +\set filename :abs_srcdir '/data/emp.data' +copy tab_progress_reporting from :'filename' + where (salary < 2000); + +drop trigger check_after_tab_progress_reporting on tab_progress_reporting; +drop function notice_after_tab_progress_reporting(); +drop table tab_progress_reporting; diff --git a/src/test/regress/sql/create_function_0.sql b/src/test/regress/sql/create_function_0.sql new file mode 100644 index 00000000000..c5224742f94 --- /dev/null +++ b/src/test/regress/sql/create_function_0.sql @@ -0,0 +1,108 @@ +-- +-- CREATE_FUNCTION_0 +-- + +-- directory path and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX + +\set autoinclib :libdir '/autoinc' :dlsuffix +\set refintlib :libdir '/refint' :dlsuffix +\set regresslib :libdir '/regress' :dlsuffix + +-- Create a bunch of C functions that will be used by later tests: + +CREATE FUNCTION check_primary_key () + RETURNS trigger + AS :'refintlib' + LANGUAGE C; + +CREATE FUNCTION check_foreign_key () + RETURNS trigger + AS :'refintlib' + LANGUAGE C; + +CREATE FUNCTION autoinc () + RETURNS trigger + AS :'autoinclib' + LANGUAGE C; + +CREATE FUNCTION trigger_return_old () + RETURNS trigger + AS :'regresslib' + LANGUAGE C; + +CREATE FUNCTION ttdummy () + RETURNS trigger + AS :'regresslib' + LANGUAGE C; + +CREATE FUNCTION set_ttdummy (int4) + RETURNS int4 + AS :'regresslib' + LANGUAGE C STRICT; + +CREATE FUNCTION make_tuple_indirect (record) + RETURNS record + AS :'regresslib' + LANGUAGE C STRICT; + +CREATE FUNCTION test_atomic_ops() + RETURNS bool + AS :'regresslib' + LANGUAGE C; + +CREATE FUNCTION test_fdw_handler() + RETURNS fdw_handler + AS :'regresslib', 'test_fdw_handler' + LANGUAGE C; + +CREATE FUNCTION test_support_func(internal) + RETURNS internal + AS :'regresslib', 'test_support_func' + LANGUAGE C STRICT; + +CREATE FUNCTION test_opclass_options_func(internal) + RETURNS void + AS :'regresslib', 'test_opclass_options_func' + LANGUAGE C; + +CREATE FUNCTION test_enc_conversion(bytea, name, name, bool, validlen OUT int, result OUT bytea) + AS :'regresslib', 'test_enc_conversion' + LANGUAGE C STRICT; + +CREATE FUNCTION binary_coercible(oid, oid) + RETURNS bool + AS :'regresslib', 'binary_coercible' + LANGUAGE C STRICT STABLE PARALLEL SAFE; + +-- Things that shouldn't work: + +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'SELECT ''not an integer'';'; + +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'not even SQL'; + +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'SELECT 1, 2, 3;'; + +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'SELECT $2;'; + +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL + AS 'a', 'b'; + +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C + AS 'nosuchfile'; + +-- To produce stable regression test output, we have to filter the name +-- of the regresslib file out of the error message in this test. 
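+-- (With VERBOSITY set to sqlstate, psql prints only the error's SQLSTATE;
+-- the full message text remains available in the psql variable
+-- LAST_ERROR_MESSAGE, from which the file name is masked below.)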
+\set VERBOSITY sqlstate +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C + AS :'regresslib', 'nosuchsymbol'; +\set VERBOSITY default +SELECT regexp_replace(:'LAST_ERROR_MESSAGE', 'file ".*"', 'file "..."'); + +CREATE FUNCTION test1 (int) RETURNS int LANGUAGE internal + AS 'nosuch'; diff --git a/src/test/regress/sql/create_function_1.sql b/src/test/regress/sql/create_function_1.sql new file mode 100644 index 00000000000..4170b16fe6b --- /dev/null +++ b/src/test/regress/sql/create_function_1.sql @@ -0,0 +1,31 @@ +-- +-- CREATE_FUNCTION_1 +-- + +-- directory path and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX + +\set regresslib :libdir '/regress' :dlsuffix + +-- Create C functions needed by create_type.sql + +CREATE FUNCTION widget_in(cstring) + RETURNS widget + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION widget_out(widget) + RETURNS cstring + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION int44in(cstring) + RETURNS city_budget + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION int44out(city_budget) + RETURNS cstring + AS :'regresslib' + LANGUAGE C STRICT IMMUTABLE; diff --git a/src/test/regress/sql/create_function_2.sql b/src/test/regress/sql/create_function_2.sql new file mode 100644 index 00000000000..67510aed239 --- /dev/null +++ b/src/test/regress/sql/create_function_2.sql @@ -0,0 +1,96 @@ +-- +-- CREATE_FUNCTION_2 +-- + +-- directory path and dlsuffix are passed to us in environment variables +\getenv libdir PG_LIBDIR +\getenv dlsuffix PG_DLSUFFIX + +\set regresslib :libdir '/regress' :dlsuffix + + +CREATE FUNCTION hobbies(person) + RETURNS setof hobbies_r + AS 'select * from hobbies_r where person = $1.name' + LANGUAGE SQL; + + +CREATE FUNCTION hobby_construct(text, text) + RETURNS hobbies_r + AS 'select $1 as name, $2 as hobby' + LANGUAGE SQL; + + +CREATE FUNCTION hobby_construct_named(name text, hobby text) + RETURNS hobbies_r + AS 'select name, hobby' + LANGUAGE SQL; + + +CREATE FUNCTION hobbies_by_name(hobbies_r.name%TYPE) + RETURNS hobbies_r.person%TYPE + AS 'select person from hobbies_r where name = $1' + LANGUAGE SQL; + + +CREATE FUNCTION equipment(hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = $1.name' + LANGUAGE SQL; + + +CREATE FUNCTION equipment_named(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where equipment_r.hobby = equipment_named.hobby.name' + LANGUAGE SQL; + +CREATE FUNCTION equipment_named_ambiguous_1a(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = equipment_named_ambiguous_1a.hobby.name' + LANGUAGE SQL; + +CREATE FUNCTION equipment_named_ambiguous_1b(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where equipment_r.hobby = hobby.name' + LANGUAGE SQL; + +CREATE FUNCTION equipment_named_ambiguous_1c(hobby hobbies_r) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = hobby.name' + LANGUAGE SQL; + +CREATE FUNCTION equipment_named_ambiguous_2a(hobby text) + RETURNS setof equipment_r + AS 'select * from equipment_r where hobby = equipment_named_ambiguous_2a.hobby' + LANGUAGE SQL; + +CREATE FUNCTION equipment_named_ambiguous_2b(hobby text) + RETURNS setof equipment_r + AS 'select * from equipment_r where equipment_r.hobby = hobby' + LANGUAGE SQL; + + +CREATE FUNCTION pt_in_widget(point, widget) + RETURNS bool + AS :'regresslib' + LANGUAGE C STRICT; + +CREATE FUNCTION overpaid(emp) + 
RETURNS bool + AS :'regresslib' + LANGUAGE C STRICT; + +CREATE FUNCTION interpt_pp(path, path) + RETURNS point + AS :'regresslib' + LANGUAGE C STRICT; + +CREATE FUNCTION reverse_name(name) + RETURNS name + AS :'regresslib' + LANGUAGE C STRICT; + +-- +-- Function dynamic loading +-- +LOAD :'regresslib'; diff --git a/src/test/regress/sql/largeobject.sql b/src/test/regress/sql/largeobject.sql new file mode 100644 index 00000000000..16da077f3a2 --- /dev/null +++ b/src/test/regress/sql/largeobject.sql @@ -0,0 +1,279 @@ +-- +-- Test large object support +-- + +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR + +-- ensure consistent test output regardless of the default bytea format +SET bytea_output TO escape; + +-- Load a file +CREATE TABLE lotest_stash_values (loid oid, fd integer); +-- lo_creat(mode integer) returns oid +-- The mode arg to lo_creat is unused, some vestigal holdover from ancient times +-- returns the large object id +INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42); + +-- Test ALTER LARGE OBJECT +CREATE ROLE regress_lo_user; +DO $$ + BEGIN + EXECUTE 'ALTER LARGE OBJECT ' || (select loid from lotest_stash_values) + || ' OWNER TO regress_lo_user'; + END +$$; +SELECT + rol.rolname +FROM + lotest_stash_values s + JOIN pg_largeobject_metadata lo ON s.loid = lo.oid + JOIN pg_authid rol ON lo.lomowner = rol.oid; + +-- NOTE: large objects require transactions +BEGIN; + +-- lo_open(lobjId oid, mode integer) returns integer +-- The mode parameter to lo_open uses two constants: +-- INV_READ = 0x20000 +-- INV_WRITE = 0x40000 +-- The return value is a file descriptor-like value which remains valid for the +-- transaction. +UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); + +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- lowrite(fd integer, data bytea) returns integer +-- the integer is the number of bytes written +SELECT lowrite(fd, ' +I wandered lonely as a cloud +That floats on high o''er vales and hills, +When all at once I saw a crowd, +A host, of golden daffodils; +Beside the lake, beneath the trees, +Fluttering and dancing in the breeze. + +Continuous as the stars that shine +And twinkle on the milky way, +They stretched in never-ending line +Along the margin of a bay: +Ten thousand saw I at a glance, +Tossing their heads in sprightly dance. + +The waves beside them danced; but they +Out-did the sparkling waves in glee: +A poet could not but be gay, +In such a jocund company: +I gazed--and gazed--but little thought +What wealth the show to me had brought: + +For oft, when on my couch I lie +In vacant or in pensive mood, +They flash upon that inward eye +Which is the bliss of solitude; +And then my heart with pleasure fills, +And dances with the daffodils. + + -- William Wordsworth +') FROM lotest_stash_values; + +-- lo_close(fd integer) returns integer +-- return value is 0 for success, or <0 for error (actually only -1, but...) +SELECT lo_close(fd) FROM lotest_stash_values; + +END; + +-- Copy to another large object. +-- Note: we intentionally don't remove the object created here; +-- it's left behind to help test pg_dump. + +SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values +\gset + +-- Add a comment to it, as well, for pg_dump/pg_upgrade testing. 
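+-- (:newloid was captured by the \gset just above, so psql substitutes the
+-- new large object's OID into this command.)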
+COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud'; + +-- Read out a portion +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); + +-- lo_lseek(fd integer, offset integer, whence integer) returns integer +-- offset is in bytes, whence is one of three values: +-- SEEK_SET (= 0) meaning relative to beginning +-- SEEK_CUR (= 1) meaning relative to current position +-- SEEK_END (= 2) meaning relative to end (offset better be negative) +-- returns current position in file +SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values; + +-- loread/lowrite names are wonky, different from other functions which are lo_* +-- loread(fd integer, len integer) returns bytea +SELECT loread(fd, 28) FROM lotest_stash_values; + +SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values; + +SELECT lowrite(fd, 'n') FROM lotest_stash_values; + +SELECT lo_tell(fd) FROM lotest_stash_values; + +SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values; + +SELECT loread(fd, 28) FROM lotest_stash_values; + +SELECT lo_close(fd) FROM lotest_stash_values; + +END; + +-- Test resource management +BEGIN; +SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; +ABORT; + +\set filename :abs_builddir '/results/invalid/path' +\set dobody 'DECLARE loid oid; BEGIN ' +\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' +\set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); ' +\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' +\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' +DO :'dobody'; + +-- Test truncation. +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); + +SELECT lo_truncate(fd, 11) FROM lotest_stash_values; +SELECT loread(fd, 15) FROM lotest_stash_values; + +SELECT lo_truncate(fd, 10000) FROM lotest_stash_values; +SELECT loread(fd, 10) FROM lotest_stash_values; +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; +SELECT lo_tell(fd) FROM lotest_stash_values; + +SELECT lo_truncate(fd, 5000) FROM lotest_stash_values; +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; +SELECT lo_tell(fd) FROM lotest_stash_values; + +SELECT lo_close(fd) FROM lotest_stash_values; +END; + +-- Test 64-bit large object functions. 
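+-- (The lo_*64 variants take and return bigint offsets, so they can address
+-- positions past 2GB; the plain 32-bit forms used above would overflow
+-- there, e.g. at the 4GB seek below.)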
+BEGIN; +UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer)); + +SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values; +SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values; +SELECT lo_tell64(fd) FROM lotest_stash_values; + +SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values; +SELECT lo_tell64(fd) FROM lotest_stash_values; +SELECT loread(fd, 10) FROM lotest_stash_values; + +SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values; +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; +SELECT lo_tell64(fd) FROM lotest_stash_values; + +SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values; +SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values; +SELECT lo_tell64(fd) FROM lotest_stash_values; + +SELECT lo_close(fd) FROM lotest_stash_values; +END; + +-- lo_unlink(lobjId oid) returns integer +-- return value appears to always be 1 +SELECT lo_unlink(loid) from lotest_stash_values; + +TRUNCATE lotest_stash_values; + +\set filename :abs_srcdir '/data/tenk.data' +INSERT INTO lotest_stash_values (loid) SELECT lo_import(:'filename'); + +BEGIN; +UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer)); + +-- verify length of large object +SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values; + +-- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block +-- edge case +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + +-- this should get half of the value from page 0 and half from page 1 of the +-- large object +SELECT loread(fd, 36) FROM lotest_stash_values; + +SELECT lo_tell(fd) FROM lotest_stash_values; + +SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values; + +SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values; + +SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values; + +SELECT loread(fd, 36) FROM lotest_stash_values; + +SELECT lo_close(fd) FROM lotest_stash_values; +END; + +\set filename :abs_builddir '/results/lotest.txt' +SELECT lo_export(loid, :'filename') FROM lotest_stash_values; + +\lo_import :filename + +\set newloid :LASTOID + +-- just make sure \lo_export does not barf +\set filename :abs_builddir '/results/lotest2.txt' +\lo_export :newloid :filename + +-- This is a hack to test that export/import are reversible +-- This uses knowledge about the inner workings of large object mechanism +-- which should not be used outside it. 
This makes it a HACK +SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) +EXCEPT +SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid; + +SELECT lo_unlink(loid) FROM lotest_stash_values; + +TRUNCATE lotest_stash_values; + +\lo_unlink :newloid + +\set filename :abs_builddir '/results/lotest.txt' +\lo_import :filename + +\set newloid_1 :LASTOID + +SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2 +\gset + +SELECT md5(lo_get(:newloid_1)) = md5(lo_get(:newloid_2)); + +SELECT lo_get(:newloid_1, 0, 20); +SELECT lo_get(:newloid_1, 10, 20); +SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex')); +SELECT lo_get(:newloid_1, 0, 20); + +SELECT lo_put(:newloid_1, 4294967310, 'foo'); +SELECT lo_get(:newloid_1); +SELECT lo_get(:newloid_1, 4294967294, 100); + +\lo_unlink :newloid_1 +\lo_unlink :newloid_2 + +-- This object is left in the database for pg_dump test purposes +SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid +\gset + +SET bytea_output TO hex; +SELECT lo_get(:newloid); + +-- Create one more object that we leave behind for testing pg_dump/pg_upgrade; +-- this one intentionally has an OID in the system range +SELECT lo_create(2121); + +COMMENT ON LARGE OBJECT 2121 IS 'testing comments'; + +-- Clean up +DROP TABLE lotest_stash_values; + +DROP ROLE regress_lo_user; diff --git a/src/test/regress/sql/misc.sql b/src/test/regress/sql/misc.sql new file mode 100644 index 00000000000..a1e2f779ba7 --- /dev/null +++ b/src/test/regress/sql/misc.sql @@ -0,0 +1,268 @@ +-- +-- MISC +-- + +-- directory paths are passed to us in environment variables +\getenv abs_srcdir PG_ABS_SRCDIR +\getenv abs_builddir PG_ABS_BUILDDIR + +-- +-- BTREE +-- +UPDATE onek + SET unique1 = onek.unique1 + 1; + +UPDATE onek + SET unique1 = onek.unique1 - 1; + +-- +-- BTREE partial +-- +-- UPDATE onek2 +-- SET unique1 = onek2.unique1 + 1; + +--UPDATE onek2 +-- SET unique1 = onek2.unique1 - 1; + +-- +-- BTREE shutting out non-functional updates +-- +-- the following two tests seem to take a long time on some +-- systems. This non-func update stuff needs to be examined +-- more closely. - jolly (2/22/96) +-- +UPDATE tmp + SET stringu1 = reverse_name(onek.stringu1) + FROM onek + WHERE onek.stringu1 = 'JBAAAA' and + onek.stringu1 = tmp.stringu1; + +UPDATE tmp + SET stringu1 = reverse_name(onek2.stringu1) + FROM onek2 + WHERE onek2.stringu1 = 'JCAAAA' and + onek2.stringu1 = tmp.stringu1; + +DROP TABLE tmp; + +--UPDATE person* +-- SET age = age + 1; + +--UPDATE person* +-- SET age = age + 3 +-- WHERE name = 'linda'; + +-- +-- copy +-- +\set filename :abs_builddir '/results/onek.data' +COPY onek TO :'filename'; + +DELETE FROM onek; + +COPY onek FROM :'filename'; + +SELECT unique1 FROM onek WHERE unique1 < 2 ORDER BY unique1; + +DELETE FROM onek2; + +COPY onek2 FROM :'filename'; + +SELECT unique1 FROM onek2 WHERE unique1 < 2 ORDER BY unique1; + +\set filename :abs_builddir '/results/stud_emp.data' +COPY BINARY stud_emp TO :'filename'; + +DELETE FROM stud_emp; + +COPY BINARY stud_emp FROM :'filename'; + +SELECT * FROM stud_emp; + +-- COPY aggtest FROM stdin; +-- 56 7.8 +-- 100 99.097 +-- 0 0.09561 +-- 42 324.78 +-- . 
+-- COPY aggtest TO stdout; + + +-- +-- inheritance stress test +-- +SELECT * FROM a_star*; + +SELECT * + FROM b_star* x + WHERE x.b = text 'bumble' or x.a < 3; + +SELECT class, a + FROM c_star* x + WHERE x.c ~ text 'hi'; + +SELECT class, b, c + FROM d_star* x + WHERE x.a < 100; + +SELECT class, c FROM e_star* x WHERE x.c NOTNULL; + +SELECT * FROM f_star* x WHERE x.c ISNULL; + +-- grouping and aggregation on inherited sets have been busted in the past... + +SELECT sum(a) FROM a_star*; + +SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class; + + +ALTER TABLE f_star RENAME COLUMN f TO ff; + +ALTER TABLE e_star* RENAME COLUMN e TO ee; + +ALTER TABLE d_star* RENAME COLUMN d TO dd; + +ALTER TABLE c_star* RENAME COLUMN c TO cc; + +ALTER TABLE b_star* RENAME COLUMN b TO bb; + +ALTER TABLE a_star* RENAME COLUMN a TO aa; + +SELECT class, aa + FROM a_star* x + WHERE aa ISNULL; + +-- As of Postgres 7.1, ALTER implicitly recurses, +-- so this should be same as ALTER a_star* + +ALTER TABLE a_star RENAME COLUMN aa TO foo; + +SELECT class, foo + FROM a_star* x + WHERE x.foo >= 2; + +ALTER TABLE a_star RENAME COLUMN foo TO aa; + +SELECT * + from a_star* + WHERE aa < 1000; + +ALTER TABLE f_star ADD COLUMN f int4; + +UPDATE f_star SET f = 10; + +ALTER TABLE e_star* ADD COLUMN e int4; + +--UPDATE e_star* SET e = 42; + +SELECT * FROM e_star*; + +ALTER TABLE a_star* ADD COLUMN a text; + +-- That ALTER TABLE should have added TOAST tables. +SELECT relname, reltoastrelid <> 0 AS has_toast_table + FROM pg_class + WHERE oid::regclass IN ('a_star', 'c_star') + ORDER BY 1; + +--UPDATE b_star* +-- SET a = text 'gazpacho' +-- WHERE aa > 4; + +SELECT class, aa, a FROM a_star*; + + +-- +-- versions +-- + +-- +-- postquel functions +-- +-- +-- mike does post_hacking, +-- joe and sally play basketball, and +-- everyone else does nothing. +-- +SELECT p.name, name(p.hobbies) FROM ONLY person p; + +-- +-- as above, but jeff also does post_hacking. +-- +SELECT p.name, name(p.hobbies) FROM person* p; + +-- +-- the next two queries demonstrate how functions generate bogus duplicates. +-- this is a "feature" .. +-- +SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r + ORDER BY 1,2; + +SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r; + +-- +-- mike needs advil and peet's coffee, +-- joe and sally need hightops, and +-- everyone else is fine. +-- +SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p; + +-- +-- as above, but jeff needs advil and peet's coffee as well. +-- +SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p; + +-- +-- just like the last two, but make sure that the target list fixup and +-- unflattening is being done correctly. 
+-- +SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p; + +SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p; + +SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p; + +SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p; + +SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); + +SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer'))); + +SELECT name(equipment_named_ambiguous_2a(text 'skywalking')); + +SELECT name(equipment_named_ambiguous_2b(text 'skywalking')); + +SELECT hobbies_by_name('basketball'); + +SELECT name, overpaid(emp.*) FROM emp; + +-- +-- Try a few cases with SQL-spec row constructor expressions +-- +SELECT * FROM equipment(ROW('skywalking', 'mer')); + +SELECT name(equipment(ROW('skywalking', 'mer'))); + +SELECT *, name(equipment(h.*)) FROM hobbies_r h; + +SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h; + +-- +-- functional joins +-- + +-- +-- instance rules +-- + +-- +-- rewrite rules +-- diff --git a/src/test/regress/sql/tablespace.sql b/src/test/regress/sql/tablespace.sql new file mode 100644 index 00000000000..92076db9a13 --- /dev/null +++ b/src/test/regress/sql/tablespace.sql @@ -0,0 +1,416 @@ +-- directory paths are passed to us in environment variables +\getenv abs_builddir PG_ABS_BUILDDIR + +\set testtablespace :abs_builddir '/testtablespace' + +-- create a tablespace using WITH clause +CREATE TABLESPACE regress_tblspacewith LOCATION :'testtablespace' WITH (some_nonexistent_parameter = true); -- fail +CREATE TABLESPACE regress_tblspacewith LOCATION :'testtablespace' WITH (random_page_cost = 3.0); -- ok + +-- check to see the parameter was used +SELECT spcoptions FROM pg_tablespace WHERE spcname = 'regress_tblspacewith'; + +-- drop the tablespace so we can re-use the location +DROP TABLESPACE regress_tblspacewith; + +-- create a tablespace we can use +CREATE TABLESPACE regress_tblspace LOCATION :'testtablespace'; + +-- try setting and resetting some properties for the new tablespace +ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1); +ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail +ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail +ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok + +-- REINDEX (TABLESPACE) +-- catalogs and system tablespaces +-- system catalog, fail +REINDEX (TABLESPACE regress_tblspace) TABLE pg_am; +REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_am; +-- shared catalog, fail +REINDEX (TABLESPACE regress_tblspace) TABLE pg_authid; +REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_authid; +-- toast relations, fail +REINDEX (TABLESPACE regress_tblspace) INDEX pg_toast.pg_toast_1260_index; +REINDEX (TABLESPACE regress_tblspace) INDEX CONCURRENTLY pg_toast.pg_toast_1260_index; +REINDEX (TABLESPACE regress_tblspace) TABLE pg_toast.pg_toast_1260; +REINDEX (TABLESPACE regress_tblspace) TABLE CONCURRENTLY pg_toast.pg_toast_1260; +-- system catalog, fail 
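+-- (pg_global is the shared system tablespace; only shared catalogs may
+-- live there, so every attempt to move user or catalog relations into it
+-- should fail.)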
+REINDEX (TABLESPACE pg_global) TABLE pg_authid; +REINDEX (TABLESPACE pg_global) TABLE CONCURRENTLY pg_authid; + +-- table with toast relation +CREATE TABLE regress_tblspace_test_tbl (num1 bigint, num2 double precision, t text); +INSERT INTO regress_tblspace_test_tbl (num1, num2, t) + SELECT round(random()*100), random(), 'text' + FROM generate_series(1, 10) s(i); +CREATE INDEX regress_tblspace_test_tbl_idx ON regress_tblspace_test_tbl (num1); +-- move to global tablespace, fail +REINDEX (TABLESPACE pg_global) INDEX regress_tblspace_test_tbl_idx; +REINDEX (TABLESPACE pg_global) INDEX CONCURRENTLY regress_tblspace_test_tbl_idx; + +-- check transactional behavior of REINDEX (TABLESPACE) +BEGIN; +REINDEX (TABLESPACE regress_tblspace) INDEX regress_tblspace_test_tbl_idx; +REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; +ROLLBACK; +-- no relation moved to the new tablespace +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace'; + +-- check that all indexes are moved to a new tablespace with different +-- relfilenode. +-- Save first the existing relfilenode for the toast and main relations. +SELECT relfilenode as main_filenode FROM pg_class + WHERE relname = 'regress_tblspace_test_tbl_idx' \gset +SELECT relfilenode as toast_filenode FROM pg_class + WHERE oid = + (SELECT i.indexrelid + FROM pg_class c, + pg_index i + WHERE i.indrelid = c.reltoastrelid AND + c.relname = 'regress_tblspace_test_tbl') \gset +REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; +ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE regress_tblspace; +ALTER TABLE regress_tblspace_test_tbl SET TABLESPACE pg_default; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; +-- Move back to the default tablespace. +ALTER INDEX regress_tblspace_test_tbl_idx SET TABLESPACE pg_default; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; +REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE regress_tblspace_test_tbl; +SELECT c.relname FROM pg_class c, pg_tablespace s + WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' + ORDER BY c.relname; +SELECT relfilenode = :main_filenode AS main_same FROM pg_class + WHERE relname = 'regress_tblspace_test_tbl_idx'; +SELECT relfilenode = :toast_filenode as toast_same FROM pg_class + WHERE oid = + (SELECT i.indexrelid + FROM pg_class c, + pg_index i + WHERE i.indrelid = c.reltoastrelid AND + c.relname = 'regress_tblspace_test_tbl'); +DROP TABLE regress_tblspace_test_tbl; + +-- REINDEX (TABLESPACE) with partitions +-- Create a partition tree and check the set of relations reindexed +-- with their new tablespace. +CREATE TABLE tbspace_reindex_part (c1 int, c2 int) PARTITION BY RANGE (c1); +CREATE TABLE tbspace_reindex_part_0 PARTITION OF tbspace_reindex_part + FOR VALUES FROM (0) TO (10) PARTITION BY list (c2); +CREATE TABLE tbspace_reindex_part_0_1 PARTITION OF tbspace_reindex_part_0 + FOR VALUES IN (1); +CREATE TABLE tbspace_reindex_part_0_2 PARTITION OF tbspace_reindex_part_0 + FOR VALUES IN (2); +-- This partitioned table will have no partitions. 
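+-- (With no leaf partitions there is no storage to reindex here, so this
+-- mainly checks that empty partitioned relations are handled gracefully.)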
+CREATE TABLE tbspace_reindex_part_10 PARTITION OF tbspace_reindex_part + FOR VALUES FROM (10) TO (20) PARTITION BY list (c2); +-- Create some partitioned indexes +CREATE INDEX tbspace_reindex_part_index ON ONLY tbspace_reindex_part (c1); +CREATE INDEX tbspace_reindex_part_index_0 ON ONLY tbspace_reindex_part_0 (c1); +ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_0; +-- This partitioned index will have no partitions. +CREATE INDEX tbspace_reindex_part_index_10 ON ONLY tbspace_reindex_part_10 (c1); +ALTER INDEX tbspace_reindex_part_index ATTACH PARTITION tbspace_reindex_part_index_10; +CREATE INDEX tbspace_reindex_part_index_0_1 ON ONLY tbspace_reindex_part_0_1 (c1); +ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_1; +CREATE INDEX tbspace_reindex_part_index_0_2 ON ONLY tbspace_reindex_part_0_2 (c1); +ALTER INDEX tbspace_reindex_part_index_0 ATTACH PARTITION tbspace_reindex_part_index_0_2; +SELECT relid, parentrelid, level FROM pg_partition_tree('tbspace_reindex_part_index') + ORDER BY relid, level; +-- Track the original tablespace, relfilenode and OID of each index +-- in the tree. +CREATE TEMP TABLE reindex_temp_before AS + SELECT oid, relname, relfilenode, reltablespace + FROM pg_class + WHERE relname ~ 'tbspace_reindex_part_index'; +REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tbspace_reindex_part; +-- REINDEX CONCURRENTLY changes the OID of the old relation, hence a check +-- based on the relation name below. +SELECT b.relname, + CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged' + ELSE 'relfilenode has changed' END AS filenode, + CASE WHEN a.reltablespace = b.reltablespace THEN 'reltablespace is unchanged' + ELSE 'reltablespace has changed' END AS tbspace + FROM reindex_temp_before b JOIN pg_class a ON b.relname = a.relname + ORDER BY 1; +DROP TABLE tbspace_reindex_part; + +-- create a schema we can use +CREATE SCHEMA testschema; + +-- try a table +CREATE TABLE testschema.foo (i int) TABLESPACE regress_tblspace; +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'foo'; + +INSERT INTO testschema.foo VALUES(1); +INSERT INTO testschema.foo VALUES(2); + +-- tables from dynamic sources +CREATE TABLE testschema.asselect TABLESPACE regress_tblspace AS SELECT 1; +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'asselect'; + +PREPARE selectsource(int) AS SELECT $1; +CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace + AS EXECUTE selectsource(2); +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'asexecute'; + +-- index +CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace; +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname = 'foo_idx'; + +-- check \d output +\d testschema.foo +\d testschema.foo_idx + +-- +-- partitioned table +-- +CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); +SET default_tablespace TO pg_global; +CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); +RESET default_tablespace; +CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1); +SET default_tablespace TO regress_tblspace; +CREATE TABLE testschema.part_2 PARTITION OF testschema.part FOR VALUES IN (2); +SET 
default_tablespace TO pg_global; +CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); +ALTER TABLE testschema.part SET TABLESPACE regress_tblspace; +CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3); +CREATE TABLE testschema.part_4 PARTITION OF testschema.part FOR VALUES IN (4) + TABLESPACE pg_default; +CREATE TABLE testschema.part_56 PARTITION OF testschema.part FOR VALUES IN (5, 6) + PARTITION BY LIST (a); +ALTER TABLE testschema.part SET TABLESPACE pg_default; +CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) + PARTITION BY LIST (a); +CREATE TABLE testschema.part_910 PARTITION OF testschema.part FOR VALUES IN (9, 10) + PARTITION BY LIST (a) TABLESPACE regress_tblspace; +RESET default_tablespace; +CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8) + PARTITION BY LIST (a); + +SELECT relname, spcname FROM pg_catalog.pg_class c + JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid) + LEFT JOIN pg_catalog.pg_tablespace t ON c.reltablespace = t.oid + where c.relname LIKE 'part%' AND n.nspname = 'testschema' order by relname; +RESET default_tablespace; +DROP TABLE testschema.part; + +-- partitioned index +CREATE TABLE testschema.part (a int) PARTITION BY LIST (a); +CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1); +CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace; +CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2); +SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c + where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx'; +\d testschema.part +\d+ testschema.part +\d testschema.part1 +\d+ testschema.part1 +\d testschema.part_a_idx +\d+ testschema.part_a_idx + +-- partitioned rels cannot specify the default tablespace. 
These fail: +CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE pg_default; +CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE pg_default) PARTITION BY LIST (a); +SET default_tablespace TO 'pg_default'; +CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE regress_tblspace; +CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a); +-- but these work: +CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a) TABLESPACE regress_tblspace; +SET default_tablespace TO ''; +CREATE TABLE testschema.dflt2 (a int PRIMARY KEY) PARTITION BY LIST (a); +DROP TABLE testschema.dflt, testschema.dflt2; + +-- check that default_tablespace doesn't affect ALTER TABLE index rebuilds +CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace; +INSERT INTO testschema.test_default_tab VALUES (1); +CREATE INDEX test_index1 on testschema.test_default_tab (id); +CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_tblspace; +ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index3 PRIMARY KEY (id); +ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; + +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d testschema.test_index4 +-- use a custom tablespace for default_tablespace +SET default_tablespace TO regress_tblspace; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d testschema.test_index4 +SELECT * FROM testschema.test_default_tab; +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE int; +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d testschema.test_index4 +SELECT * FROM testschema.test_default_tab; +-- now use the default tablespace for default_tablespace +SET default_tablespace TO ''; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE int; +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d testschema.test_index4 +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d testschema.test_index4 +DROP TABLE testschema.test_default_tab; + +-- check that default_tablespace doesn't affect ALTER TABLE index rebuilds +-- (this time with a partitioned table) +CREATE TABLE testschema.test_default_tab_p(id bigint, val bigint) + PARTITION BY LIST (id) TABLESPACE regress_tblspace; +CREATE TABLE testschema.test_default_tab_p1 PARTITION OF testschema.test_default_tab_p + FOR VALUES IN (1); +INSERT INTO testschema.test_default_tab_p VALUES (1); +CREATE INDEX test_index1 on testschema.test_default_tab_p (val); +CREATE INDEX test_index2 on testschema.test_default_tab_p (val) TABLESPACE regress_tblspace; +ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index3 PRIMARY KEY (id); +ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; + +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d 
testschema.test_index4 +-- use a custom tablespace for default_tablespace +SET default_tablespace TO regress_tblspace; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d testschema.test_index4 +SELECT * FROM testschema.test_default_tab_p; +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d testschema.test_index4 +SELECT * FROM testschema.test_default_tab_p; +-- now use the default tablespace for default_tablespace +SET default_tablespace TO ''; +-- tablespace should not change if no rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d testschema.test_index4 +-- tablespace should not change even if there is an index rewrite +ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; +\d testschema.test_index1 +\d testschema.test_index2 +\d testschema.test_index3 +\d testschema.test_index4 +DROP TABLE testschema.test_default_tab_p; + +-- check that default_tablespace affects index additions in ALTER TABLE +CREATE TABLE testschema.test_tab(id int) TABLESPACE regress_tblspace; +INSERT INTO testschema.test_tab VALUES (1); +SET default_tablespace TO regress_tblspace; +ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id); +SET default_tablespace TO ''; +ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id); +\d testschema.test_tab_unique +\d testschema.test_tab_pkey +SELECT * FROM testschema.test_tab; +DROP TABLE testschema.test_tab; + +-- check that default_tablespace is handled correctly by multi-command +-- ALTER TABLE that includes a tablespace-preserving rewrite +CREATE TABLE testschema.test_tab(a int, b int, c int); +SET default_tablespace TO regress_tblspace; +ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (a); +CREATE INDEX test_tab_a_idx ON testschema.test_tab (a); +SET default_tablespace TO ''; +CREATE INDEX test_tab_b_idx ON testschema.test_tab (b); +\d testschema.test_tab_unique +\d testschema.test_tab_a_idx +\d testschema.test_tab_b_idx +ALTER TABLE testschema.test_tab ALTER b TYPE bigint, ADD UNIQUE (c); +\d testschema.test_tab_unique +\d testschema.test_tab_a_idx +\d testschema.test_tab_b_idx +DROP TABLE testschema.test_tab; + +-- let's try moving a table from one place to another +CREATE TABLE testschema.atable AS VALUES (1), (2); +CREATE UNIQUE INDEX anindex ON testschema.atable(column1); + +ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace; +ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace; +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_global; +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; +ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace; + +INSERT INTO testschema.atable VALUES(3); -- ok +INSERT INTO testschema.atable VALUES(1); -- fail (checks index) +SELECT COUNT(*) FROM testschema.atable; -- checks heap + +-- Will fail with bad path +CREATE TABLESPACE regress_badspace LOCATION '/no/such/location'; + +-- No such tablespace +CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace; + +-- Fail, in use for some partitioned object +DROP TABLESPACE regress_tblspace; +ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; +-- Fail, 
not empty
+DROP TABLESPACE regress_tblspace;
+
+CREATE ROLE regress_tablespace_user1 login;
+CREATE ROLE regress_tablespace_user2 login;
+GRANT USAGE ON SCHEMA testschema TO regress_tablespace_user2;
+
+ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1;
+
+CREATE TABLE testschema.tablespace_acl (c int);
+-- new owner lacks permission to create this index from scratch
+CREATE INDEX k ON testschema.tablespace_acl (c) TABLESPACE regress_tblspace;
+ALTER TABLE testschema.tablespace_acl OWNER TO regress_tablespace_user2;
+
+SET SESSION ROLE regress_tablespace_user2;
+CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail
+ALTER TABLE testschema.tablespace_acl ALTER c TYPE bigint;
+REINDEX (TABLESPACE regress_tblspace) TABLE tablespace_table; -- fail
+REINDEX (TABLESPACE regress_tblspace, CONCURRENTLY) TABLE tablespace_table; -- fail
+RESET ROLE;
+
+ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed;
+
+ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+
+-- Should show notice that nothing was done
+ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+
+-- Should succeed
+DROP TABLESPACE regress_tblspace_renamed;
+
+DROP SCHEMA testschema CASCADE;
+
+DROP ROLE regress_tablespace_user1;
+DROP ROLE regress_tablespace_user2;