Diffstat (limited to 'src')
-rw-r--r--  src/backend/utils/activity/pgstat.c |  23
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c |  10
-rw-r--r--  src/include/catalog/pg_proc.dat     |   5
-rw-r--r--  src/include/pgstat.h                |   1
-rw-r--r--  src/test/regress/expected/brin.out  |  22
-rw-r--r--  src/test/regress/expected/stats.out | 123
-rw-r--r--  src/test/regress/parallel_schedule  |   5
-rw-r--r--  src/test/regress/sql/brin.sql       |  19
-rw-r--r--  src/test/regress/sql/stats.sql      | 119
9 files changed, 87 insertions(+), 240 deletions(-)
diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index db30c72073d..111869b2115 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -220,6 +220,12 @@ static dlist_head pgStatPending = DLIST_STATIC_INIT(pgStatPending);
/*
+ * Force the next stats flush to happen regardless of
+ * PGSTAT_MIN_INTERVAL. Useful in test scripts.
+ */
+static bool pgStatForceNextFlush = false;
+
+/*
* For assertions that check pgstat is not used before initialization / after
* shutdown.
*/
@@ -560,6 +566,13 @@ pgstat_report_stat(bool force)
pgstat_assert_is_up();
Assert(!IsTransactionBlock());
+ /* "absorb" the forced flush even if there's nothing to flush */
+ if (pgStatForceNextFlush)
+ {
+ force = true;
+ pgStatForceNextFlush = false;
+ }
+
/* Don't expend a clock check if nothing to do */
if (dlist_is_empty(&pgStatPending) &&
!have_slrustats &&
@@ -638,6 +651,16 @@ pgstat_report_stat(bool force)
}
/*
+ * Force locally pending stats to be flushed during the next
+ * pgstat_report_stat() call. This is useful for writing tests.
+ */
+void
+pgstat_force_next_flush(void)
+{
+ pgStatForceNextFlush = true;
+}
+
+/*
* Only for use by pgstat_reset_counters()
*/
static bool
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 248d318f866..5f1815727de 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -2067,6 +2067,16 @@ pg_stat_clear_snapshot(PG_FUNCTION_ARGS)
}
+/* Force statistics to be reported at the next occasion */
+Datum
+pg_stat_force_next_flush(PG_FUNCTION_ARGS)
+{
+ pgstat_force_next_flush();
+
+ PG_RETURN_VOID();
+}
+
+
/* Reset all counters for the current database */
Datum
pg_stat_reset(PG_FUNCTION_ARGS)
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 0f0f41b2f92..2b4438bc3d5 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -5744,6 +5744,11 @@
proname => 'pg_stat_clear_snapshot', proisstrict => 'f', provolatile => 'v',
proparallel => 'r', prorettype => 'void', proargtypes => '',
prosrc => 'pg_stat_clear_snapshot' },
+{ oid => '2137',
+ descr => 'statistics: force stats to be flushed after the next commit',
+ proname => 'pg_stat_force_next_flush', proisstrict => 'f', provolatile => 'v',
+ proparallel => 'r', prorettype => 'void', proargtypes => '',
+ prosrc => 'pg_stat_force_next_flush' },
{ oid => '2274',
descr => 'statistics: reset collected statistics for current database',
proname => 'pg_stat_reset', proisstrict => 'f', provolatile => 'v',
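
The intended usage pattern for the new SQL-callable function, as a hedged
sketch (the table and column names below are illustrative, not taken from
this patch): run some DML, arm the one-shot flush, and the pending counters
are written out when the backend next reports its stats, so the following
query can read them without a retry loop.

    -- illustrative test fragment, not part of the patch
    UPDATE some_table SET val = val + 1 WHERE id = 1;
    -- arm the flush; pgstat_report_stat() absorbs the flag when the
    -- backend next goes idle, regardless of PGSTAT_MIN_INTERVAL
    SELECT pg_stat_force_next_flush();
    -- the counters are now flushed, so this read needs no polling
    SELECT n_tup_upd FROM pg_stat_user_tables WHERE relname = 'some_table';
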
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 8ac3860e8d4..a0b853f4a69 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -414,6 +414,7 @@ extern void pgstat_initialize(void);
/* Functions called from backends */
extern long pgstat_report_stat(bool force);
+extern void pgstat_force_next_flush(void);
extern void pgstat_reset_counters(void);
extern void pgstat_reset(PgStat_Kind kind, Oid dboid, Oid objectid);
diff --git a/src/test/regress/expected/brin.out b/src/test/regress/expected/brin.out
index 2d03d8e134e..ed7879f5831 100644
--- a/src/test/regress/expected/brin.out
+++ b/src/test/regress/expected/brin.out
@@ -603,3 +603,25 @@ SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100;
1
(1 row)
+-- test BRIN index doesn't block HOT update
+CREATE TABLE brin_hot (
+ id integer PRIMARY KEY,
+ val integer NOT NULL
+) WITH (autovacuum_enabled = off, fillfactor = 70);
+INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235);
+CREATE INDEX val_brin ON brin_hot using brin(val);
+UPDATE brin_hot SET val = -3 WHERE id = 42;
+-- ensure pending stats are flushed
+SELECT pg_stat_force_next_flush();
+ pg_stat_force_next_flush
+--------------------------
+
+(1 row)
+
+SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid);
+ pg_stat_get_tuples_hot_updated
+--------------------------------
+ 1
+(1 row)
+
+DROP TABLE brin_hot;
diff --git a/src/test/regress/expected/stats.out b/src/test/regress/expected/stats.out
index 64e2ff6b296..aa05b3b78af 100644
--- a/src/test/regress/expected/stats.out
+++ b/src/test/regress/expected/stats.out
@@ -28,63 +28,6 @@ SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch,
pg_catalog.pg_statio_user_tables AS b
WHERE t.relname='tenk2' AND b.relname='tenk2';
COMMIT;
--- function to wait for counters to advance
-create function wait_for_stats() returns void as $$
-declare
- start_time timestamptz := clock_timestamp();
- updated1 bool;
- updated2 bool;
- updated3 bool;
- updated4 bool;
-begin
- SET LOCAL stats_fetch_consistency = snapshot;
-
- -- We don't want to wait forever. No timeout suffices if the OS drops our
- -- stats traffic because an earlier test file left a full UDP buffer.
- -- Hence, don't use PG_TEST_TIMEOUT_DEFAULT, which may be large for
- -- can't-happen timeouts. Exit after 30 seconds.
- for i in 1 .. 300 loop
-
- -- With parallel query, the seqscan and indexscan on tenk2 might be done
- -- in parallel worker processes, which will send their stats counters
- -- asynchronously to what our own session does. So we must check for
- -- those counts to be registered separately from the update counts.
-
- -- check to see if seqscan has been sensed
- SELECT (st.seq_scan >= pr.seq_scan + 1) INTO updated1
- FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
- WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
- -- check to see if indexscan has been sensed
- SELECT (st.idx_scan >= pr.idx_scan + 1) INTO updated2
- FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
- WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
- -- check to see if all updates have been sensed
- SELECT (n_tup_ins > 0) INTO updated3
- FROM pg_stat_user_tables WHERE relname='trunc_stats_test4';
-
- -- We must also check explicitly that pg_stat_get_snapshot_timestamp has
- -- advanced, because that comes from the global stats file which might
- -- be older than the per-DB stats file we got the other values from.
- SELECT (pr.snap_ts < pg_stat_get_snapshot_timestamp()) INTO updated4
- FROM prevstats AS pr;
-
- exit when updated1 and updated2 and updated3 and updated4;
-
- -- wait a little
- perform pg_sleep_for('100 milliseconds');
-
- -- reset stats snapshot so we can test again
- perform pg_stat_clear_snapshot();
-
- end loop;
-
- -- report time waited in postmaster log (where it won't change test output)
- raise log 'wait_for_stats delayed % seconds',
- extract(epoch from clock_timestamp() - start_time);
-end
-$$ language plpgsql;
-- test effects of TRUNCATE on n_live_tup/n_dead_tup counters
CREATE TABLE trunc_stats_test(id serial);
CREATE TABLE trunc_stats_test1(id serial, stuff text);
@@ -153,17 +96,10 @@ SELECT count(*) FROM tenk2 WHERE unique1 = 1;
(1 row)
RESET enable_bitmapscan;
--- We can't just call wait_for_stats() at this point, because we only
--- transmit stats when the session goes idle, and we probably didn't
--- transmit the last couple of counts yet thanks to the rate-limiting logic
--- in pgstat_report_stat(). But instead of waiting for the rate limiter's
--- timeout to elapse, let's just start a new session. The old one will
--- then send its stats before dying.
-\c -
--- wait for stats collector to update
-SELECT wait_for_stats();
- wait_for_stats
-----------------
+-- ensure pending stats are flushed
+SELECT pg_stat_force_next_flush();
+ pg_stat_force_next_flush
+--------------------------
(1 row)
@@ -212,57 +148,6 @@ FROM prevstats AS pr;
COMMIT;
DROP TABLE trunc_stats_test, trunc_stats_test1, trunc_stats_test2, trunc_stats_test3, trunc_stats_test4;
DROP TABLE prevstats;
--- test BRIN index doesn't block HOT update - we include this test here, as it
--- relies on statistics collector and so it may occasionally fail, especially
--- on slower systems
-CREATE TABLE brin_hot (
- id integer PRIMARY KEY,
- val integer NOT NULL
-) WITH (autovacuum_enabled = off, fillfactor = 70);
-INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235);
-CREATE INDEX val_brin ON brin_hot using brin(val);
-CREATE FUNCTION wait_for_hot_stats() RETURNS void AS $$
-DECLARE
- start_time timestamptz := clock_timestamp();
- updated bool;
-BEGIN
- -- we don't want to wait forever; loop will exit after 30 seconds
- FOR i IN 1 .. 300 LOOP
- SELECT (pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid) > 0) INTO updated;
- EXIT WHEN updated;
-
- -- wait a little
- PERFORM pg_sleep_for('100 milliseconds');
- -- reset stats snapshot so we can test again
- PERFORM pg_stat_clear_snapshot();
- END LOOP;
- -- report time waited in postmaster log (where it won't change test output)
- RAISE log 'wait_for_hot_stats delayed % seconds',
- EXTRACT(epoch FROM clock_timestamp() - start_time);
-END
-$$ LANGUAGE plpgsql;
-UPDATE brin_hot SET val = -3 WHERE id = 42;
--- We can't just call wait_for_hot_stats() at this point, because we only
--- transmit stats when the session goes idle, and we probably didn't
--- transmit the last couple of counts yet thanks to the rate-limiting logic
--- in pgstat_report_stat(). But instead of waiting for the rate limiter's
--- timeout to elapse, let's just start a new session. The old one will
--- then send its stats before dying.
-\c -
-SELECT wait_for_hot_stats();
- wait_for_hot_stats
---------------------
-
-(1 row)
-
-SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid);
- pg_stat_get_tuples_hot_updated
---------------------------------
- 1
-(1 row)
-
-DROP TABLE brin_hot;
-DROP FUNCTION wait_for_hot_stats();
-- ensure that stats accessors handle NULL input correctly
SELECT pg_stat_get_replication_slot(NULL);
pg_stat_get_replication_slot
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 5030d19c03e..1087b2c14f4 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -124,7 +124,7 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion tr
# ----------
# Another group of parallel tests
# ----------
-test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize
+test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize stats
# event_trigger cannot run concurrently with any test that runs DDL
# oidjoins is read-only, though, and should run late for best coverage
@@ -132,6 +132,3 @@ test: event_trigger oidjoins
# this test also uses event triggers, so likewise run it by itself
test: fast_default
-
-# run stats by itself because its delay may be insufficient under heavy load
-test: stats
diff --git a/src/test/regress/sql/brin.sql b/src/test/regress/sql/brin.sql
index e12f3a0df98..920e0532494 100644
--- a/src/test/regress/sql/brin.sql
+++ b/src/test/regress/sql/brin.sql
@@ -526,3 +526,22 @@ SET enable_seqscan = off;
EXPLAIN (COSTS OFF) SELECT * FROM brin_hot_2 WHERE a = 2 AND b = 100;
SELECT COUNT(*) FROM brin_hot_2 WHERE a = 2 AND b = 100;
+
+
+-- test BRIN index doesn't block HOT update
+CREATE TABLE brin_hot (
+ id integer PRIMARY KEY,
+ val integer NOT NULL
+) WITH (autovacuum_enabled = off, fillfactor = 70);
+
+INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235);
+CREATE INDEX val_brin ON brin_hot using brin(val);
+
+UPDATE brin_hot SET val = -3 WHERE id = 42;
+
+-- ensure pending stats are flushed
+SELECT pg_stat_force_next_flush();
+
+SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid);
+
+DROP TABLE brin_hot;
diff --git a/src/test/regress/sql/stats.sql b/src/test/regress/sql/stats.sql
index 85a253bcd43..7c77c331215 100644
--- a/src/test/regress/sql/stats.sql
+++ b/src/test/regress/sql/stats.sql
@@ -27,64 +27,6 @@ SELECT t.seq_scan, t.seq_tup_read, t.idx_scan, t.idx_tup_fetch,
WHERE t.relname='tenk2' AND b.relname='tenk2';
COMMIT;
--- function to wait for counters to advance
-create function wait_for_stats() returns void as $$
-declare
- start_time timestamptz := clock_timestamp();
- updated1 bool;
- updated2 bool;
- updated3 bool;
- updated4 bool;
-begin
- SET LOCAL stats_fetch_consistency = snapshot;
-
- -- We don't want to wait forever. No timeout suffices if the OS drops our
- -- stats traffic because an earlier test file left a full UDP buffer.
- -- Hence, don't use PG_TEST_TIMEOUT_DEFAULT, which may be large for
- -- can't-happen timeouts. Exit after 30 seconds.
- for i in 1 .. 300 loop
-
- -- With parallel query, the seqscan and indexscan on tenk2 might be done
- -- in parallel worker processes, which will send their stats counters
- -- asynchronously to what our own session does. So we must check for
- -- those counts to be registered separately from the update counts.
-
- -- check to see if seqscan has been sensed
- SELECT (st.seq_scan >= pr.seq_scan + 1) INTO updated1
- FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
- WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
- -- check to see if indexscan has been sensed
- SELECT (st.idx_scan >= pr.idx_scan + 1) INTO updated2
- FROM pg_stat_user_tables AS st, pg_class AS cl, prevstats AS pr
- WHERE st.relname='tenk2' AND cl.relname='tenk2';
-
- -- check to see if all updates have been sensed
- SELECT (n_tup_ins > 0) INTO updated3
- FROM pg_stat_user_tables WHERE relname='trunc_stats_test4';
-
- -- We must also check explicitly that pg_stat_get_snapshot_timestamp has
- -- advanced, because that comes from the global stats file which might
- -- be older than the per-DB stats file we got the other values from.
- SELECT (pr.snap_ts < pg_stat_get_snapshot_timestamp()) INTO updated4
- FROM prevstats AS pr;
-
- exit when updated1 and updated2 and updated3 and updated4;
-
- -- wait a little
- perform pg_sleep_for('100 milliseconds');
-
- -- reset stats snapshot so we can test again
- perform pg_stat_clear_snapshot();
-
- end loop;
-
- -- report time waited in postmaster log (where it won't change test output)
- raise log 'wait_for_stats delayed % seconds',
- extract(epoch from clock_timestamp() - start_time);
-end
-$$ language plpgsql;
-
-- test effects of TRUNCATE on n_live_tup/n_dead_tup counters
CREATE TABLE trunc_stats_test(id serial);
CREATE TABLE trunc_stats_test1(id serial, stuff text);
@@ -151,16 +93,8 @@ SET enable_bitmapscan TO off;
SELECT count(*) FROM tenk2 WHERE unique1 = 1;
RESET enable_bitmapscan;
--- We can't just call wait_for_stats() at this point, because we only
--- transmit stats when the session goes idle, and we probably didn't
--- transmit the last couple of counts yet thanks to the rate-limiting logic
--- in pgstat_report_stat(). But instead of waiting for the rate limiter's
--- timeout to elapse, let's just start a new session. The old one will
--- then send its stats before dying.
-\c -
-
--- wait for stats collector to update
-SELECT wait_for_stats();
+-- ensure pending stats are flushed
+SELECT pg_stat_force_next_flush();
-- check effects
BEGIN;
@@ -190,55 +124,6 @@ COMMIT;
DROP TABLE trunc_stats_test, trunc_stats_test1, trunc_stats_test2, trunc_stats_test3, trunc_stats_test4;
DROP TABLE prevstats;
--- test BRIN index doesn't block HOT update - we include this test here, as it
--- relies on statistics collector and so it may occasionally fail, especially
--- on slower systems
-CREATE TABLE brin_hot (
- id integer PRIMARY KEY,
- val integer NOT NULL
-) WITH (autovacuum_enabled = off, fillfactor = 70);
-
-INSERT INTO brin_hot SELECT *, 0 FROM generate_series(1, 235);
-CREATE INDEX val_brin ON brin_hot using brin(val);
-
-CREATE FUNCTION wait_for_hot_stats() RETURNS void AS $$
-DECLARE
- start_time timestamptz := clock_timestamp();
- updated bool;
-BEGIN
- -- we don't want to wait forever; loop will exit after 30 seconds
- FOR i IN 1 .. 300 LOOP
- SELECT (pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid) > 0) INTO updated;
- EXIT WHEN updated;
-
- -- wait a little
- PERFORM pg_sleep_for('100 milliseconds');
- -- reset stats snapshot so we can test again
- PERFORM pg_stat_clear_snapshot();
- END LOOP;
- -- report time waited in postmaster log (where it won't change test output)
- RAISE log 'wait_for_hot_stats delayed % seconds',
- EXTRACT(epoch FROM clock_timestamp() - start_time);
-END
-$$ LANGUAGE plpgsql;
-
-UPDATE brin_hot SET val = -3 WHERE id = 42;
-
--- We can't just call wait_for_hot_stats() at this point, because we only
--- transmit stats when the session goes idle, and we probably didn't
--- transmit the last couple of counts yet thanks to the rate-limiting logic
--- in pgstat_report_stat(). But instead of waiting for the rate limiter's
--- timeout to elapse, let's just start a new session. The old one will
--- then send its stats before dying.
-\c -
-
-SELECT wait_for_hot_stats();
-SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid);
-
-DROP TABLE brin_hot;
-DROP FUNCTION wait_for_hot_stats();
-
-
-- ensure that stats accessors handle NULL input correctly
SELECT pg_stat_get_replication_slot(NULL);
SELECT pg_stat_get_subscription_stats(NULL);
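
For comparison, a condensed sketch of the idiom this patch removes versus the
one it introduces (identifiers are illustrative; the real removed helpers are
the wait_for_stats() and wait_for_hot_stats() functions shown above):

    -- old idiom (removed): reconnect, then poll until the counters advance
    -- \c -
    -- SELECT wait_for_stats();

    -- new idiom: force the pending stats out, then read them directly
    SELECT pg_stat_force_next_flush();
    SELECT seq_scan, idx_scan FROM pg_stat_user_tables WHERE relname = 'tenk2';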