author     Tom Lane <tgl@sss.pgh.pa.us>  2008-12-13 19:13:44 +0000
committer  Tom Lane <tgl@sss.pgh.pa.us>  2008-12-13 19:13:44 +0000
commit     65e3ea76417d1baab158fd8305ebed4f43141c7a (patch)
tree       289f332ba577bcbd9794c3e11ef2b3b89cb078be  /src/backend/commands/analyze.c
parent     b69bde774982ac4497cec9c9fe0190097890292f (diff)
Increase the default value of default_statistics_target from 10 to 100,
and its maximum value from 1000 to 10000. ALTER TABLE SET STATISTICS similarly now allows a value up to 10000. Per discussion.
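For illustration only (the table and column names below are placeholders, not part of the patch), the new ceiling applies both to the GUC and to per-column statistics targets:

    SET default_statistics_target = 100;   -- new default; the GUC now accepts values up to 10000
    ALTER TABLE my_table ALTER COLUMN my_col SET STATISTICS 10000;   -- per-column target, now allowed up to 10000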
Diffstat (limited to 'src/backend/commands/analyze.c')
-rw-r--r--  src/backend/commands/analyze.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 6b95075be1f..2b3af54ff05 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.128 2008/11/10 00:49:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.129 2008/12/13 19:13:44 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,7 +67,7 @@ typedef struct AnlIndexData
/* Default statistics target (GUC parameter) */
-int default_statistics_target = 10;
+int default_statistics_target = 100;
/* A few variables that don't seem worth passing around as parameters */
static int elevel = -1;
@@ -1531,10 +1531,10 @@ std_typanalyze(VacAttrStats *stats)
* error in bin size f, and error probability gamma, the minimum
* random sample size is
* r = 4 * k * ln(2*n/gamma) / f^2
- * Taking f = 0.5, gamma = 0.01, n = 1 million rows, we obtain
+ * Taking f = 0.5, gamma = 0.01, n = 10^6 rows, we obtain
* r = 305.82 * k
* Note that because of the log function, the dependence on n is
- * quite weak; even at n = 1 billion, a 300*k sample gives <= 0.59
+ * quite weak; even at n = 10^12, a 300*k sample gives <= 0.66
* bin size error with probability 0.99. So there's no real need to
* scale for n, which is a good thing because we don't necessarily
* know it at this point.
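To make the arithmetic behind the revised figures explicit (a worked check of the comment's formula, not part of the patch), substituting f = 0.5, gamma = 0.01, n = 10^6 into the bound gives

\[
r \;=\; \frac{4\,k\,\ln(2n/\gamma)}{f^{2}}
  \;=\; 16\,k\,\ln\!\left(2\cdot 10^{8}\right)
  \;\approx\; 305.82\,k ,
\]

and holding the sample at r = 300*k while letting n grow to 10^12, then solving for the bin-size error, gives

\[
f \;=\; \sqrt{\frac{4\,\ln(2n/\gamma)}{300}}
  \;=\; \sqrt{\frac{4\,\ln\!\left(2\cdot 10^{14}\right)}{300}}
  \;\approx\; 0.66 ,
\]

which holds with probability 1 - gamma = 0.99, matching the updated comment.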