author | Tom Lane <tgl@sss.pgh.pa.us> | 2004-02-03 17:34:04 +0000
---|---|---
committer | Tom Lane <tgl@sss.pgh.pa.us> | 2004-02-03 17:34:04 +0000
commit | 391c3811a2b7f4cd666e1b4f35534046a862abbb (patch)
tree | 16e534067f9cb86d99b598675fbf3929589e6629 /src
parent | 39d715bee6f1eb1e7b90148368a22fe24f008185 (diff)
download | postgresql-391c3811a2b7f4cd666e1b4f35534046a862abbb.tar.gz postgresql-391c3811a2b7f4cd666e1b4f35534046a862abbb.zip
Rename SortMem and VacuumMem to work_mem and maintenance_work_mem.
Make btree index creation and initial validation of foreign-key constraints
use maintenance_work_mem rather than work_mem as their memory limit.
Add some code to guc.c to allow these variables to be referenced by their
old names in SHOW and SET commands, for backwards compatibility.
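As a minimal sketch of the backwards-compatibility behavior described above (a hypothetical session, not part of the patch itself):

```sql
-- Hypothetical session: the old names remain accepted in SET/SHOW and are
-- mapped onto the new variables by the guc.c lookup added in this commit.
SET sort_mem = 2048;            -- actually sets work_mem (value in kB)
SHOW work_mem;                  -- 2048
SET vacuum_mem = 32768;         -- actually sets maintenance_work_mem (kB)
SHOW maintenance_work_mem;      -- 32768
```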
Diffstat (limited to 'src')
26 files changed, 197 insertions, 119 deletions
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index b989ee3c23c..b423c8fdbe8 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -12,7 +12,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.109 2004/01/07 18:56:24 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.110 2004/02/03 17:34:02 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -117,13 +117,14 @@ btbuild(PG_FUNCTION_ARGS) if (buildstate.usefast) { - buildstate.spool = _bt_spoolinit(index, indexInfo->ii_Unique); + buildstate.spool = _bt_spoolinit(index, indexInfo->ii_Unique, false); /* - * Different from spool, the uniqueness isn't checked for spool2. + * If building a unique index, put dead tuples in a second spool + * to keep them out of the uniqueness check. */ if (indexInfo->ii_Unique) - buildstate.spool2 = _bt_spoolinit(index, false); + buildstate.spool2 = _bt_spoolinit(index, false, true); } /* do the heap scan */ diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 0986f49a6e3..08be20a0271 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -36,7 +36,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.80 2004/01/07 18:56:24 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.81 2004/02/03 17:34:02 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -112,14 +112,25 @@ static void _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2); * create and initialize a spool structure */ BTSpool * -_bt_spoolinit(Relation index, bool isunique) +_bt_spoolinit(Relation index, bool isunique, bool isdead) { BTSpool *btspool = (BTSpool *) palloc0(sizeof(BTSpool)); + int btKbytes; btspool->index = index; btspool->isunique = isunique; - btspool->sortstate = tuplesort_begin_index(index, isunique, false); + /* + * We size the sort area as maintenance_work_mem rather than work_mem to + * speed index creation. This should be OK since a single backend can't + * run multiple index creations in parallel. Note that creation of a + * unique index actually requires two BTSpool objects. We expect that the + * second one (for dead tuples) won't get very full, so we give it only + * work_mem. + */ + btKbytes = isdead ? work_mem : maintenance_work_mem; + btspool->sortstate = tuplesort_begin_index(index, isunique, + btKbytes, false); /* * Currently, tuplesort provides sort functions on IndexTuples. If we diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index d761f050851..14c66b498d3 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -10,11 +10,11 @@ * relations with finite memory space usage. To do that, we set upper bounds * on the number of tuples and pages we will keep track of at once. * - * We are willing to use at most VacuumMem memory space to keep track of - * dead tuples. We initially allocate an array of TIDs of that size. - * If the array threatens to overflow, we suspend the heap scan phase - * and perform a pass of index cleanup and page compaction, then resume - * the heap scan with an empty TID array. 
+ * We are willing to use at most maintenance_work_mem memory space to keep + * track of dead tuples. We initially allocate an array of TIDs of that size. + * If the array threatens to overflow, we suspend the heap scan phase and + * perform a pass of index cleanup and page compaction, then resume the heap + * scan with an empty TID array. * * We can limit the storage for page free space to MaxFSMPages entries, * since that's the most the free space map will be willing to remember @@ -31,7 +31,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.33 2003/11/29 19:51:48 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.34 2004/02/03 17:34:02 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -908,8 +908,8 @@ lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks) int maxtuples; int maxpages; - maxtuples = (int) ((VacuumMem * 1024L) / sizeof(ItemPointerData)); - /* stay sane if small VacuumMem */ + maxtuples = (int) ((maintenance_work_mem * 1024L) / sizeof(ItemPointerData)); + /* stay sane if small maintenance_work_mem */ if (maxtuples < MAX_TUPLES_PER_PAGE) maxtuples = MAX_TUPLES_PER_PAGE; @@ -942,8 +942,8 @@ lazy_record_dead_tuple(LVRelStats *vacrelstats, { /* * The array shouldn't overflow under normal behavior, but perhaps it - * could if we are given a really small VacuumMem. In that case, just - * forget the last few tuples. + * could if we are given a really small maintenance_work_mem. In that + * case, just forget the last few tuples. */ if (vacrelstats->num_dead_tuples < vacrelstats->max_dead_tuples) { diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index bbed6f014b1..643a5da2e6b 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.153 2004/01/07 18:56:26 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.154 2004/02/03 17:34:02 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -1116,7 +1116,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, 0, false); } - tupstore = tuplestore_begin_heap(true, false, SortMem); + tupstore = tuplestore_begin_heap(true, false, work_mem); MemoryContextSwitchTo(oldcontext); rsinfo.setResult = tupstore; rsinfo.setDesc = tupdesc; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index a2910fa420f..cb0a64c4277 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -45,7 +45,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.117 2003/11/29 19:51:48 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.118 2004/02/03 17:34:02 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -248,7 +248,7 @@ initialize_aggregates(AggState *aggstate, peraggstate->sortstate = tuplesort_begin_datum(peraggstate->inputType, peraggstate->sortOperator, - false); + work_mem, false); } /* diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 1e40aad4d6d..834c7afd6c1 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.81 2003/11/29 19:51:48 pgsql Exp $ + * $PostgreSQL: 
pgsql/src/backend/executor/nodeHash.c,v 1.82 2004/02/03 17:34:02 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -348,9 +348,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, inner_rel_bytes = ntuples * tupsize * FUDGE_FAC; /* - * Target in-memory hashtable size is SortMem kilobytes. + * Target in-memory hashtable size is work_mem kilobytes. */ - hash_table_bytes = SortMem * 1024L; + hash_table_bytes = work_mem * 1024L; /* * Count the number of hash buckets we want for the whole relation, diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index d7c62110e93..b6d6e5ac21e 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.90 2004/01/07 18:56:26 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.91 2004/02/03 17:34:02 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -40,7 +40,7 @@ * preferred way to do this is to record already-returned tuples in a hash * table (using the TID as unique identifier). However, in a very large * scan this could conceivably run out of memory. We limit the hash table - * to no more than SortMem KB; if it grows past that, we fall back to the + * to no more than work_mem KB; if it grows past that, we fall back to the * pre-7.4 technique: evaluate the prior-scan index quals again for each * tuple (which is space-efficient, but slow). * @@ -1002,7 +1002,7 @@ create_duphash(IndexScanState *node) HASHCTL hash_ctl; long nbuckets; - node->iss_MaxHash = (SortMem * 1024L) / + node->iss_MaxHash = (work_mem * 1024L) / (MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry))); MemSet(&hash_ctl, 0, sizeof(hash_ctl)); hash_ctl.keysize = SizeOfIptrData; diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c index fb98a334dbc..8be944243f4 100644 --- a/src/backend/executor/nodeMaterial.c +++ b/src/backend/executor/nodeMaterial.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.45 2003/11/29 19:51:48 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.46 2004/02/03 17:34:02 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -62,7 +62,7 @@ ExecMaterial(MaterialState *node) */ if (tuplestorestate == NULL) { - tuplestorestate = tuplestore_begin_heap(true, false, SortMem); + tuplestorestate = tuplestore_begin_heap(true, false, work_mem); node->tuplestorestate = (void *) tuplestorestate; } diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c index ed0cd313428..90c96ce7461 100644 --- a/src/backend/executor/nodeSort.c +++ b/src/backend/executor/nodeSort.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.46 2003/11/29 19:51:48 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.47 2004/02/03 17:34:02 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -17,6 +17,7 @@ #include "executor/execdebug.h" #include "executor/nodeSort.h" +#include "miscadmin.h" #include "utils/tuplesort.h" @@ -88,6 +89,7 @@ ExecSort(SortState *node) plannode->numCols, plannode->sortOperators, plannode->sortColIdx, + work_mem, true /* randomAccess */ ); node->tuplesortstate = (void *) tuplesortstate; diff --git 
a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 454b1db127f..c23cf4d2324 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -49,7 +49,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.123 2004/01/19 03:52:28 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.124 2004/02/03 17:34:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -503,18 +503,18 @@ cost_functionscan(Path *path, Query *root, RelOptInfo *baserel) * Determines and returns the cost of sorting a relation, including * the cost of reading the input data. * - * If the total volume of data to sort is less than SortMem, we will do + * If the total volume of data to sort is less than work_mem, we will do * an in-memory sort, which requires no I/O and about t*log2(t) tuple * comparisons for t tuples. * - * If the total volume exceeds SortMem, we switch to a tape-style merge + * If the total volume exceeds work_mem, we switch to a tape-style merge * algorithm. There will still be about t*log2(t) tuple comparisons in * total, but we will also need to write and read each tuple once per * merge pass. We expect about ceil(log6(r)) merge passes where r is the * number of initial runs formed (log6 because tuplesort.c uses six-tape - * merging). Since the average initial run should be about twice SortMem, + * merging). Since the average initial run should be about twice work_mem, * we have - * disk traffic = 2 * relsize * ceil(log6(p / (2*SortMem))) + * disk traffic = 2 * relsize * ceil(log6(p / (2*work_mem))) * cpu = comparison_cost * t * log2(t) * * The disk traffic is assumed to be half sequential and half random @@ -542,7 +542,7 @@ cost_sort(Path *path, Query *root, Cost startup_cost = input_cost; Cost run_cost = 0; double nbytes = relation_byte_size(tuples, width); - long sortmembytes = SortMem * 1024L; + long work_mem_bytes = work_mem * 1024L; if (!enable_sort) startup_cost += disable_cost; @@ -564,10 +564,10 @@ cost_sort(Path *path, Query *root, startup_cost += 2.0 * cpu_operator_cost * tuples * LOG2(tuples); /* disk costs */ - if (nbytes > sortmembytes) + if (nbytes > work_mem_bytes) { double npages = ceil(nbytes / BLCKSZ); - double nruns = nbytes / (sortmembytes * 2); + double nruns = nbytes / (work_mem_bytes * 2); double log_runs = ceil(LOG6(nruns)); double npageaccesses; @@ -594,7 +594,7 @@ cost_sort(Path *path, Query *root, * Determines and returns the cost of materializing a relation, including * the cost of reading the input data. * - * If the total volume of data to materialize exceeds SortMem, we will need + * If the total volume of data to materialize exceeds work_mem, we will need * to write it to disk, so the cost is much higher in that case. 
*/ void @@ -604,10 +604,10 @@ cost_material(Path *path, Cost startup_cost = input_cost; Cost run_cost = 0; double nbytes = relation_byte_size(tuples, width); - long sortmembytes = SortMem * 1024L; + long work_mem_bytes = work_mem * 1024L; /* disk costs */ - if (nbytes > sortmembytes) + if (nbytes > work_mem_bytes) { double npages = ceil(nbytes / BLCKSZ); diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 00ba58ec8bc..e00f73c74b1 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.165 2004/01/18 00:50:02 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.166 2004/02/03 17:34:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -966,7 +966,7 @@ grouping_planner(Query *parse, double tuple_fraction) { /* * Use hashed grouping if (a) we think we can fit the - * hashtable into SortMem, *and* (b) the estimated cost is + * hashtable into work_mem, *and* (b) the estimated cost is * no more than doing it the other way. While avoiding * the need for sorted input is usually a win, the fact * that the output won't be sorted may be a loss; so we @@ -979,7 +979,7 @@ grouping_planner(Query *parse, double tuple_fraction) */ int hashentrysize = cheapest_path_width + 64 + numAggs * 100; - if (hashentrysize * dNumGroups <= SortMem * 1024L) + if (hashentrysize * dNumGroups <= work_mem * 1024L) { /* * Okay, do the cost comparison. We need to consider diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index 34dca0e5aca..4e7e2fe4390 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.87 2004/01/12 22:20:28 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.88 2004/02/03 17:34:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -614,12 +614,12 @@ subplan_is_hashable(SubLink *slink, SubPlan *node) return false; /* - * The estimated size of the subquery result must fit in SortMem. (XXX + * The estimated size of the subquery result must fit in work_mem. (XXX * what about hashtable overhead?) 
*/ subquery_size = node->plan->plan_rows * (MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData))); - if (subquery_size > SortMem * 1024L) + if (subquery_size > work_mem * 1024L) return false; /* diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index 059685e76aa..549909dfa0a 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.100 2004/01/19 03:49:41 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.101 2004/02/03 17:34:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -637,7 +637,7 @@ create_unique_path(Query *root, RelOptInfo *rel, Path *subpath) */ int hashentrysize = rel->width + 64; - if (hashentrysize * pathnode->rows <= SortMem * 1024L) + if (hashentrysize * pathnode->rows <= work_mem * 1024L) { cost_agg(&agg_path, root, AGG_HASHED, 0, diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c index 4f62112714b..1ea95d2e507 100644 --- a/src/backend/tcop/postgres.c +++ b/src/backend/tcop/postgres.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.387 2004/01/28 21:02:40 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.388 2004/02/03 17:34:03 tgl Exp $ * * NOTES * this is the "main" module of the postgres backend and @@ -1987,7 +1987,7 @@ usage(char *progname) printf(gettext(" -o FILENAME send stdout and stderr to given file\n")); printf(gettext(" -P disable system indexes\n")); printf(gettext(" -s show statistics after each query\n")); - printf(gettext(" -S SORT-MEM set amount of memory for sorts (in kbytes)\n")); + printf(gettext(" -S WORK-MEM set amount of memory for sorts (in kbytes)\n")); printf(gettext(" --describe-config describe configuration parameters, then exit\n")); printf(gettext(" --help show this help, then exit\n")); printf(gettext(" --version output version information, then exit\n")); @@ -2277,7 +2277,7 @@ PostgresMain(int argc, char *argv[], const char *username) /* * S - amount of sort memory to use in 1k bytes */ - SetConfigOption("sort_mem", optarg, ctx, gucsource); + SetConfigOption("work_mem", optarg, ctx, gucsource); break; case 's': diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 73aab9e5781..47384fbb89f 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -17,7 +17,7 @@ * * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group * - * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.66 2004/01/07 18:56:28 neilc Exp $ + * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.67 2004/02/03 17:34:03 tgl Exp $ * * ---------- */ @@ -41,6 +41,7 @@ #include "utils/lsyscache.h" #include "utils/typcache.h" #include "utils/acl.h" +#include "utils/guc.h" #include "miscadmin.h" @@ -2572,6 +2573,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel) const char *sep; List *list; List *list2; + int old_work_mem; + char workmembuf[32]; int spi_result; void *qplan; @@ -2665,6 +2668,23 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel) snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), ")"); + /* + * Temporarily increase work_mem so that the check query can be executed + * more efficiently. 
It seems okay to do this because the query is simple + * enough to not use a multiple of work_mem, and one typically would not + * have many large foreign-key validations happening concurrently. So + * this seems to meet the criteria for being considered a "maintenance" + * operation, and accordingly we use maintenance_work_mem. + * + * We do the equivalent of "SET LOCAL work_mem" so that transaction abort + * will restore the old value if we lose control due to an error. + */ + old_work_mem = work_mem; + snprintf(workmembuf, sizeof(workmembuf), "%d", maintenance_work_mem); + (void) set_config_option("work_mem", workmembuf, + PGC_USERSET, PGC_S_SESSION, + true, true); + if (SPI_connect() != SPI_OK_CONNECT) elog(ERROR, "SPI_connect failed"); @@ -2741,6 +2761,16 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel) if (SPI_finish() != SPI_OK_FINISH) elog(ERROR, "SPI_finish failed"); + /* + * Restore work_mem for the remainder of the current transaction. + * This is another SET LOCAL, so it won't affect the session value, + * nor any tentative value if there is one. + */ + snprintf(workmembuf, sizeof(workmembuf), "%d", old_work_mem); + (void) set_config_option("work_mem", workmembuf, + PGC_USERSET, PGC_S_SESSION, + true, true); + return true; } diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c index 3bddad4685a..c170ae603df 100644 --- a/src/backend/utils/init/globals.c +++ b/src/backend/utils/init/globals.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.81 2004/01/28 21:02:40 tgl Exp $ + * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.82 2004/02/03 17:34:03 tgl Exp $ * * NOTES * Globals used all over the place should be declared here and not @@ -78,6 +78,6 @@ int CTimeZone = 0; bool enableFsync = true; bool allowSystemTableMods = false; -int SortMem = 1024; -int VacuumMem = 8192; +int work_mem = 1024; +int maintenance_work_mem = 16384; int NBuffers = 1000; diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index ebc17830d3c..7a0deef9bbc 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -10,7 +10,7 @@ * Written by Peter Eisentraut <peter_e@gmx.net>. 
* * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.183 2004/02/02 00:17:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.184 2004/02/03 17:34:03 tgl Exp $ * *-------------------------------------------------------------------- */ @@ -1030,23 +1030,23 @@ static struct config_int ConfigureNamesInt[] = }, { - {"sort_mem", PGC_USERSET, RESOURCES_MEM, - gettext_noop("Sets the maximum memory to be used for sorts and hash tables."), - gettext_noop("Specifies the amount of memory to be used by internal " - "sort operations and hash tables before switching to temporary disk " - "files") + {"work_mem", PGC_USERSET, RESOURCES_MEM, + gettext_noop("Sets the maximum memory to be used for query workspaces."), + gettext_noop("This much memory may be used by each internal " + "sort operation and hash table before switching to " + "temporary disk files.") }, - &SortMem, - 1024, 8 * BLCKSZ / 1024, INT_MAX, NULL, NULL + &work_mem, + 1024, 8 * BLCKSZ / 1024, INT_MAX / 1024, NULL, NULL }, { - {"vacuum_mem", PGC_USERSET, RESOURCES_MEM, - gettext_noop("Sets the maximum memory used to keep track of to-be-reclaimed rows."), - NULL + {"maintenance_work_mem", PGC_USERSET, RESOURCES_MEM, + gettext_noop("Sets the maximum memory to be used for maintenance operations."), + gettext_noop("This includes operations such as VACUUM and CREATE INDEX.") }, - &VacuumMem, - 8192, 1024, INT_MAX, NULL, NULL + &maintenance_work_mem, + 16384, 1024, INT_MAX / 1024, NULL, NULL }, { @@ -1710,6 +1710,19 @@ static struct config_string ConfigureNamesString[] = /* + * To allow continued support of obsolete names for GUC variables, we apply + * the following mappings to any unrecognized name. Note that an old name + * should be mapped to a new one only if the new variable has very similar + * semantics to the old. + */ +static const char * const map_old_guc_names[] = { + "sort_mem", "work_mem", + "vacuum_mem", "maintenance_work_mem", + NULL +}; + + +/* * Actual lookup of variables is done through this single, sorted array. */ struct config_generic **guc_variables; @@ -1723,6 +1736,7 @@ static char *guc_string_workspace; /* for avoiding memory leaks */ static int guc_var_compare(const void *a, const void *b); +static int guc_name_compare(const char *namea, const char *nameb); static void ReportGUCOption(struct config_generic * record); static char *_ShowOption(struct config_generic * record); @@ -1812,11 +1826,12 @@ find_option(const char *name) { const char **key = &name; struct config_generic **res; + int i; Assert(name); /* - * by equating const char ** with struct config_generic *, we are + * By equating const char ** with struct config_generic *, we are * assuming the name field is first in config_generic. */ res = (struct config_generic **) bsearch((void *) &key, @@ -1826,6 +1841,19 @@ find_option(const char *name) guc_var_compare); if (res) return *res; + + /* + * See if the name is an obsolete name for a variable. We assume that + * the set of supported old names is short enough that a brute-force + * search is the best way. 
+ */ + for (i = 0; map_old_guc_names[i] != NULL; i += 2) + { + if (guc_name_compare(name, map_old_guc_names[i]) == 0) + return find_option(map_old_guc_names[i+1]); + } + + /* Unknown name */ return NULL; } @@ -1838,16 +1866,19 @@ guc_var_compare(const void *a, const void *b) { struct config_generic *confa = *(struct config_generic **) a; struct config_generic *confb = *(struct config_generic **) b; - const char *namea; - const char *nameb; + return guc_name_compare(confa->name, confb->name); +} + + +static int +guc_name_compare(const char *namea, const char *nameb) +{ /* * The temptation to use strcasecmp() here must be resisted, because * the array ordering has to remain stable across setlocale() calls. * So, build our own with a simple ASCII-only downcasing. */ - namea = confa->name; - nameb = confb->name; while (*namea && *nameb) { char cha = *namea++; diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index 530e8c7952b..0a6e15071ae 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -56,8 +56,8 @@ # - Memory - #shared_buffers = 1000 # min 16, at least max_connections*2, 8KB each -#sort_mem = 1024 # min 64, size in KB -#vacuum_mem = 8192 # min 1024, size in KB +#work_mem = 1024 # min 64, size in KB +#maintenance_work_mem = 16384 # min 1024, size in KB #debug_shared_buffers = 0 # 0-600 seconds # - Background writer - diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 159f2fe3593..f6b72481fb9 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -12,7 +12,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.63 2003/11/29 19:52:04 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.64 2004/02/03 17:34:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -282,8 +282,8 @@ PortalCreateHoldStore(Portal portal) /* Create the tuple store, selecting cross-transaction temp files. */ oldcxt = MemoryContextSwitchTo(portal->holdContext); - /* XXX: Should SortMem be used for this? */ - portal->holdStore = tuplestore_begin_heap(true, true, SortMem); + /* XXX: Should maintenance_work_mem be used for the portal size? */ + portal->holdStore = tuplestore_begin_heap(true, true, work_mem); MemoryContextSwitchTo(oldcxt); } diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index a93c4742fd5..636fdedcdb4 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -30,15 +30,15 @@ * heap. When the run number at the top of the heap changes, we know that * no more records of the prior run are left in the heap. * - * The (approximate) amount of memory allowed for any one sort operation - * is given in kilobytes by the external variable SortMem. Initially, + * The approximate amount of memory allowed for any one sort operation + * is specified in kilobytes by the caller (most pass work_mem). Initially, * we absorb tuples and simply store them in an unsorted array as long as - * we haven't exceeded SortMem. If we reach the end of the input without - * exceeding SortMem, we sort the array using qsort() and subsequently return + * we haven't exceeded workMem. 
If we reach the end of the input without + * exceeding workMem, we sort the array using qsort() and subsequently return * tuples just by scanning the tuple array sequentially. If we do exceed - * SortMem, we construct a heap using Algorithm H and begin to emit tuples + * workMem, we construct a heap using Algorithm H and begin to emit tuples * into sorted runs in temporary tapes, emitting just enough tuples at each - * step to get back within the SortMem limit. Whenever the run number at + * step to get back within the workMem limit. Whenever the run number at * the top of the heap changes, we begin a new run with a new output tape * (selected per Algorithm D). After the end of the input is reached, * we dump out remaining tuples in memory into a final run (or two), @@ -49,7 +49,7 @@ * next tuple from its source tape (if any). When the heap empties, the merge * is complete. The basic merge algorithm thus needs very little memory --- * only M tuples for an M-way merge, and M is at most six in the present code. - * However, we can still make good use of our full SortMem allocation by + * However, we can still make good use of our full workMem allocation by * pre-reading additional tuples from each source tape. Without prereading, * our access pattern to the temporary file would be very erratic; on average * we'd read one block from each of M source tapes during the same time that @@ -59,7 +59,7 @@ * of the temp file, ensuring that things will be even worse when it comes * time to read that tape. A straightforward merge pass thus ends up doing a * lot of waiting for disk seeks. We can improve matters by prereading from - * each source tape sequentially, loading about SortMem/M bytes from each tape + * each source tape sequentially, loading about workMem/M bytes from each tape * in turn. Then we run the merge algorithm, writing but not reading until * one of the preloaded tuple series runs out. Then we switch back to preread * mode, fill memory again, and repeat. This approach helps to localize both @@ -78,7 +78,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.40 2003/11/29 19:52:04 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.41 2004/02/03 17:34:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -323,7 +323,7 @@ struct Tuplesortstate * * NOTES about memory consumption calculations: * - * We count space allocated for tuples against the SortMem limit, plus + * We count space allocated for tuples against the workMem limit, plus * the space used by the variable-size arrays memtuples and memtupindex. * Fixed-size space (primarily the LogicalTapeSet I/O buffers) is not * counted. @@ -351,7 +351,7 @@ typedef struct } DatumTuple; -static Tuplesortstate *tuplesort_begin_common(bool randomAccess); +static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess); static void puttuple_common(Tuplesortstate *state, void *tuple); static void inittapes(Tuplesortstate *state); static void selectnewtape(Tuplesortstate *state); @@ -406,10 +406,16 @@ static Tuplesortstate *qsort_tuplesortstate; * access was requested, rescan, markpos, and restorepos can also be called.) * For Datum sorts, putdatum/getdatum are used instead of puttuple/gettuple. * Call tuplesort_end to terminate the operation and release memory/disk space. 
+ * + * Each variant of tuplesort_begin has a workMem parameter specifying the + * maximum number of kilobytes of RAM to use before spilling data to disk. + * (The normal value of this parameter is work_mem, but some callers use + * other values.) Each variant also has a randomAccess parameter specifying + * whether the caller needs non-sequential access to the sort result. */ static Tuplesortstate * -tuplesort_begin_common(bool randomAccess) +tuplesort_begin_common(int workMem, bool randomAccess) { Tuplesortstate *state; @@ -417,7 +423,7 @@ tuplesort_begin_common(bool randomAccess) state->status = TSS_INITIAL; state->randomAccess = randomAccess; - state->availMem = SortMem * 1024L; + state->availMem = workMem * 1024L; state->tapeset = NULL; state->memtupcount = 0; @@ -442,9 +448,9 @@ Tuplesortstate * tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, Oid *sortOperators, AttrNumber *attNums, - bool randomAccess) + int workMem, bool randomAccess) { - Tuplesortstate *state = tuplesort_begin_common(randomAccess); + Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess); int i; AssertArg(nkeys > 0); @@ -488,9 +494,9 @@ tuplesort_begin_heap(TupleDesc tupDesc, Tuplesortstate * tuplesort_begin_index(Relation indexRel, bool enforceUnique, - bool randomAccess) + int workMem, bool randomAccess) { - Tuplesortstate *state = tuplesort_begin_common(randomAccess); + Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess); state->comparetup = comparetup_index; state->copytup = copytup_index; @@ -508,9 +514,9 @@ tuplesort_begin_index(Relation indexRel, Tuplesortstate * tuplesort_begin_datum(Oid datumType, Oid sortOperator, - bool randomAccess) + int workMem, bool randomAccess) { - Tuplesortstate *state = tuplesort_begin_common(randomAccess); + Tuplesortstate *state = tuplesort_begin_common(workMem, randomAccess); RegProcedure sortFunction; int16 typlen; bool typbyval; @@ -1077,7 +1083,7 @@ mergeruns(Tuplesortstate *state) /* * If we produced only one initial run (quite likely if the total data - * volume is between 1X and 2X SortMem), we can just use that tape as + * volume is between 1X and 2X workMem), we can just use that tape as * the finished output, rather than doing a useless merge. */ if (state->currentRun == 1) diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c index 9f7efa66a3b..d0dce20fc6e 100644 --- a/src/backend/utils/sort/tuplestore.c +++ b/src/backend/utils/sort/tuplestore.c @@ -36,7 +36,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.17 2003/11/29 19:52:04 pgsql Exp $ + * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.18 2004/02/03 17:34:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -219,10 +219,7 @@ tuplestore_begin_common(bool randomAccess, bool interXact, int maxKBytes) state->myfile = NULL; state->memtupcount = 0; - if (maxKBytes > 0) - state->memtupsize = 1024; /* initial guess */ - else - state->memtupsize = 1; /* won't really need any space */ + state->memtupsize = 1024; /* initial guess */ state->memtuples = (void **) palloc(state->memtupsize * sizeof(void *)); USEMEM(state, GetMemoryChunkSpace(state->memtuples)); @@ -250,7 +247,7 @@ tuplestore_begin_common(bool randomAccess, bool interXact, int maxKBytes) * no longer wanted. * * maxKBytes: how much data to store in memory (any data beyond this - * amount is paged to disk). 
+ * amount is paged to disk). When in doubt, use work_mem. */ Tuplestorestate * tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes) diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index 68bd8391970..bea56852e43 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -3,7 +3,7 @@ * * Copyright (c) 2000-2003, PostgreSQL Global Development Group * - * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.100 2004/01/25 03:07:22 neilc Exp $ + * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.101 2004/02/03 17:34:03 tgl Exp $ */ /*---------------------------------------------------------------------- @@ -533,6 +533,7 @@ psql_completion(char *text, int start, int end) "log_planner_stats", "log_statement", "log_statement_stats", + "maintenance_work_mem", "max_connections", "max_expr_depth", "max_files_per_process", @@ -547,7 +548,6 @@ psql_completion(char *text, int start, int end) "shared_buffers", "seed", "server_encoding", - "sort_mem", "sql_inheritance", "ssl", "statement_timeout", @@ -567,10 +567,10 @@ psql_completion(char *text, int start, int end) "unix_socket_directory", "unix_socket_group", "unix_socket_permissions", - "vacuum_mem", "wal_buffers", "wal_debug", "wal_sync_method", + "work_mem", NULL }; diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h index 62af3ca30e9..8601b802fcf 100644 --- a/src/include/access/nbtree.h +++ b/src/include/access/nbtree.h @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.75 2003/12/21 01:23:06 tgl Exp $ + * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.76 2004/02/03 17:34:03 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -490,7 +490,7 @@ extern BTItem _bt_formitem(IndexTuple itup); */ typedef struct BTSpool BTSpool; /* opaque type known only within nbtsort.c */ -extern BTSpool *_bt_spoolinit(Relation index, bool isunique); +extern BTSpool *_bt_spoolinit(Relation index, bool isunique, bool isdead); extern void _bt_spooldestroy(BTSpool *btspool); extern void _bt_spool(BTItem btitem, BTSpool *btspool); extern void _bt_leafbuild(BTSpool *btspool, BTSpool *spool2); diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index c62ab783a34..a7fe724533e 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -12,7 +12,7 @@ * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.149 2004/01/30 15:57:04 momjian Exp $ + * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.150 2004/02/03 17:34:03 tgl Exp $ * * NOTES * some of the information in this file should be moved to @@ -207,8 +207,8 @@ extern int CTimeZone; extern bool enableFsync; extern bool allowSystemTableMods; -extern DLLIMPORT int SortMem; -extern int VacuumMem; +extern DLLIMPORT int work_mem; +extern DLLIMPORT int maintenance_work_mem; /* * A few postmaster startup options are exported here so the diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h index bc4f4376ecb..70cbbfbabed 100644 --- a/src/include/utils/tuplesort.h +++ b/src/include/utils/tuplesort.h @@ -13,7 +13,7 @@ * Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * 
$PostgreSQL: pgsql/src/include/utils/tuplesort.h,v 1.14 2003/11/29 22:41:16 pgsql Exp $ + * $PostgreSQL: pgsql/src/include/utils/tuplesort.h,v 1.15 2004/02/03 17:34:04 tgl Exp $ * *------------------------------------------------------------------------- */ @@ -39,13 +39,13 @@ typedef struct Tuplesortstate Tuplesortstate; extern Tuplesortstate *tuplesort_begin_heap(TupleDesc tupDesc, int nkeys, Oid *sortOperators, AttrNumber *attNums, - bool randomAccess); + int workMem, bool randomAccess); extern Tuplesortstate *tuplesort_begin_index(Relation indexRel, bool enforceUnique, - bool randomAccess); + int workMem, bool randomAccess); extern Tuplesortstate *tuplesort_begin_datum(Oid datumType, Oid sortOperator, - bool randomAccess); + int workMem, bool randomAccess); extern void tuplesort_puttuple(Tuplesortstate *state, void *tuple); diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index 13d10aab0f5..27142aa2bf5 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -3,7 +3,7 @@ * procedural language * * IDENTIFICATION - * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.94 2003/11/29 19:52:12 pgsql Exp $ + * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.95 2004/02/03 17:34:04 tgl Exp $ * * This software is copyrighted by Jan Wieck - Hamburg. * @@ -1770,7 +1770,7 @@ exec_init_tuple_store(PLpgSQL_execstate * estate) estate->tuple_store_cxt = rsi->econtext->ecxt_per_query_memory; oldcxt = MemoryContextSwitchTo(estate->tuple_store_cxt); - estate->tuple_store = tuplestore_begin_heap(true, false, SortMem); + estate->tuple_store = tuplestore_begin_heap(true, false, work_mem); MemoryContextSwitchTo(oldcxt); estate->rettupdesc = rsi->expectedDesc; |
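As a rough illustration of where the renamed settings now apply after this patch (a hypothetical session with invented object names, not part of the commit):

```sql
-- Hypothetical example; table and index names are made up.
SET maintenance_work_mem = 65536;   -- 64MB budget for maintenance operations
CREATE INDEX orders_customer_idx ON orders (customer_id);
    -- the btree build's sort is now limited by maintenance_work_mem
VACUUM orders;
    -- the dead-tuple TID array is now sized from maintenance_work_mem
ALTER TABLE orders ADD FOREIGN KEY (customer_id) REFERENCES customers (id);
    -- the initial validation query temporarily raises work_mem to
    -- maintenance_work_mem via the equivalent of SET LOCAL
```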