Diffstat (limited to 'src/backend/utils')
-rw-r--r--  src/backend/utils/mmgr/aset.c         5
-rw-r--r--  src/backend/utils/mmgr/mcxt.c        74
-rw-r--r--  src/backend/utils/sort/tuplesort.c   91
-rw-r--r--  src/backend/utils/sort/tuplestore.c  77
4 files changed, 162 insertions, 85 deletions
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index ab93620ae26..ff04a38cda2 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -458,6 +458,7 @@ AllocSetContextCreate(MemoryContext parent,
maxBlockSize = MAXALIGN(maxBlockSize);
if (maxBlockSize < initBlockSize)
maxBlockSize = initBlockSize;
+ Assert(AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
context->initBlockSize = initBlockSize;
context->maxBlockSize = maxBlockSize;
context->nextBlockSize = initBlockSize;
@@ -643,6 +644,10 @@ AllocSetDelete(MemoryContext context)
* AllocSetAlloc
* Returns pointer to allocated memory of given size; memory is added
* to the set.
+ *
+ * No request may exceed:
+ * MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
+ * All callers use a much-lower limit.
*/
static void *
AllocSetAlloc(MemoryContext context, Size size)
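
The new assertion and the request-size comment above lean on the huge-allocation limit macros. For reference, a sketch of how those macros relate to the classic 1 GB palloc ceiling, assuming they are declared in utils/memutils.h alongside the existing AllocSizeIsValid(); the exact values are illustrative, not copied from this diff:

    #define MaxAllocSize                ((Size) 0x3fffffff)   /* 1 GB - 1, the ordinary palloc limit */
    #define AllocSizeIsValid(size)      ((Size) (size) <= MaxAllocSize)
    #define MaxAllocHugeSize            ((Size) -1 >> 1)      /* roughly SIZE_MAX / 2 */
    #define AllocHugeSizeIsValid(size)  ((Size) (size) <= MaxAllocHugeSize)

Under that assumption, asserting AllocHugeSizeIsValid(maxBlockSize) guarantees that doubling nextBlockSize up to maxBlockSize can never overflow a Size.
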
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 46961e9ee99..9574fd3c7a3 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -455,14 +455,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
header = (StandardChunkHeader *)
((char *) pointer - STANDARDCHUNKHEADERSIZE);
- /*
- * If the context link doesn't match then we certainly have a non-member
- * chunk. Also check for a reasonable-looking size as extra guard against
- * being fooled by bogus pointers.
- */
- if (header->context == context && AllocSizeIsValid(header->size))
- return true;
- return false;
+ return header->context == context;
}
/*--------------------
@@ -758,6 +751,71 @@ repalloc(void *pointer, Size size)
}
/*
+ * MemoryContextAllocHuge
+ * Allocate (possibly-expansive) space within the specified context.
+ *
+ * See considerations in comment at MaxAllocHugeSize.
+ */
+void *
+MemoryContextAllocHuge(MemoryContext context, Size size)
+{
+ void *ret;
+
+ AssertArg(MemoryContextIsValid(context));
+
+ if (!AllocHugeSizeIsValid(size))
+ elog(ERROR, "invalid memory alloc request size %lu",
+ (unsigned long) size);
+
+ context->isReset = false;
+
+ ret = (*context->methods->alloc) (context, size);
+ VALGRIND_MEMPOOL_ALLOC(context, ret, size);
+
+ return ret;
+}
+
+/*
+ * repalloc_huge
+ * Adjust the size of a previously allocated chunk, permitting a large
+ * value. The previous allocation need not have been "huge".
+ */
+void *
+repalloc_huge(void *pointer, Size size)
+{
+ MemoryContext context;
+ void *ret;
+
+ if (!AllocHugeSizeIsValid(size))
+ elog(ERROR, "invalid memory alloc request size %lu",
+ (unsigned long) size);
+
+ /*
+ * Try to detect bogus pointers handed to us, poorly though we can.
+ * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
+ * allocated chunk.
+ */
+ Assert(pointer != NULL);
+ Assert(pointer == (void *) MAXALIGN(pointer));
+
+ /*
+ * OK, it's probably safe to look at the chunk header.
+ */
+ context = ((StandardChunkHeader *)
+ ((char *) pointer - STANDARDCHUNKHEADERSIZE))->context;
+
+ AssertArg(MemoryContextIsValid(context));
+
+ /* isReset must be false already */
+ Assert(!context->isReset);
+
+ ret = (*context->methods->realloc) (context, pointer, size);
+ VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
+
+ return ret;
+}
+
+/*
* MemoryContextStrdup
* Like strdup(), but allocate from the specified context
*/
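
A usage sketch for the two new entry points, illustrative only and not part of the patch; the context name, buffer, and sizes are made up. A caller that may need more than MaxAllocSize bytes allocates with MemoryContextAllocHuge and later grows the chunk with repalloc_huge:

    #include "postgres.h"
    #include "utils/memutils.h"

    static char *
    get_big_buffer(MemoryContext work_cxt, Size nbytes)
    {
        /* the allocator would reject this anyway; fail with a clearer message */
        if (!AllocHugeSizeIsValid(nbytes))
            elog(ERROR, "buffer request too large: %lu", (unsigned long) nbytes);
        return (char *) MemoryContextAllocHuge(work_cxt, nbytes);
    }

    static char *
    grow_big_buffer(char *buf, Size newsize)
    {
        /* the previous allocation need not have been "huge" */
        return (char *) repalloc_huge(buf, newsize);
    }
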
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index d876369c6bb..efc07606400 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -211,8 +211,8 @@ struct Tuplesortstate
* tuples to return? */
bool boundUsed; /* true if we made use of a bounded heap */
int bound; /* if bounded, the maximum number of tuples */
- long availMem; /* remaining memory available, in bytes */
- long allowedMem; /* total memory allowed, in bytes */
+ Size availMem; /* remaining memory available, in bytes */
+ Size allowedMem; /* total memory allowed, in bytes */
int maxTapes; /* number of tapes (Knuth's T) */
int tapeRange; /* maxTapes-1 (Knuth's P) */
MemoryContext sortcontext; /* memory context holding all sort data */
@@ -308,7 +308,7 @@ struct Tuplesortstate
int *mergenext; /* first preread tuple for each source */
int *mergelast; /* last preread tuple for each source */
int *mergeavailslots; /* slots left for prereading each tape */
- long *mergeavailmem; /* availMem for prereading each tape */
+ Size *mergeavailmem; /* availMem for prereading each tape */
int mergefreelist; /* head of freelist of recycled slots */
int mergefirstfree; /* first slot never used in this merge */
@@ -961,25 +961,26 @@ tuplesort_end(Tuplesortstate *state)
}
/*
- * Grow the memtuples[] array, if possible within our memory constraint.
- * Return TRUE if we were able to enlarge the array, FALSE if not.
+ * Grow the memtuples[] array, if possible within our memory constraint. We
+ * must not exceed INT_MAX tuples in memory or the caller-provided memory
+ * limit. Return TRUE if we were able to enlarge the array, FALSE if not.
*
- * Normally, at each increment we double the size of the array. When we no
- * longer have enough memory to do that, we attempt one last, smaller increase
- * (and then clear the growmemtuples flag so we don't try any more). That
- * allows us to use allowedMem as fully as possible; sticking to the pure
- * doubling rule could result in almost half of allowedMem going unused.
- * Because availMem moves around with tuple addition/removal, we need some
- * rule to prevent making repeated small increases in memtupsize, which would
- * just be useless thrashing. The growmemtuples flag accomplishes that and
- * also prevents useless recalculations in this function.
+ * Normally, at each increment we double the size of the array. When doing
+ * that would exceed a limit, we attempt one last, smaller increase (and then
+ * clear the growmemtuples flag so we don't try any more). That allows us to
+ * use memory as fully as permitted; sticking to the pure doubling rule could
+ * result in almost half going unused. Because availMem moves around with
+ * tuple addition/removal, we need some rule to prevent making repeated small
+ * increases in memtupsize, which would just be useless thrashing. The
+ * growmemtuples flag accomplishes that and also prevents useless
+ * recalculations in this function.
*/
static bool
grow_memtuples(Tuplesortstate *state)
{
int newmemtupsize;
int memtupsize = state->memtupsize;
- long memNowUsed = state->allowedMem - state->availMem;
+ Size memNowUsed = state->allowedMem - state->availMem;
/* Forget it if we've already maxed out memtuples, per comment above */
if (!state->growmemtuples)
@@ -989,14 +990,16 @@ grow_memtuples(Tuplesortstate *state)
if (memNowUsed <= state->availMem)
{
/*
- * It is surely safe to double memtupsize if we've used no more than
- * half of allowedMem.
- *
- * Note: it might seem that we need to worry about memtupsize * 2
- * overflowing an int, but the MaxAllocSize clamp applied below
- * ensures the existing memtupsize can't be large enough for that.
+ * We've used no more than half of allowedMem; double our usage,
+ * clamping at INT_MAX.
*/
- newmemtupsize = memtupsize * 2;
+ if (memtupsize < INT_MAX / 2)
+ newmemtupsize = memtupsize * 2;
+ else
+ {
+ newmemtupsize = INT_MAX;
+ state->growmemtuples = false;
+ }
}
else
{
@@ -1012,7 +1015,8 @@ grow_memtuples(Tuplesortstate *state)
* we've already seen, and thus we can extrapolate from the space
* consumption so far to estimate an appropriate new size for the
* memtuples array. The optimal value might be higher or lower than
- * this estimate, but it's hard to know that in advance.
+ * this estimate, but it's hard to know that in advance. We again
+ * clamp at INT_MAX tuples.
*
* This calculation is safe against enlarging the array so much that
* LACKMEM becomes true, because the memory currently used includes
@@ -1020,16 +1024,18 @@ grow_memtuples(Tuplesortstate *state)
* new array elements even if no other memory were currently used.
*
* We do the arithmetic in float8, because otherwise the product of
- * memtupsize and allowedMem could overflow. (A little algebra shows
- * that grow_ratio must be less than 2 here, so we are not risking
- * integer overflow this way.) Any inaccuracy in the result should be
- * insignificant; but even if we computed a completely insane result,
- * the checks below will prevent anything really bad from happening.
+ * memtupsize and allowedMem could overflow. Any inaccuracy in the
+ * result should be insignificant; but even if we computed a
+ * completely insane result, the checks below will prevent anything
+ * really bad from happening.
*/
double grow_ratio;
grow_ratio = (double) state->allowedMem / (double) memNowUsed;
- newmemtupsize = (int) (memtupsize * grow_ratio);
+ if (memtupsize * grow_ratio < INT_MAX)
+ newmemtupsize = (int) (memtupsize * grow_ratio);
+ else
+ newmemtupsize = INT_MAX;
/* We won't make any further enlargement attempts */
state->growmemtuples = false;
@@ -1040,12 +1046,13 @@ grow_memtuples(Tuplesortstate *state)
goto noalloc;
/*
- * On a 64-bit machine, allowedMem could be more than MaxAllocSize. Clamp
- * to ensure our request won't be rejected by palloc.
+ * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize. Clamp
+ * to ensure our request won't be rejected. Note that we can easily
+ * exhaust address space before facing this outcome.
*/
- if ((Size) newmemtupsize >= MaxAllocSize / sizeof(SortTuple))
+ if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
{
- newmemtupsize = (int) (MaxAllocSize / sizeof(SortTuple));
+ newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
state->growmemtuples = false; /* can't grow any more */
}
@@ -1060,15 +1067,15 @@ grow_memtuples(Tuplesortstate *state)
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
*/
- if (state->availMem < (long) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
+ if (state->availMem < (Size) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
goto noalloc;
/* OK, do it */
FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
state->memtupsize = newmemtupsize;
state->memtuples = (SortTuple *)
- repalloc(state->memtuples,
- state->memtupsize * sizeof(SortTuple));
+ repalloc_huge(state->memtuples,
+ state->memtupsize * sizeof(SortTuple));
USEMEM(state, GetMemoryChunkSpace(state->memtuples));
if (LACKMEM(state))
elog(ERROR, "unexpected out-of-memory situation during sort");
@@ -1715,7 +1722,7 @@ tuplesort_getdatum(Tuplesortstate *state, bool forward,
* This is exported for use by the planner. allowedMem is in bytes.
*/
int
-tuplesort_merge_order(long allowedMem)
+tuplesort_merge_order(Size allowedMem)
{
int mOrder;
@@ -1749,7 +1756,7 @@ inittapes(Tuplesortstate *state)
int maxTapes,
ntuples,
j;
- long tapeSpace;
+ Size tapeSpace;
/* Compute number of tapes to use: merge order plus 1 */
maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
@@ -1798,7 +1805,7 @@ inittapes(Tuplesortstate *state)
state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
- state->mergeavailmem = (long *) palloc0(maxTapes * sizeof(long));
+ state->mergeavailmem = (Size *) palloc0(maxTapes * sizeof(Size));
state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
@@ -2026,7 +2033,7 @@ mergeonerun(Tuplesortstate *state)
int srcTape;
int tupIndex;
SortTuple *tup;
- long priorAvail,
+ Size priorAvail,
spaceFreed;
/*
@@ -2100,7 +2107,7 @@ beginmerge(Tuplesortstate *state)
int tapenum;
int srcTape;
int slotsPerTape;
- long spacePerTape;
+ Size spacePerTape;
/* Heap should be empty here */
Assert(state->memtupcount == 0);
@@ -2221,7 +2228,7 @@ mergeprereadone(Tuplesortstate *state, int srcTape)
unsigned int tuplen;
SortTuple stup;
int tupIndex;
- long priorAvail,
+ Size priorAvail,
spaceUsed;
if (!state->mergeactive[srcTape])
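
The growth rule that grow_memtuples now follows can be shown in isolation; this is a sketch distilled from the hunks above, not code from the patch, and the helper name is made up. The array size doubles while that stays under INT_MAX, and the result is clamped so the eventual repalloc_huge request stays within MaxAllocHugeSize:

    #include "postgres.h"

    #include <limits.h>

    #include "utils/memutils.h"

    static int
    clamp_grown_size(int memtupsize, Size elem_size)
    {
        int         newsize;

        /* double, but never exceed INT_MAX tuples */
        if (memtupsize < INT_MAX / 2)
            newsize = memtupsize * 2;
        else
            newsize = INT_MAX;

        /* and never exceed what the huge allocator will accept */
        if ((Size) newsize >= MaxAllocHugeSize / elem_size)
            newsize = (int) (MaxAllocHugeSize / elem_size);

        return newsize;
    }

The non-doubling path applies the same clamps, except that its tentative size comes from the grow_ratio extrapolation rather than a straight doubling.
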
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index ea9bc04823d..ce1d47611c2 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -104,8 +104,8 @@ struct Tuplestorestate
bool backward; /* store extra length words in file? */
bool interXact; /* keep open through transactions? */
bool truncated; /* tuplestore_trim has removed tuples? */
- long availMem; /* remaining memory available, in bytes */
- long allowedMem; /* total memory allowed, in bytes */
+ Size availMem; /* remaining memory available, in bytes */
+ Size allowedMem; /* total memory allowed, in bytes */
BufFile *myfile; /* underlying file, or NULL if none */
MemoryContext context; /* memory context for holding tuples */
ResourceOwner resowner; /* resowner for holding temp files */
@@ -531,25 +531,26 @@ tuplestore_ateof(Tuplestorestate *state)
}
/*
- * Grow the memtuples[] array, if possible within our memory constraint.
- * Return TRUE if we were able to enlarge the array, FALSE if not.
+ * Grow the memtuples[] array, if possible within our memory constraint. We
+ * must not exceed INT_MAX tuples in memory or the caller-provided memory
+ * limit. Return TRUE if we were able to enlarge the array, FALSE if not.
*
- * Normally, at each increment we double the size of the array. When we no
- * longer have enough memory to do that, we attempt one last, smaller increase
- * (and then clear the growmemtuples flag so we don't try any more). That
- * allows us to use allowedMem as fully as possible; sticking to the pure
- * doubling rule could result in almost half of allowedMem going unused.
- * Because availMem moves around with tuple addition/removal, we need some
- * rule to prevent making repeated small increases in memtupsize, which would
- * just be useless thrashing. The growmemtuples flag accomplishes that and
- * also prevents useless recalculations in this function.
+ * Normally, at each increment we double the size of the array. When doing
+ * that would exceed a limit, we attempt one last, smaller increase (and then
+ * clear the growmemtuples flag so we don't try any more). That allows us to
+ * use memory as fully as permitted; sticking to the pure doubling rule could
+ * result in almost half going unused. Because availMem moves around with
+ * tuple addition/removal, we need some rule to prevent making repeated small
+ * increases in memtupsize, which would just be useless thrashing. The
+ * growmemtuples flag accomplishes that and also prevents useless
+ * recalculations in this function.
*/
static bool
grow_memtuples(Tuplestorestate *state)
{
int newmemtupsize;
int memtupsize = state->memtupsize;
- long memNowUsed = state->allowedMem - state->availMem;
+ Size memNowUsed = state->allowedMem - state->availMem;
/* Forget it if we've already maxed out memtuples, per comment above */
if (!state->growmemtuples)
@@ -559,14 +560,16 @@ grow_memtuples(Tuplestorestate *state)
if (memNowUsed <= state->availMem)
{
/*
- * It is surely safe to double memtupsize if we've used no more than
- * half of allowedMem.
- *
- * Note: it might seem that we need to worry about memtupsize * 2
- * overflowing an int, but the MaxAllocSize clamp applied below
- * ensures the existing memtupsize can't be large enough for that.
+ * We've used no more than half of allowedMem; double our usage,
+ * clamping at INT_MAX.
*/
- newmemtupsize = memtupsize * 2;
+ if (memtupsize < INT_MAX / 2)
+ newmemtupsize = memtupsize * 2;
+ else
+ {
+ newmemtupsize = INT_MAX;
+ state->growmemtuples = false;
+ }
}
else
{
@@ -582,7 +585,8 @@ grow_memtuples(Tuplestorestate *state)
* we've already seen, and thus we can extrapolate from the space
* consumption so far to estimate an appropriate new size for the
* memtuples array. The optimal value might be higher or lower than
- * this estimate, but it's hard to know that in advance.
+ * this estimate, but it's hard to know that in advance. We again
+ * clamp at INT_MAX tuples.
*
* This calculation is safe against enlarging the array so much that
* LACKMEM becomes true, because the memory currently used includes
@@ -590,16 +594,18 @@ grow_memtuples(Tuplestorestate *state)
* new array elements even if no other memory were currently used.
*
* We do the arithmetic in float8, because otherwise the product of
- * memtupsize and allowedMem could overflow. (A little algebra shows
- * that grow_ratio must be less than 2 here, so we are not risking
- * integer overflow this way.) Any inaccuracy in the result should be
- * insignificant; but even if we computed a completely insane result,
- * the checks below will prevent anything really bad from happening.
+ * memtupsize and allowedMem could overflow. Any inaccuracy in the
+ * result should be insignificant; but even if we computed a
+ * completely insane result, the checks below will prevent anything
+ * really bad from happening.
*/
double grow_ratio;
grow_ratio = (double) state->allowedMem / (double) memNowUsed;
- newmemtupsize = (int) (memtupsize * grow_ratio);
+ if (memtupsize * grow_ratio < INT_MAX)
+ newmemtupsize = (int) (memtupsize * grow_ratio);
+ else
+ newmemtupsize = INT_MAX;
/* We won't make any further enlargement attempts */
state->growmemtuples = false;
@@ -610,12 +616,13 @@ grow_memtuples(Tuplestorestate *state)
goto noalloc;
/*
- * On a 64-bit machine, allowedMem could be more than MaxAllocSize. Clamp
- * to ensure our request won't be rejected by palloc.
+ * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize. Clamp
+ * to ensure our request won't be rejected. Note that we can easily
+ * exhaust address space before facing this outcome.
*/
- if ((Size) newmemtupsize >= MaxAllocSize / sizeof(void *))
+ if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(void *))
{
- newmemtupsize = (int) (MaxAllocSize / sizeof(void *));
+ newmemtupsize = (int) (MaxAllocHugeSize / sizeof(void *));
state->growmemtuples = false; /* can't grow any more */
}
@@ -630,15 +637,15 @@ grow_memtuples(Tuplestorestate *state)
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
*/
- if (state->availMem < (long) ((newmemtupsize - memtupsize) * sizeof(void *)))
+ if (state->availMem < (Size) ((newmemtupsize - memtupsize) * sizeof(void *)))
goto noalloc;
/* OK, do it */
FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
state->memtupsize = newmemtupsize;
state->memtuples = (void **)
- repalloc(state->memtuples,
- state->memtupsize * sizeof(void *));
+ repalloc_huge(state->memtuples,
+ state->memtupsize * sizeof(void *));
USEMEM(state, GetMemoryChunkSpace(state->memtuples));
if (LACKMEM(state))
elog(ERROR, "unexpected out-of-memory situation during sort");
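
For context on where allowedMem comes from (a sketch, not part of this diff): both tuplesort and tuplestore derive it from a caller-supplied limit in kilobytes, typically work_mem or maintenance_work_mem, so with the fields now declared as Size the initialization in the begin functions would look roughly like:

    state->allowedMem = (Size) workMem * 1024;
    state->availMem = state->allowedMem;

Everything else in these two files then accounts against availMem through the USEMEM/FREEMEM macros, which is why the grow_memtuples changes above are the only places that need to know about the new huge-allocation ceiling.
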