aboutsummaryrefslogtreecommitdiff
path: root/src/backend/utils
diff options
context:
space:
mode:
Diffstat (limited to 'src/backend/utils')
-rw-r--r--src/backend/utils/adt/jsonb_gin.c4
-rw-r--r--src/backend/utils/adt/jsonb_op.c8
-rw-r--r--src/backend/utils/adt/jsonfuncs.c4
-rw-r--r--src/backend/utils/adt/jsonpath_exec.c4
-rw-r--r--src/backend/utils/adt/multirangetypes.c9
-rw-r--r--src/backend/utils/adt/pgstatfuncs.c14
-rw-r--r--src/backend/utils/adt/rangetypes.c8
-rw-r--r--src/backend/utils/adt/tsvector_op.c24
-rw-r--r--src/backend/utils/cache/catcache.c2
-rw-r--r--src/backend/utils/cache/evtcache.c16
-rw-r--r--src/backend/utils/cache/plancache.c21
-rw-r--r--src/backend/utils/cache/ts_cache.c4
-rw-r--r--src/backend/utils/cache/typcache.c13
-rw-r--r--src/backend/utils/error/elog.c15
-rw-r--r--src/backend/utils/hash/dynahash.c52
-rw-r--r--src/backend/utils/init/miscinit.c1
-rw-r--r--src/backend/utils/misc/guc.c59
-rw-r--r--src/backend/utils/misc/ps_status.c16
-rw-r--r--src/backend/utils/mmgr/alignedalloc.c18
-rw-r--r--src/backend/utils/mmgr/aset.c71
-rw-r--r--src/backend/utils/mmgr/bump.c31
-rw-r--r--src/backend/utils/mmgr/generation.c29
-rw-r--r--src/backend/utils/mmgr/mcxt.c77
-rw-r--r--src/backend/utils/mmgr/slab.c32
24 files changed, 414 insertions, 118 deletions
diff --git a/src/backend/utils/adt/jsonb_gin.c b/src/backend/utils/adt/jsonb_gin.c
index c1950792b5a..9b56248cf0b 100644
--- a/src/backend/utils/adt/jsonb_gin.c
+++ b/src/backend/utils/adt/jsonb_gin.c
@@ -896,8 +896,8 @@ gin_extract_jsonb_query(PG_FUNCTION_ARGS)
continue;
/* We rely on the array elements not being toasted */
entries[j++] = make_text_key(JGINFLAG_KEY,
- VARDATA_ANY(key_datums[i]),
- VARSIZE_ANY_EXHDR(key_datums[i]));
+ VARDATA_ANY(DatumGetPointer(key_datums[i])),
+ VARSIZE_ANY_EXHDR(DatumGetPointer(key_datums[i])));
}
*nentries = j;
diff --git a/src/backend/utils/adt/jsonb_op.c b/src/backend/utils/adt/jsonb_op.c
index fa5603f26e1..51d38e321fb 100644
--- a/src/backend/utils/adt/jsonb_op.c
+++ b/src/backend/utils/adt/jsonb_op.c
@@ -63,8 +63,8 @@ jsonb_exists_any(PG_FUNCTION_ARGS)
strVal.type = jbvString;
/* We rely on the array elements not being toasted */
- strVal.val.string.val = VARDATA_ANY(key_datums[i]);
- strVal.val.string.len = VARSIZE_ANY_EXHDR(key_datums[i]);
+ strVal.val.string.val = VARDATA_ANY(DatumGetPointer(key_datums[i]));
+ strVal.val.string.len = VARSIZE_ANY_EXHDR(DatumGetPointer(key_datums[i]));
if (findJsonbValueFromContainer(&jb->root,
JB_FOBJECT | JB_FARRAY,
@@ -96,8 +96,8 @@ jsonb_exists_all(PG_FUNCTION_ARGS)
strVal.type = jbvString;
/* We rely on the array elements not being toasted */
- strVal.val.string.val = VARDATA_ANY(key_datums[i]);
- strVal.val.string.len = VARSIZE_ANY_EXHDR(key_datums[i]);
+ strVal.val.string.val = VARDATA_ANY(DatumGetPointer(key_datums[i]));
+ strVal.val.string.len = VARSIZE_ANY_EXHDR(DatumGetPointer(key_datums[i]));
if (findJsonbValueFromContainer(&jb->root,
JB_FOBJECT | JB_FARRAY,
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index bcb1720b6cd..370456408bf 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -4766,8 +4766,8 @@ jsonb_delete_array(PG_FUNCTION_ARGS)
continue;
/* We rely on the array elements not being toasted */
- keyptr = VARDATA_ANY(keys_elems[i]);
- keylen = VARSIZE_ANY_EXHDR(keys_elems[i]);
+ keyptr = VARDATA_ANY(DatumGetPointer(keys_elems[i]));
+ keylen = VARSIZE_ANY_EXHDR(DatumGetPointer(keys_elems[i]));
if (keylen == v.val.string.len &&
memcmp(keyptr, v.val.string.val, keylen) == 0)
{
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index dbab24737ef..407041b14a1 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -3074,8 +3074,8 @@ JsonItemFromDatum(Datum val, Oid typid, int32 typmod, JsonbValue *res)
case TEXTOID:
case VARCHAROID:
res->type = jbvString;
- res->val.string.val = VARDATA_ANY(val);
- res->val.string.len = VARSIZE_ANY_EXHDR(val);
+ res->val.string.val = VARDATA_ANY(DatumGetPointer(val));
+ res->val.string.len = VARSIZE_ANY_EXHDR(DatumGetPointer(val));
break;
case DATEOID:
case TIMEOID:
diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c
index cd84ced5b48..46f2ec0c29f 100644
--- a/src/backend/utils/adt/multirangetypes.c
+++ b/src/backend/utils/adt/multirangetypes.c
@@ -394,12 +394,13 @@ multirange_send(PG_FUNCTION_ARGS)
for (int i = 0; i < range_count; i++)
{
Datum range;
+ bytea *outputbytes;
range = RangeTypePGetDatum(ranges[i]);
- range = PointerGetDatum(SendFunctionCall(&cache->typioproc, range));
+ outputbytes = SendFunctionCall(&cache->typioproc, range);
- pq_sendint32(buf, VARSIZE(range) - VARHDRSZ);
- pq_sendbytes(buf, VARDATA(range), VARSIZE(range) - VARHDRSZ);
+ pq_sendint32(buf, VARSIZE(outputbytes) - VARHDRSZ);
+ pq_sendbytes(buf, VARDATA(outputbytes), VARSIZE(outputbytes) - VARHDRSZ);
}
PG_RETURN_BYTEA_P(pq_endtypsend(buf));
@@ -2833,7 +2834,7 @@ hash_multirange(PG_FUNCTION_ARGS)
upper_hash = 0;
/* Merge hashes of flags and bounds */
- range_hash = hash_uint32((uint32) flags);
+ range_hash = hash_bytes_uint32((uint32) flags);
range_hash ^= lower_hash;
range_hash = pg_rotate_left32(range_hash, 1);
range_hash ^= upper_hash;
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 1c12ddbae49..c756c2bebaa 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -2171,7 +2171,7 @@ pg_stat_get_replication_slot(PG_FUNCTION_ARGS)
Datum
pg_stat_get_subscription_stats(PG_FUNCTION_ARGS)
{
-#define PG_STAT_GET_SUBSCRIPTION_STATS_COLS 11
+#define PG_STAT_GET_SUBSCRIPTION_STATS_COLS 12
Oid subid = PG_GETARG_OID(0);
TupleDesc tupdesc;
Datum values[PG_STAT_GET_SUBSCRIPTION_STATS_COLS] = {0};
@@ -2197,15 +2197,17 @@ pg_stat_get_subscription_stats(PG_FUNCTION_ARGS)
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 6, "confl_update_exists",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 7, "confl_update_missing",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 7, "confl_update_deleted",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 8, "confl_delete_origin_differs",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8, "confl_update_missing",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 9, "confl_delete_missing",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "confl_delete_origin_differs",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 10, "confl_multiple_unique_conflicts",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "confl_delete_missing",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 11, "stats_reset",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "confl_multiple_unique_conflicts",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "stats_reset",
TIMESTAMPTZOID, -1, 0);
BlessTupleDesc(tupdesc);
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 66cc0acf4a7..c83b239b3bb 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -285,8 +285,7 @@ range_send(PG_FUNCTION_ARGS)
if (RANGE_HAS_LBOUND(flags))
{
- Datum bound = PointerGetDatum(SendFunctionCall(&cache->typioproc,
- lower.val));
+ bytea *bound = SendFunctionCall(&cache->typioproc, lower.val);
uint32 bound_len = VARSIZE(bound) - VARHDRSZ;
char *bound_data = VARDATA(bound);
@@ -296,8 +295,7 @@ range_send(PG_FUNCTION_ARGS)
if (RANGE_HAS_UBOUND(flags))
{
- Datum bound = PointerGetDatum(SendFunctionCall(&cache->typioproc,
- upper.val));
+ bytea *bound = SendFunctionCall(&cache->typioproc, upper.val);
uint32 bound_len = VARSIZE(bound) - VARHDRSZ;
char *bound_data = VARDATA(bound);
@@ -1444,7 +1442,7 @@ hash_range(PG_FUNCTION_ARGS)
upper_hash = 0;
/* Merge hashes of flags and bounds */
- result = hash_uint32((uint32) flags);
+ result = hash_bytes_uint32((uint32) flags);
result ^= lower_hash;
result = pg_rotate_left32(result, 1);
result ^= upper_hash;
diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c
index 1fa1275ca63..0625da9532f 100644
--- a/src/backend/utils/adt/tsvector_op.c
+++ b/src/backend/utils/adt/tsvector_op.c
@@ -329,8 +329,8 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
if (nulls[i])
continue;
- lex = VARDATA(dlexemes[i]);
- lex_len = VARSIZE(dlexemes[i]) - VARHDRSZ;
+ lex = VARDATA(DatumGetPointer(dlexemes[i]));
+ lex_len = VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ;
lex_pos = tsvector_bsearch(tsout, lex, lex_len);
if (lex_pos >= 0 && (j = POSDATALEN(tsout, entry + lex_pos)) != 0)
@@ -443,10 +443,10 @@ compare_text_lexemes(const void *va, const void *vb)
{
Datum a = *((const Datum *) va);
Datum b = *((const Datum *) vb);
- char *alex = VARDATA_ANY(a);
- int alex_len = VARSIZE_ANY_EXHDR(a);
- char *blex = VARDATA_ANY(b);
- int blex_len = VARSIZE_ANY_EXHDR(b);
+ char *alex = VARDATA_ANY(DatumGetPointer(a));
+ int alex_len = VARSIZE_ANY_EXHDR(DatumGetPointer(a));
+ char *blex = VARDATA_ANY(DatumGetPointer(b));
+ int blex_len = VARSIZE_ANY_EXHDR(DatumGetPointer(b));
return tsCompareString(alex, alex_len, blex, blex_len, false);
}
@@ -605,8 +605,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS)
if (nulls[i])
continue;
- lex = VARDATA(dlexemes[i]);
- lex_len = VARSIZE(dlexemes[i]) - VARHDRSZ;
+ lex = VARDATA(DatumGetPointer(dlexemes[i]));
+ lex_len = VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ;
lex_pos = tsvector_bsearch(tsin, lex, lex_len);
if (lex_pos >= 0)
@@ -770,7 +770,7 @@ array_to_tsvector(PG_FUNCTION_ARGS)
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("lexeme array may not contain nulls")));
- if (VARSIZE(dlexemes[i]) - VARHDRSZ == 0)
+ if (VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ == 0)
ereport(ERROR,
(errcode(ERRCODE_ZERO_LENGTH_CHARACTER_STRING),
errmsg("lexeme array may not contain empty strings")));
@@ -786,7 +786,7 @@ array_to_tsvector(PG_FUNCTION_ARGS)
/* Calculate space needed for surviving lexemes. */
for (i = 0; i < nitems; i++)
- datalen += VARSIZE(dlexemes[i]) - VARHDRSZ;
+ datalen += VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ;
tslen = CALCDATASIZE(nitems, datalen);
/* Allocate and fill tsvector. */
@@ -798,8 +798,8 @@ array_to_tsvector(PG_FUNCTION_ARGS)
cur = STRPTR(tsout);
for (i = 0; i < nitems; i++)
{
- char *lex = VARDATA(dlexemes[i]);
- int lex_len = VARSIZE(dlexemes[i]) - VARHDRSZ;
+ char *lex = VARDATA(DatumGetPointer(dlexemes[i]));
+ int lex_len = VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ;
memcpy(cur, lex, lex_len);
arrout[i].haspos = 0;
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index d1b25214376..e2cd3feaf81 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -213,7 +213,7 @@ namehashfast(Datum datum)
{
char *key = NameStr(*DatumGetName(datum));
- return hash_any((unsigned char *) key, strlen(key));
+ return hash_bytes((unsigned char *) key, strlen(key));
}
static bool
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index ce596bf5638..b9d5a5998be 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -78,7 +78,6 @@ BuildEventTriggerCache(void)
{
HASHCTL ctl;
HTAB *cache;
- MemoryContext oldcontext;
Relation rel;
Relation irel;
SysScanDesc scan;
@@ -110,9 +109,6 @@ BuildEventTriggerCache(void)
(Datum) 0);
}
- /* Switch to correct memory context. */
- oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext);
-
/* Prevent the memory context from being nuked while we're rebuilding. */
EventTriggerCacheState = ETCS_REBUILD_STARTED;
@@ -145,6 +141,7 @@ BuildEventTriggerCache(void)
bool evttags_isnull;
EventTriggerCacheEntry *entry;
bool found;
+ MemoryContext oldcontext;
/* Get next tuple. */
tup = systable_getnext_ordered(scan, ForwardScanDirection);
@@ -171,6 +168,9 @@ BuildEventTriggerCache(void)
else
continue;
+ /* Switch to correct memory context. */
+ oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext);
+
/* Allocate new cache item. */
item = palloc0(sizeof(EventTriggerCacheItem));
item->fnoid = form->evtfoid;
@@ -188,6 +188,9 @@ BuildEventTriggerCache(void)
entry->triggerlist = lappend(entry->triggerlist, item);
else
entry->triggerlist = list_make1(item);
+
+ /* Restore previous memory context. */
+ MemoryContextSwitchTo(oldcontext);
}
/* Done with pg_event_trigger scan. */
@@ -195,9 +198,6 @@ BuildEventTriggerCache(void)
index_close(irel, AccessShareLock);
relation_close(rel, AccessShareLock);
- /* Restore previous memory context. */
- MemoryContextSwitchTo(oldcontext);
-
/* Install new cache. */
EventTriggerCache = cache;
@@ -240,6 +240,8 @@ DecodeTextArrayToBitmapset(Datum array)
}
pfree(elems);
+ if ((Pointer) arr != DatumGetPointer(array))
+ pfree(arr);
return bms;
}
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 0c506d320b1..6661d2c6b73 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -463,8 +463,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
/*
* Save the final parameter types (or other parameter specification data)
- * into the source_context, as well as our other parameters. Also save
- * the result tuple descriptor.
+ * into the source_context, as well as our other parameters.
*/
MemoryContextSwitchTo(source_context);
@@ -480,9 +479,25 @@ CompleteCachedPlan(CachedPlanSource *plansource,
plansource->parserSetupArg = parserSetupArg;
plansource->cursor_options = cursor_options;
plansource->fixed_result = fixed_result;
- plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list);
+ /*
+ * Also save the result tuple descriptor. PlanCacheComputeResultDesc may
+ * leak some cruft; normally we just accept that to save a copy step, but
+ * in USE_VALGRIND mode be tidy by running it in the caller's context.
+ */
+#ifdef USE_VALGRIND
+ MemoryContextSwitchTo(oldcxt);
+ plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list);
+ if (plansource->resultDesc)
+ {
+ MemoryContextSwitchTo(source_context);
+ plansource->resultDesc = CreateTupleDescCopy(plansource->resultDesc);
+ MemoryContextSwitchTo(oldcxt);
+ }
+#else
+ plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list);
MemoryContextSwitchTo(oldcxt);
+#endif
plansource->is_complete = true;
plansource->is_valid = true;
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 18cccd778fd..e8ae53238d0 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -321,7 +321,9 @@ lookup_ts_dictionary_cache(Oid dictId)
/*
* Init method runs in dictionary's private memory context, and we
- * make sure the options are stored there too
+ * make sure the options are stored there too. This typically
+ * results in a small amount of memory leakage, but it's not worth
+ * complicating the API for tmplinit functions to avoid it.
*/
oldcontext = MemoryContextSwitchTo(entry->dictCtx);
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index f9aec38a11f..6a347698edf 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -1171,9 +1171,6 @@ load_domaintype_info(TypeCacheEntry *typentry)
elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
NameStr(typTup->typname), NameStr(c->conname));
- /* Convert conbin to C string in caller context */
- constring = TextDatumGetCString(val);
-
/* Create the DomainConstraintCache object and context if needed */
if (dcc == NULL)
{
@@ -1189,9 +1186,8 @@ load_domaintype_info(TypeCacheEntry *typentry)
dcc->dccRefCount = 0;
}
- /* Create node trees in DomainConstraintCache's context */
- oldcxt = MemoryContextSwitchTo(dcc->dccContext);
-
+ /* Convert conbin to a node tree, still in caller's context */
+ constring = TextDatumGetCString(val);
check_expr = (Expr *) stringToNode(constring);
/*
@@ -1206,10 +1202,13 @@ load_domaintype_info(TypeCacheEntry *typentry)
*/
check_expr = expression_planner(check_expr);
+ /* Create only the minimally needed stuff in dccContext */
+ oldcxt = MemoryContextSwitchTo(dcc->dccContext);
+
r = makeNode(DomainConstraintState);
r->constrainttype = DOM_CONSTRAINT_CHECK;
r->name = pstrdup(NameStr(c->conname));
- r->check_expr = check_expr;
+ r->check_expr = copyObject(check_expr);
r->check_exprstate = NULL;
MemoryContextSwitchTo(oldcxt);
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 47af743990f..afce1a8e1f0 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -1128,12 +1128,15 @@ set_backtrace(ErrorData *edata, int num_skip)
nframes = backtrace(buf, lengthof(buf));
strfrms = backtrace_symbols(buf, nframes);
- if (strfrms == NULL)
- return;
-
- for (int i = num_skip; i < nframes; i++)
- appendStringInfo(&errtrace, "\n%s", strfrms[i]);
- free(strfrms);
+ if (strfrms != NULL)
+ {
+ for (int i = num_skip; i < nframes; i++)
+ appendStringInfo(&errtrace, "\n%s", strfrms[i]);
+ free(strfrms);
+ }
+ else
+ appendStringInfoString(&errtrace,
+ "insufficient memory for backtrace generation");
}
#else
appendStringInfoString(&errtrace,
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 42e9be274fc..81da03629f0 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -22,10 +22,11 @@
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
- * For hash tables in shared memory, the memory allocator function should
- * match malloc's semantics of returning NULL on failure. For hash tables
- * in local memory, we typically use palloc() which will throw error on
- * failure. The code in this file has to cope with both cases.
+ * The memory allocator function should match malloc's semantics of returning
+ * NULL on failure. (This is essential for hash tables in shared memory.
+ * For hash tables in local memory, we used to use palloc() which will throw
+ * error on failure; but we no longer do, so it's untested whether this
+ * module will still cope with that behavior.)
*
* dynahash.c provides support for these types of lookup keys:
*
@@ -98,6 +99,7 @@
#include "access/xact.h"
#include "common/hashfn.h"
+#include "lib/ilist.h"
#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
@@ -236,6 +238,16 @@ struct HTAB
Size keysize; /* hash key length in bytes */
long ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
+
+ /*
+ * In a USE_VALGRIND build, non-shared hashtables keep an slist chain of
+ * all the element blocks they have allocated. This pacifies Valgrind,
+ * which would otherwise often claim that the element blocks are "possibly
+ * lost" for lack of any non-interior pointers to their starts.
+ */
+#ifdef USE_VALGRIND
+ slist_head element_blocks;
+#endif
};
/*
@@ -1712,6 +1724,8 @@ element_alloc(HTAB *hashp, int nelem, int freelist_idx)
{
HASHHDR *hctl = hashp->hctl;
Size elementSize;
+ Size requestSize;
+ char *allocedBlock;
HASHELEMENT *firstElement;
HASHELEMENT *tmpElement;
HASHELEMENT *prevElement;
@@ -1723,12 +1737,38 @@ element_alloc(HTAB *hashp, int nelem, int freelist_idx)
/* Each element has a HASHELEMENT header plus user data. */
elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);
+ requestSize = nelem * elementSize;
+
+ /* Add space for slist_node list link if we need one. */
+#ifdef USE_VALGRIND
+ if (!hashp->isshared)
+ requestSize += MAXALIGN(sizeof(slist_node));
+#endif
+
+ /* Allocate the memory. */
CurrentDynaHashCxt = hashp->hcxt;
- firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);
+ allocedBlock = hashp->alloc(requestSize);
- if (!firstElement)
+ if (!allocedBlock)
return false;
+ /*
+ * If USE_VALGRIND, each allocated block of elements of a non-shared
+ * hashtable is chained into a list, so that Valgrind won't think it's
+ * been leaked.
+ */
+#ifdef USE_VALGRIND
+ if (hashp->isshared)
+ firstElement = (HASHELEMENT *) allocedBlock;
+ else
+ {
+ slist_push_head(&hashp->element_blocks, (slist_node *) allocedBlock);
+ firstElement = (HASHELEMENT *) (allocedBlock + MAXALIGN(sizeof(slist_node)));
+ }
+#else
+ firstElement = (HASHELEMENT *) allocedBlock;
+#endif
+
/* prepare to link all the new entries into the freelist */
prevElement = NULL;
tmpElement = firstElement;
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 43b4dbccc3d..65d8cbfaed5 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -1183,7 +1183,6 @@ UnlinkLockFiles(int status, Datum arg)
/* Should we complain if the unlink fails? */
}
/* Since we're about to exit, no need to reclaim storage */
- lock_files = NIL;
/*
* Lock file removal should always be the last externally visible action
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 667df448732..e404c345e6e 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -249,6 +249,7 @@ static void reapply_stacked_values(struct config_generic *variable,
const char *curvalue,
GucContext curscontext, GucSource cursource,
Oid cursrole);
+static void free_placeholder(struct config_string *pHolder);
static bool validate_option_array_item(const char *name, const char *value,
bool skipIfNoPermissions);
static void write_auto_conf_file(int fd, const char *filename, ConfigVariable *head);
@@ -4722,8 +4723,13 @@ AlterSystemSetConfigFile(AlterSystemStmt *altersysstmt)
* the config file cannot cause postmaster start to fail, so we
* don't have to be too tense about possibly installing a bad
* value.)
+ *
+ * As an exception, we skip this check if this is a RESET command
+ * for an unknown custom GUC, else there'd be no way for users to
+ * remove such settings with reserved prefixes.
*/
- (void) assignable_custom_variable_name(name, false, ERROR);
+ if (value || !valid_custom_variable_name(name))
+ (void) assignable_custom_variable_name(name, false, ERROR);
}
/*
@@ -5018,16 +5024,8 @@ define_custom_variable(struct config_generic *variable)
set_config_sourcefile(name, pHolder->gen.sourcefile,
pHolder->gen.sourceline);
- /*
- * Free up as much as we conveniently can of the placeholder structure.
- * (This neglects any stack items, so it's possible for some memory to be
- * leaked. Since this can only happen once per session per variable, it
- * doesn't seem worth spending much code on.)
- */
- set_string_field(pHolder, pHolder->variable, NULL);
- set_string_field(pHolder, &pHolder->reset_val, NULL);
-
- guc_free(pHolder);
+ /* Now we can free the no-longer-referenced placeholder variable */
+ free_placeholder(pHolder);
}
/*
@@ -5127,6 +5125,25 @@ reapply_stacked_values(struct config_generic *variable,
}
/*
+ * Free up a no-longer-referenced placeholder GUC variable.
+ *
+ * This neglects any stack items, so it's possible for some memory to be
+ * leaked. Since this can only happen once per session per variable, it
+ * doesn't seem worth spending much code on.
+ */
+static void
+free_placeholder(struct config_string *pHolder)
+{
+ /* Placeholders are always STRING type, so free their values */
+ Assert(pHolder->gen.vartype == PGC_STRING);
+ set_string_field(pHolder, pHolder->variable, NULL);
+ set_string_field(pHolder, &pHolder->reset_val, NULL);
+
+ guc_free(unconstify(char *, pHolder->gen.name));
+ guc_free(pHolder);
+}
+
+/*
* Functions for extensions to call to define their custom GUC variables.
*/
void
@@ -5286,9 +5303,7 @@ MarkGUCPrefixReserved(const char *className)
/*
* Check for existing placeholders. We must actually remove invalid
- * placeholders, else future parallel worker startups will fail. (We
- * don't bother trying to free associated memory, since this shouldn't
- * happen often.)
+ * placeholders, else future parallel worker startups will fail.
*/
hash_seq_init(&status, guc_hashtab);
while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
@@ -5312,6 +5327,8 @@ MarkGUCPrefixReserved(const char *className)
NULL);
/* Remove it from any lists it's in, too */
RemoveGUCFromLists(var);
+ /* And free it */
+ free_placeholder((struct config_string *) var);
}
}
@@ -6711,6 +6728,7 @@ validate_option_array_item(const char *name, const char *value,
{
struct config_generic *gconf;
+ bool reset_custom;
/*
* There are three cases to consider:
@@ -6729,16 +6747,21 @@ validate_option_array_item(const char *name, const char *value,
* it's assumed to be fully validated.)
*
* name is not known and can't be created as a placeholder. Throw error,
- * unless skipIfNoPermissions is true, in which case return false.
+ * unless skipIfNoPermissions or reset_custom is true. If reset_custom is
+ * true, this is a RESET or RESET ALL operation for an unknown custom GUC
+ * with a reserved prefix, in which case we want to fall through to the
+ * placeholder case described in the preceding paragraph (else there'd be
+ * no way for users to remove them). Otherwise, return false.
*/
- gconf = find_option(name, true, skipIfNoPermissions, ERROR);
- if (!gconf)
+ reset_custom = (!value && valid_custom_variable_name(name));
+ gconf = find_option(name, true, skipIfNoPermissions || reset_custom, ERROR);
+ if (!gconf && !reset_custom)
{
/* not known, failed to make a placeholder */
return false;
}
- if (gconf->flags & GUC_CUSTOM_PLACEHOLDER)
+ if (!gconf || gconf->flags & GUC_CUSTOM_PLACEHOLDER)
{
/*
* We cannot do any meaningful check on the value, so only permissions
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index e08b26e8c14..4df25944deb 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -100,6 +100,17 @@ static void flush_ps_display(void);
static int save_argc;
static char **save_argv;
+/*
+ * Valgrind seems not to consider the global "environ" variable as a valid
+ * root pointer; so when we allocate a new environment array, it claims that
+ * data is leaked. To fix that, keep our own statically-allocated copy of the
+ * pointer. (Oddly, this doesn't seem to be a problem for "argv".)
+ */
+#if defined(PS_USE_CLOBBER_ARGV) && defined(USE_VALGRIND)
+extern char **ps_status_new_environ;
+char **ps_status_new_environ;
+#endif
+
/*
* Call this early in startup to save the original argc/argv values.
@@ -206,6 +217,11 @@ save_ps_display_args(int argc, char **argv)
}
new_environ[i] = NULL;
environ = new_environ;
+
+ /* See notes about Valgrind above. */
+#ifdef USE_VALGRIND
+ ps_status_new_environ = new_environ;
+#endif
}
/*
diff --git a/src/backend/utils/mmgr/alignedalloc.c b/src/backend/utils/mmgr/alignedalloc.c
index 7eea695de62..b1be7426914 100644
--- a/src/backend/utils/mmgr/alignedalloc.c
+++ b/src/backend/utils/mmgr/alignedalloc.c
@@ -45,6 +45,15 @@ AlignedAllocFree(void *pointer)
GetMemoryChunkContext(unaligned)->name, chunk);
#endif
+ /*
+ * Create a dummy vchunk covering the start of the unaligned chunk, but
+ * not overlapping the aligned chunk. This will be freed while pfree'ing
+ * the unaligned chunk, keeping Valgrind happy. Then when we return to
+ * the outer pfree, that will clean up the vchunk for the aligned chunk.
+ */
+ VALGRIND_MEMPOOL_ALLOC(GetMemoryChunkContext(unaligned), unaligned,
+ (char *) pointer - (char *) unaligned);
+
/* Recursively pfree the unaligned chunk */
pfree(unaligned);
}
@@ -123,6 +132,15 @@ AlignedAllocRealloc(void *pointer, Size size, int flags)
VALGRIND_MAKE_MEM_DEFINED(pointer, old_size);
memcpy(newptr, pointer, Min(size, old_size));
+ /*
+ * Create a dummy vchunk covering the start of the old unaligned chunk,
+ * but not overlapping the aligned chunk. This will be freed while
+ * pfree'ing the old unaligned chunk, keeping Valgrind happy. Then when
+ * we return to repalloc, it will move the vchunk for the aligned chunk.
+ */
+ VALGRIND_MEMPOOL_ALLOC(ctx, unaligned,
+ (char *) pointer - (char *) unaligned);
+
pfree(unaligned);
return newptr;
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 666ecd8f78d..9ef109ca586 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -103,6 +103,8 @@
#define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ sizeof(MemoryChunk)
+#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(AllocSetContext)) + \
+ ALLOC_BLOCKHDRSZ)
typedef struct AllocBlockData *AllocBlock; /* forward reference */
@@ -458,6 +460,21 @@ AllocSetContextCreateInternal(MemoryContext parent,
* we'd leak the header/initial block if we ereport in this stretch.
*/
+ /* Create a vpool associated with the context */
+ VALGRIND_CREATE_MEMPOOL(set, 0, false);
+
+ /*
+ * Create a vchunk covering both the AllocSetContext struct and the keeper
+ * block's header. (Perhaps it would be more sensible for these to be two
+ * separate vchunks, but doing that seems to tickle bugs in some versions
+ * of Valgrind.) We must have these vchunks, and also a vchunk for each
+ * subsequently-added block header, so that Valgrind considers the
+ * pointers within them while checking for leaked memory. Note that
+ * Valgrind doesn't distinguish between these vchunks and those created by
+ * mcxt.c for the user-accessible-data chunks we allocate.
+ */
+ VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
+
/* Fill in the initial block's block header */
block = KeeperBlock(set);
block->aset = set;
@@ -585,6 +602,14 @@ AllocSetReset(MemoryContext context)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, block->freeptr - ((char *) block));
#endif
+
+ /*
+ * We need to free the block header's vchunk explicitly, although
+ * the user-data vchunks within will go away in the TRIM below.
+ * Otherwise Valgrind complains about leaked allocations.
+ */
+ VALGRIND_MEMPOOL_FREE(set, block);
+
free(block);
}
block = next;
@@ -592,6 +617,14 @@ AllocSetReset(MemoryContext context)
Assert(context->mem_allocated == keepersize);
+ /*
+ * Instruct Valgrind to throw away all the vchunks associated with this
+ * context, except for the one covering the AllocSetContext and
+ * keeper-block header. This gets rid of the vchunks for whatever user
+ * data is getting discarded by the context reset.
+ */
+ VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
+
/* Reset block size allocation sequence, too */
set->nextBlockSize = set->initBlockSize;
}
@@ -648,6 +681,9 @@ AllocSetDelete(MemoryContext context)
freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
freelist->num_free--;
+ /* Destroy the context's vpool --- see notes below */
+ VALGRIND_DESTROY_MEMPOOL(oldset);
+
/* All that remains is to free the header/initial block */
free(oldset);
}
@@ -675,13 +711,24 @@ AllocSetDelete(MemoryContext context)
#endif
if (!IsKeeperBlock(set, block))
+ {
+ /* As in AllocSetReset, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(set, block);
free(block);
+ }
block = next;
}
Assert(context->mem_allocated == keepersize);
+ /*
+ * Destroy the vpool. We don't seem to need to explicitly free the
+ * initial block's header vchunk, nor any user-data vchunks that Valgrind
+ * still knows about; they'll all go away automatically.
+ */
+ VALGRIND_DESTROY_MEMPOOL(set);
+
/* Finally, free the context header, including the keeper block */
free(set);
}
@@ -716,6 +763,9 @@ AllocSetAllocLarge(MemoryContext context, Size size, int flags)
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
block->aset = set;
@@ -922,6 +972,9 @@ AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
block->aset = set;
@@ -1104,6 +1157,10 @@ AllocSetFree(void *pointer)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, block->freeptr - ((char *) block));
#endif
+
+ /* As in AllocSetReset, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(set, block);
+
free(block);
}
else
@@ -1184,6 +1241,7 @@ AllocSetRealloc(void *pointer, Size size, int flags)
* realloc() to make the containing block bigger, or smaller, with
* minimum space wastage.
*/
+ AllocBlock newblock;
Size chksize;
Size blksize;
Size oldblksize;
@@ -1223,14 +1281,21 @@ AllocSetRealloc(void *pointer, Size size, int flags)
blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
oldblksize = block->endptr - ((char *) block);
- block = (AllocBlock) realloc(block, blksize);
- if (block == NULL)
+ newblock = (AllocBlock) realloc(block, blksize);
+ if (newblock == NULL)
{
/* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
return MemoryContextAllocationFailure(&set->header, size, flags);
}
+ /*
+ * Move the block-header vchunk explicitly. (mcxt.c will take care of
+ * moving the vchunk for the user data.)
+ */
+ VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ);
+ block = newblock;
+
/* updated separately, not to underflow when (oldblksize > blksize) */
set->header.mem_allocated -= oldblksize;
set->header.mem_allocated += blksize;
@@ -1294,7 +1359,7 @@ AllocSetRealloc(void *pointer, Size size, int flags)
/* Ensure any padding bytes are marked NOACCESS. */
VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
- /* Disallow access to the chunk header . */
+ /* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
return pointer;
diff --git a/src/backend/utils/mmgr/bump.c b/src/backend/utils/mmgr/bump.c
index f7a37d1b3e8..2805d55a2ec 100644
--- a/src/backend/utils/mmgr/bump.c
+++ b/src/backend/utils/mmgr/bump.c
@@ -45,7 +45,9 @@
#include "utils/memutils_memorychunk.h"
#include "utils/memutils_internal.h"
-#define Bump_BLOCKHDRSZ MAXALIGN(sizeof(BumpBlock))
+#define Bump_BLOCKHDRSZ MAXALIGN(sizeof(BumpBlock))
+#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(BumpContext)) + \
+ Bump_BLOCKHDRSZ)
/* No chunk header unless built with MEMORY_CONTEXT_CHECKING */
#ifdef MEMORY_CONTEXT_CHECKING
@@ -189,6 +191,12 @@ BumpContextCreate(MemoryContext parent, const char *name, Size minContextSize,
* Avoid writing code that can fail between here and MemoryContextCreate;
* we'd leak the header and initial block if we ereport in this stretch.
*/
+
+ /* See comments about Valgrind interactions in aset.c */
+ VALGRIND_CREATE_MEMPOOL(set, 0, false);
+	/* This vchunk covers the BumpContext and the keeper-block header */
+ VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
+
dlist_init(&set->blocks);
/* Fill in the initial block's block header */
@@ -262,6 +270,14 @@ BumpReset(MemoryContext context)
BumpBlockFree(set, block);
}
+ /*
+ * Instruct Valgrind to throw away all the vchunks associated with this
+ * context, except for the one covering the BumpContext and keeper-block
+ * header. This gets rid of the vchunks for whatever user data is getting
+ * discarded by the context reset.
+ */
+ VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
+
/* Reset block size allocation sequence, too */
set->nextBlockSize = set->initBlockSize;
@@ -279,6 +295,10 @@ BumpDelete(MemoryContext context)
{
/* Reset to release all releasable BumpBlocks */
BumpReset(context);
+
+	/* Destroy the vpool --- see notes in aset.c */
+ VALGRIND_DESTROY_MEMPOOL(context);
+
/* And free the context header and keeper block */
free(context);
}
@@ -318,6 +338,9 @@ BumpAllocLarge(MemoryContext context, Size size, int flags)
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
/* the block is completely full */
@@ -455,6 +478,9 @@ BumpAllocFromNewBlock(MemoryContext context, Size size, int flags,
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
/* initialize the new block */
@@ -606,6 +632,9 @@ BumpBlockFree(BumpContext *set, BumpBlock *block)
wipe_mem(block, ((char *) block->endptr - (char *) block));
#endif
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(set, block);
+
free(block);
}
diff --git a/src/backend/utils/mmgr/generation.c b/src/backend/utils/mmgr/generation.c
index 18679ad4f1e..cfafc9bf082 100644
--- a/src/backend/utils/mmgr/generation.c
+++ b/src/backend/utils/mmgr/generation.c
@@ -45,6 +45,8 @@
#define Generation_BLOCKHDRSZ MAXALIGN(sizeof(GenerationBlock))
#define Generation_CHUNKHDRSZ sizeof(MemoryChunk)
+#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(GenerationContext)) + \
+ Generation_BLOCKHDRSZ)
#define Generation_CHUNK_FRACTION 8
@@ -221,6 +223,12 @@ GenerationContextCreate(MemoryContext parent,
* Avoid writing code that can fail between here and MemoryContextCreate;
* we'd leak the header if we ereport in this stretch.
*/
+
+ /* See comments about Valgrind interactions in aset.c */
+ VALGRIND_CREATE_MEMPOOL(set, 0, false);
+	/* This vchunk covers the GenerationContext and the keeper-block header */
+ VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
+
dlist_init(&set->blocks);
/* Fill in the initial block's block header */
@@ -309,6 +317,14 @@ GenerationReset(MemoryContext context)
GenerationBlockFree(set, block);
}
+ /*
+ * Instruct Valgrind to throw away all the vchunks associated with this
+ * context, except for the one covering the GenerationContext and
+ * keeper-block header. This gets rid of the vchunks for whatever user
+ * data is getting discarded by the context reset.
+ */
+ VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
+
/* set it so new allocations to make use of the keeper block */
set->block = KeeperBlock(set);
@@ -329,6 +345,10 @@ GenerationDelete(MemoryContext context)
{
/* Reset to release all releasable GenerationBlocks */
GenerationReset(context);
+
+	/* Destroy the vpool --- see notes in aset.c */
+ VALGRIND_DESTROY_MEMPOOL(context);
+
/* And free the context header and keeper block */
free(context);
}
@@ -365,6 +385,9 @@ GenerationAllocLarge(MemoryContext context, Size size, int flags)
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
/* block with a single (used) chunk */
@@ -487,6 +510,9 @@ GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags,
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
/* initialize the new block */
@@ -677,6 +703,9 @@ GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
wipe_mem(block, block->blksize);
#endif
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(set, block);
+
free(block);
}
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index ce01dce9861..47fd774c7d2 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -8,6 +8,23 @@
* context-type-specific operations via the function pointers in a
* context's MemoryContextMethods struct.
*
+ * A note about Valgrind support: when USE_VALGRIND is defined, we provide
+ * support for memory leak tracking at the allocation-unit level. Valgrind
+ * does leak detection by tracking allocated "chunks", which can be grouped
+ * into "pools". The "chunk" terminology is overloaded, since we use that
+ * word for our allocation units, and it's sometimes important to distinguish
+ * those from the Valgrind objects that describe them. To reduce confusion,
+ * let's use the terms "vchunk" and "vpool" for the Valgrind objects.
+ *
+ * We use a separate vpool for each memory context. The context-type-specific
+ * code is responsible for creating and deleting the vpools, and also for
+ * creating vchunks to cover its management data structures such as block
+ * headers. (There must be a vchunk that includes every pointer we want
+ * Valgrind to consider for leak-tracking purposes.) This module creates
+ * and deletes the vchunks that cover the caller-visible allocated chunks.
+ * However, the context-type-specific code must handle cleaning up those
+ * vchunks too during memory context reset operations.
+ *
*
* Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -418,8 +435,6 @@ MemoryContextResetOnly(MemoryContext context)
context->methods->reset(context);
context->isReset = true;
- VALGRIND_DESTROY_MEMPOOL(context);
- VALGRIND_CREATE_MEMPOOL(context, 0, false);
}
}
@@ -526,8 +541,6 @@ MemoryContextDeleteOnly(MemoryContext context)
context->ident = NULL;
context->methods->delete_context(context);
-
- VALGRIND_DESTROY_MEMPOOL(context);
}
/*
@@ -1170,8 +1183,6 @@ MemoryContextCreate(MemoryContext node,
node->nextchild = NULL;
node->allowInCritSection = false;
}
-
- VALGRIND_CREATE_MEMPOOL(node, 0, false);
}
/*
@@ -1454,7 +1465,13 @@ MemoryContextAllocAligned(MemoryContext context,
void *unaligned;
void *aligned;
- /* wouldn't make much sense to waste that much space */
+ /*
+ * Restrict alignto to ensure that it can fit into the "value" field of
+ * the redirection MemoryChunk, and that the distance back to the start of
+ * the unaligned chunk will fit into the space available for that. This
+ * isn't a limitation in practice, since it wouldn't make much sense to
+ * waste that much space.
+ */
Assert(alignto < (128 * 1024 * 1024));
/* ensure alignto is a power of 2 */
@@ -1491,10 +1508,15 @@ MemoryContextAllocAligned(MemoryContext context,
alloc_size += 1;
#endif
- /* perform the actual allocation */
- unaligned = MemoryContextAllocExtended(context, alloc_size, flags);
+ /*
+ * Perform the actual allocation, but do not pass down MCXT_ALLOC_ZERO.
+ * This ensures that wasted bytes beyond the aligned chunk do not become
+ * DEFINED.
+ */
+ unaligned = MemoryContextAllocExtended(context, alloc_size,
+ flags & ~MCXT_ALLOC_ZERO);
- /* set the aligned pointer */
+ /* compute the aligned pointer */
aligned = (void *) TYPEALIGN(alignto, (char *) unaligned +
sizeof(MemoryChunk));
@@ -1522,12 +1544,23 @@ MemoryContextAllocAligned(MemoryContext context,
set_sentinel(aligned, size);
#endif
- /* Mark the bytes before the redirection header as noaccess */
- VALGRIND_MAKE_MEM_NOACCESS(unaligned,
- (char *) alignedchunk - (char *) unaligned);
+ /*
+ * MemoryContextAllocExtended marked the whole unaligned chunk as a
+ * vchunk. Undo that, instead making just the aligned chunk be a vchunk.
+ * This prevents Valgrind from complaining that the vchunk is possibly
+ * leaked, since only pointers to the aligned chunk will exist.
+ *
+ * After these calls, the aligned chunk will be marked UNDEFINED, and all
+ * the rest of the unaligned chunk (the redirection chunk header, the
+ * padding bytes before it, and any wasted trailing bytes) will be marked
+ * NOACCESS, which is what we want.
+ */
+ VALGRIND_MEMPOOL_FREE(context, unaligned);
+ VALGRIND_MEMPOOL_ALLOC(context, aligned, size);
- /* Disallow access to the redirection chunk header. */
- VALGRIND_MAKE_MEM_NOACCESS(alignedchunk, sizeof(MemoryChunk));
+ /* Now zero (and make DEFINED) just the aligned chunk, if requested */
+ if ((flags & MCXT_ALLOC_ZERO) != 0)
+ MemSetAligned(aligned, 0, size);
return aligned;
}
@@ -1561,16 +1594,12 @@ void
pfree(void *pointer)
{
#ifdef USE_VALGRIND
- MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
MemoryContext context = GetMemoryChunkContext(pointer);
#endif
MCXT_METHOD(pointer, free_p) (pointer);
-#ifdef USE_VALGRIND
- if (method != MCTX_ALIGNED_REDIRECT_ID)
- VALGRIND_MEMPOOL_FREE(context, pointer);
-#endif
+ VALGRIND_MEMPOOL_FREE(context, pointer);
}
/*
@@ -1580,9 +1609,6 @@ pfree(void *pointer)
void *
repalloc(void *pointer, Size size)
{
-#ifdef USE_VALGRIND
- MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
-#endif
#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
MemoryContext context = GetMemoryChunkContext(pointer);
#endif
@@ -1605,10 +1631,7 @@ repalloc(void *pointer, Size size)
*/
ret = MCXT_METHOD(pointer, realloc) (pointer, size, 0);
-#ifdef USE_VALGRIND
- if (method != MCTX_ALIGNED_REDIRECT_ID)
- VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
-#endif
+ VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
return ret;
}
diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c
index d32c0d318fb..0e35abcf5a0 100644
--- a/src/backend/utils/mmgr/slab.c
+++ b/src/backend/utils/mmgr/slab.c
@@ -377,6 +377,11 @@ SlabContextCreate(MemoryContext parent,
* we'd leak the header if we ereport in this stretch.
*/
+ /* See comments about Valgrind interactions in aset.c */
+ VALGRIND_CREATE_MEMPOOL(slab, 0, false);
+ /* This vchunk covers the SlabContext only */
+ VALGRIND_MEMPOOL_ALLOC(slab, slab, sizeof(SlabContext));
+
/* Fill in SlabContext-specific header fields */
slab->chunkSize = (uint32) chunkSize;
slab->fullChunkSize = (uint32) fullChunkSize;
@@ -451,6 +456,10 @@ SlabReset(MemoryContext context)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, slab->blockSize);
#endif
+
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(slab, block);
+
free(block);
context->mem_allocated -= slab->blockSize;
}
@@ -467,11 +476,23 @@ SlabReset(MemoryContext context)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, slab->blockSize);
#endif
+
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(slab, block);
+
free(block);
context->mem_allocated -= slab->blockSize;
}
}
+ /*
+ * Instruct Valgrind to throw away all the vchunks associated with this
+ * context, except for the one covering the SlabContext. This gets rid of
+ * the vchunks for whatever user data is getting discarded by the context
+ * reset.
+ */
+ VALGRIND_MEMPOOL_TRIM(slab, slab, sizeof(SlabContext));
+
slab->curBlocklistIndex = 0;
Assert(context->mem_allocated == 0);
@@ -486,6 +507,10 @@ SlabDelete(MemoryContext context)
{
/* Reset to release all the SlabBlocks */
SlabReset(context);
+
+	/* Destroy the vpool --- see notes in aset.c */
+ VALGRIND_DESTROY_MEMPOOL(context);
+
/* And free the context header */
free(context);
}
@@ -567,6 +592,9 @@ SlabAllocFromNewBlock(MemoryContext context, Size size, int flags)
if (unlikely(block == NULL))
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(slab, block, Slab_BLOCKHDRSZ);
+
block->slab = slab;
context->mem_allocated += slab->blockSize;
@@ -795,6 +823,10 @@ SlabFree(void *pointer)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, slab->blockSize);
#endif
+
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(slab, block);
+
free(block);
slab->header.mem_allocated -= slab->blockSize;
}