author    Peter Eisentraut <peter@eisentraut.org>    2022-09-12 08:31:56 +0200
committer Peter Eisentraut <peter@eisentraut.org>    2022-09-12 08:45:03 +0200
commit    5015e1e1b58f81a036e4ad16291ef4b3bb7a596c (patch)
tree      86ee608e961dc830e733c534db089f1e45706414 /src/backend/executor
parent    2016055a92f26d648aba9f66d26cc0bcd1619eff (diff)
Assorted examples of expanded type-safer palloc/pg_malloc API
This adds some uses of the new palloc/pg_malloc variants here and there as a demonstration and test. This is kept separate from the actual API patch, since the latter might be backpatched at some point.

Reviewed-by: Tom Lane <tgl@sss.pgh.pa.us>
Discussion: https://www.postgresql.org/message-id/flat/bb755632-2a43-d523-36f8-a1e7a389a907@enterprisedb.com
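For context, the type-safe variants exercised in this patch are thin macros over palloc/palloc0/repalloc that take the element type (and, for arrays, the count) and supply the sizeof and the result cast themselves. A minimal sketch of what they expand to is shown below; the exact definitions live in the palloc/pg_malloc headers, so treat this as an illustration rather than the authoritative declarations:

    /*
     * Sketch of the type-safe allocation wrappers used at the call sites
     * in this diff (illustrative only; see the palloc/pg_malloc headers
     * for the real definitions).
     */
    #define palloc_object(type)        ((type *) palloc(sizeof(type)))
    #define palloc0_object(type)       ((type *) palloc0(sizeof(type)))
    #define palloc_array(type, count)  ((type *) palloc(sizeof(type) * (count)))
    #define palloc0_array(type, count) ((type *) palloc0(sizeof(type) * (count)))
    #define repalloc_array(pointer, type, count) \
        ((type *) repalloc(pointer, sizeof(type) * (count)))

With these, a call such as palloc_array(FmgrInfo, nkeys) replaces (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo)): the element type is written only once and the compiler derives both the cast and the allocation size, which is what the hunks below demonstrate.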
Diffstat (limited to 'src/backend/executor')
-rw-r--r--    src/backend/executor/nodeHash.c    57
1 file changed, 23 insertions(+), 34 deletions(-)
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 841896c7781..77dd1dae8bb 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -479,7 +479,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
* per-query memory context. Everything else should be kept inside the
* subsidiary hashCxt or batchCxt.
*/
- hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
+ hashtable = palloc_object(HashJoinTableData);
hashtable->nbuckets = nbuckets;
hashtable->nbuckets_original = nbuckets;
hashtable->nbuckets_optimal = nbuckets;
@@ -540,12 +540,10 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
* remember whether the join operators are strict.
*/
nkeys = list_length(hashOperators);
- hashtable->outer_hashfunctions =
- (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
- hashtable->inner_hashfunctions =
- (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
- hashtable->hashStrict = (bool *) palloc(nkeys * sizeof(bool));
- hashtable->collations = (Oid *) palloc(nkeys * sizeof(Oid));
+ hashtable->outer_hashfunctions = palloc_array(FmgrInfo, nkeys);
+ hashtable->inner_hashfunctions = palloc_array(FmgrInfo, nkeys);
+ hashtable->hashStrict = palloc_array(bool, nkeys);
+ hashtable->collations = palloc_array(Oid, nkeys);
i = 0;
forboth(ho, hashOperators, hc, hashCollations)
{
@@ -569,10 +567,8 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
* allocate and initialize the file arrays in hashCxt (not needed for
* parallel case which uses shared tuplestores instead of raw files)
*/
- hashtable->innerBatchFile = (BufFile **)
- palloc0(nbatch * sizeof(BufFile *));
- hashtable->outerBatchFile = (BufFile **)
- palloc0(nbatch * sizeof(BufFile *));
+ hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
+ hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
/* The files will not be opened until needed... */
/* ... but make sure we have temp tablespaces established for them */
PrepareTempTablespaces();
@@ -636,8 +632,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
*/
MemoryContextSwitchTo(hashtable->batchCxt);
- hashtable->buckets.unshared = (HashJoinTuple *)
- palloc0(nbuckets * sizeof(HashJoinTuple));
+ hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
/*
* Set up for skew optimization, if possible and there's a need for
@@ -937,20 +932,16 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
if (hashtable->innerBatchFile == NULL)
{
/* we had no file arrays before */
- hashtable->innerBatchFile = (BufFile **)
- palloc0(nbatch * sizeof(BufFile *));
- hashtable->outerBatchFile = (BufFile **)
- palloc0(nbatch * sizeof(BufFile *));
+ hashtable->innerBatchFile = palloc0_array(BufFile *, nbatch);
+ hashtable->outerBatchFile = palloc0_array(BufFile *, nbatch);
/* time to establish the temp tablespaces, too */
PrepareTempTablespaces();
}
else
{
/* enlarge arrays and zero out added entries */
- hashtable->innerBatchFile = (BufFile **)
- repalloc(hashtable->innerBatchFile, nbatch * sizeof(BufFile *));
- hashtable->outerBatchFile = (BufFile **)
- repalloc(hashtable->outerBatchFile, nbatch * sizeof(BufFile *));
+ hashtable->innerBatchFile = repalloc_array(hashtable->innerBatchFile, BufFile *, nbatch);
+ hashtable->outerBatchFile = repalloc_array(hashtable->outerBatchFile, BufFile *, nbatch);
MemSet(hashtable->innerBatchFile + oldnbatch, 0,
(nbatch - oldnbatch) * sizeof(BufFile *));
MemSet(hashtable->outerBatchFile + oldnbatch, 0,
@@ -977,8 +968,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
hashtable->log2_nbuckets = hashtable->log2_nbuckets_optimal;
hashtable->buckets.unshared =
- repalloc(hashtable->buckets.unshared,
- sizeof(HashJoinTuple) * hashtable->nbuckets);
+ repalloc_array(hashtable->buckets.unshared,
+ HashJoinTuple, hashtable->nbuckets);
}
/*
@@ -1371,7 +1362,7 @@ ExecParallelHashRepartitionRest(HashJoinTable hashtable)
/* Get our hands on the previous generation of batches. */
old_batches = (ParallelHashJoinBatch *)
dsa_get_address(hashtable->area, pstate->old_batches);
- old_inner_tuples = palloc0(sizeof(SharedTuplestoreAccessor *) * old_nbatch);
+ old_inner_tuples = palloc0_array(SharedTuplestoreAccessor *, old_nbatch);
for (i = 1; i < old_nbatch; ++i)
{
ParallelHashJoinBatch *shared =
@@ -1477,8 +1468,8 @@ ExecHashIncreaseNumBuckets(HashJoinTable hashtable)
* chunks)
*/
hashtable->buckets.unshared =
- (HashJoinTuple *) repalloc(hashtable->buckets.unshared,
- hashtable->nbuckets * sizeof(HashJoinTuple));
+ repalloc_array(hashtable->buckets.unshared,
+ HashJoinTuple, hashtable->nbuckets);
memset(hashtable->buckets.unshared, 0,
hashtable->nbuckets * sizeof(HashJoinTuple));
@@ -2170,8 +2161,7 @@ ExecHashTableReset(HashJoinTable hashtable)
oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
/* Reallocate and reinitialize the hash bucket headers. */
- hashtable->buckets.unshared = (HashJoinTuple *)
- palloc0(nbuckets * sizeof(HashJoinTuple));
+ hashtable->buckets.unshared = palloc0_array(HashJoinTuple, nbuckets);
hashtable->spaceUsed = 0;
@@ -2666,8 +2656,7 @@ ExecShutdownHash(HashState *node)
{
/* Allocate save space if EXPLAIN'ing and we didn't do so already */
if (node->ps.instrument && !node->hinstrument)
- node->hinstrument = (HashInstrumentation *)
- palloc0(sizeof(HashInstrumentation));
+ node->hinstrument = palloc0_object(HashInstrumentation);
/* Now accumulate data for the current (final) hash table */
if (node->hinstrument && node->hashtable)
ExecHashAccumInstrumentation(node->hinstrument, node->hashtable);
@@ -2977,8 +2966,8 @@ ExecParallelHashJoinSetUpBatches(HashJoinTable hashtable, int nbatch)
/* Allocate this backend's accessor array. */
hashtable->nbatch = nbatch;
- hashtable->batches = (ParallelHashJoinBatchAccessor *)
- palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
+ hashtable->batches =
+ palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
/* Set up the shared state, tuplestores and backend-local accessors. */
for (i = 0; i < hashtable->nbatch; ++i)
@@ -3083,8 +3072,8 @@ ExecParallelHashEnsureBatchAccessors(HashJoinTable hashtable)
/* Allocate this backend's accessor array. */
hashtable->nbatch = pstate->nbatch;
- hashtable->batches = (ParallelHashJoinBatchAccessor *)
- palloc0(sizeof(ParallelHashJoinBatchAccessor) * hashtable->nbatch);
+ hashtable->batches =
+ palloc0_array(ParallelHashJoinBatchAccessor, hashtable->nbatch);
/* Find the base of the pseudo-array of ParallelHashJoinBatch objects. */
batches = (ParallelHashJoinBatch *)