author    Tom Lane <tgl@sss.pgh.pa.us>  2018-09-01 15:27:12 -0400
committer Tom Lane <tgl@sss.pgh.pa.us>  2018-09-01 15:27:17 -0400
commit    44cac9346479d4b0cc9195b0267fd13eb4e7442c
tree      d90876e13f78977dc571be5b70592c82fc33e3fe /src/backend/access/heap/heapam.c
parent    5e8d670c313531c0dca245943fb84c94a477ddc4
Avoid using potentially-under-aligned page buffers.
There's a project policy against using plain "char buf[BLCKSZ]" local or static variables as page buffers; preferred style is to palloc or malloc each buffer to ensure it is MAXALIGN'd. However, that policy's been ignored in an increasing number of places. We've apparently got away with it so far, probably because (a) relatively few people use platforms on which misalignment causes core dumps and/or (b) the variables chance to be sufficiently aligned anyway. But this is not something to rely on. Moreover, even if we don't get a core dump, we might be paying a lot of cycles for misaligned accesses.

To fix, invent new union types PGAlignedBlock and PGAlignedXLogBlock that the compiler must allocate with sufficient alignment, and use those in place of plain char arrays. I used these types even for variables where there's no risk of a misaligned access, since ensuring proper alignment should make kernel data transfers faster. I also changed some places where we had been palloc'ing short-lived buffers, for coding style uniformity and to save palloc/pfree overhead.

Since this seems to be a live portability hazard (despite the lack of field reports), back-patch to all supported versions.

Patch by me; thanks to Michael Paquier for review.

Discussion: https://postgr.es/m/1535618100.1286.3.camel@credativ.de
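For context, the trick relies on the rule that a union must be aligned for its most strictly aligned member. The types this commit adds to src/include/c.h look essentially like this (int64 is PostgreSQL's own typedef; the descriptive comments are added here):

	typedef union PGAlignedBlock
	{
		char		data[BLCKSZ];		/* the page buffer; callers use buf.data */
		double		force_align_d;		/* forces double alignment */
		int64		force_align_i64;	/* forces eight-byte integer alignment */
	} PGAlignedBlock;

	/* Same, but sized for WAL pages */
	typedef union PGAlignedXLogBlock
	{
		char		data[XLOG_BLCKSZ];
		double		force_align_d;
		int64		force_align_i64;
	} PGAlignedXLogBlock;

A side benefit, visible in the hunks below: because the buffer can now live directly on the stack, the up-front palloc() (which is unsafe inside a critical section) disappears along with its pfree() overhead.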
Diffstat (limited to 'src/backend/access/heap/heapam.c')
-rw-r--r--	src/backend/access/heap/heapam.c	16
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index b8bfe23a823..56f1d82f962 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2709,7 +2709,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
HeapTuple *heaptuples;
int i;
int ndone;
- char *scratch = NULL;
+ PGAlignedBlock scratch;
Page page;
bool needwal;
Size saveFreeSpace;
@@ -2727,14 +2727,6 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
xid, cid, options);
/*
- * Allocate some memory to use for constructing the WAL record. Using
- * palloc() within a critical section is not safe, so we allocate this
- * beforehand.
- */
- if (needwal)
- scratch = palloc(BLCKSZ);
-
- /*
* We're about to do the actual inserts -- but check for conflict first,
* to minimize the possibility of having to roll back work we've just
* done.
@@ -2826,7 +2818,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
uint8 info = XLOG_HEAP2_MULTI_INSERT;
char *tupledata;
int totaldatalen;
- char *scratchptr = scratch;
+ char *scratchptr = scratch.data;
bool init;
int bufflags = 0;
@@ -2885,7 +2877,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
scratchptr += datalen;
}
totaldatalen = scratchptr - tupledata;
- Assert((scratchptr - scratch) < BLCKSZ);
+ Assert((scratchptr - scratch.data) < BLCKSZ);
if (need_tuple_data)
xlrec->flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
@@ -2912,7 +2904,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
bufflags |= REGBUF_KEEP_DATA;
XLogBeginInsert();
- XLogRegisterData((char *) xlrec, tupledata - scratch);
+ XLogRegisterData((char *) xlrec, tupledata - scratch.data);
XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
XLogRegisterBufData(0, tupledata, totaldatalen);
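To see the alignment guarantee in isolation, here is a minimal standalone C11 sketch (not PostgreSQL code; BLCKSZ is hard-coded to the default page size of 8192, and int64_t stands in for PostgreSQL's int64):

	#include <stdio.h>
	#include <stdint.h>
	#include <stdalign.h>

	#define BLCKSZ 8192				/* assumed: PostgreSQL's default page size */

	typedef union PGAlignedBlock
	{
		char	data[BLCKSZ];		/* the buffer itself */
		double	force_align_d;		/* alignment-forcing members, never read */
		int64_t	force_align_i64;
	} PGAlignedBlock;

	int
	main(void)
	{
		/* A bare char array is only guaranteed 1-byte alignment... */
		printf("alignof(char[BLCKSZ])   = %zu\n", alignof(char[BLCKSZ]));
		/* ...while the union inherits its strictest member's alignment. */
		printf("alignof(PGAlignedBlock) = %zu\n", alignof(PGAlignedBlock));
		return 0;
	}

On a typical 64-bit platform this prints 1 and 8, which is exactly why the plain char-array style was a hazard on strict-alignment hardware.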