author | Andres Freund <andres@anarazel.de> | 2019-03-11 14:26:43 -0700
---|---|---
committer | Andres Freund <andres@anarazel.de> | 2019-03-11 14:26:43 -0700
commit | 8cacea7a725103f1a037a5ee06112ebe31051c66 |
tree | 6b5e8872494979b1fb6daf202c3a661972a55a62 |
parent | c2fe139c201c48f1133e9fbea2dd99b8efe2fadd |
Ensure sufficient alignment for ParallelTableScanDescData in BTShared.
Previously ParallelTableScanDescData was just a member in BTShared, but
after c2fe139c2 that doesn't guarantee sufficient alignment, as specific
AMs might (and are likely to) need atomic variables in the struct.
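
To illustrate the problem, here is a hedged, standalone sketch (the struct names and the 64-byte atomic stand-in are made up for illustration, not PostgreSQL code): when the descriptor is an embedded member, the compiler only aligns it for the generic struct's own requirements, not for whatever stricter alignment the table AM's data placed there may need.

```c
#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the generic ParallelTableScanDescData: nothing here needs
 * more than 4-byte alignment, so that is all the compiler guarantees. */
typedef struct GenericScanDesc
{
	uint32_t	relid;
	bool		syncscan;
} GenericScanDesc;

/* Hypothetical AM-specific descriptor occupying the same space, wanting
 * cacheline alignment for a contended atomic counter. */
typedef struct AMScanDesc
{
	GenericScanDesc base;
	alignas(64) uint64_t next_block;
} AMScanDesc;

/* Old-style container, embedding the generic descriptor as its last
 * member the way BTShared embedded heapdesc. */
typedef struct Shared
{
	int			nparticipants;
	bool		isconcurrent;
	GenericScanDesc scandesc;
} Shared;

int
main(void)
{
	size_t		off = offsetof(Shared, scandesc);

	/* The embedded offset satisfies the generic struct's alignment ... */
	printf("offset %zu, generic alignment %zu, AM alignment %zu\n",
		   off, alignof(GenericScanDesc), alignof(AMScanDesc));
	/* ... but typically not the stricter alignment the AM needs. */
	printf("AM alignment satisfied? %s\n",
		   (off % alignof(AMScanDesc) == 0) ? "yes" : "no");
	return 0;
}
```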
One might think that MAXALIGNing would be sufficient, but as a
comment in shm_toc_allocate() explains, that's not enough. For now,
copy the hack described there.
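
A minimal standalone sketch of that hack follows; assumptions: TYPEALIGN/MAXALIGN/BUFFERALIGN below are simplified stand-ins for the real macros in src/include/c.h (with ALIGNOF_BUFFER = 32), FakeShared is a made-up analogue of BTShared, and aligned_alloc stands in for the BUFFERALIGN'd memory that shm_toc_allocate() actually hands back.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the alignment macros in src/include/c.h. */
#define TYPEALIGN(ALIGNVAL, LEN) \
	(((uintptr_t) (LEN) + ((ALIGNVAL) - 1)) & ~((uintptr_t) ((ALIGNVAL) - 1)))
#define MAXALIGN(LEN)	 TYPEALIGN(8, LEN)	/* MAXIMUM_ALIGNOF is usually 8 */
#define BUFFERALIGN(LEN) TYPEALIGN(32, LEN) /* ALIGNOF_BUFFER is 32 */

/* Made-up analogue of BTShared: fixed-size state, with the scan descriptor
 * placed after it at a BUFFERALIGN'd offset rather than embedded in it. */
typedef struct FakeShared
{
	int			nparticipants;
	bool		isconcurrent;
	double		reltuples;
} FakeShared;

/* Same pattern as the ParallelTableScanFromBTShared() macro in the patch. */
#define ScanDescFromShared(shared) \
	((void *) ((char *) (shared) + BUFFERALIGN(sizeof(FakeShared))))

int
main(void)
{
	size_t		descsize = 256;	/* pretend the AM's estimate said so */

	/* Size the allocation the way _bt_parallel_estimate_shared() now does:
	 * aligned fixed part plus the AM's descriptor size. */
	size_t		total = (size_t) BUFFERALIGN(sizeof(FakeShared)) + descsize;

	/* aligned_alloc stands in for shm_toc_allocate(), which also returns
	 * BUFFERALIGN'd memory; its size must be a multiple of the alignment. */
	FakeShared *shared = aligned_alloc(32, (size_t) TYPEALIGN(32, total));
	char	   *desc;

	if (shared == NULL)
		return 1;
	desc = ScanDescFromShared(shared);

	/* The descriptor therefore starts 32-byte aligned, enough for atomics
	 * that need more than MAXALIGN. */
	printf("fixed part %zu bytes, descriptor offset %zu, 32-byte aligned: %s\n",
		   sizeof(FakeShared), (size_t) (desc - (char *) shared),
		   ((uintptr_t) desc % 32 == 0) ? "yes" : "no");

	free(shared);
	return 0;
}
```

Because the base allocation in _bt_begin_parallel() comes from shm_toc_allocate(), which itself returns BUFFERALIGN'd memory, the descriptor ends up aligned at least as strictly as if it had been allocated directly from the TOC.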
For parallel sequential scans no such change is needed, as their
allocations go through shm_toc_allocate().
An alternative approach would have been to allocate the parallel scan
descriptor in a separate TOC entry, but there seems little benefit in
doing so.
Per buildfarm member dromedary.
Author: Andres Freund
Discussion: https://postgr.es/m/20190311203126.ty5gbfz42gjbm6i6@alap3.anarazel.de
src/backend/access/nbtree/nbtsort.c | 25
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index e37cbac7b3c..28c1aeefabb 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -157,14 +157,22 @@ typedef struct BTShared
 	bool		brokenhotchain;
 
 	/*
-	 * This variable-sized field must come last.
-	 *
-	 * See _bt_parallel_estimate_shared() and table_parallelscan_estimate().
+	 * ParallelTableScanDescData data follows. Can't directly embed here, as
+	 * implementations of the parallel table scan desc interface might need
+	 * stronger alignment.
 	 */
-	ParallelTableScanDescData heapdesc;
 } BTShared;
 
 /*
+ * Return pointer to a BTShared's parallel table scan.
+ *
+ * c.f. shm_toc_allocate as to why BUFFERALIGN is used, rather than just
+ * MAXALIGN.
+ */
+#define ParallelTableScanFromBTShared(shared) \
+	(ParallelTableScanDesc) ((char *) (shared) + BUFFERALIGN(sizeof(BTShared)))
+
+/*
  * Status for leader in parallel index build.
  */
 typedef struct BTLeader
@@ -1317,7 +1325,8 @@ _bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent, int request)
 	btshared->havedead = false;
 	btshared->indtuples = 0.0;
 	btshared->brokenhotchain = false;
-	table_parallelscan_initialize(btspool->heap, &btshared->heapdesc,
+	table_parallelscan_initialize(btspool->heap,
+								  ParallelTableScanFromBTShared(btshared),
 								  snapshot);
 
 	/*
@@ -1407,7 +1416,8 @@ _bt_end_parallel(BTLeader *btleader)
 static Size
 _bt_parallel_estimate_shared(Relation heap, Snapshot snapshot)
 {
-	return add_size(offsetof(BTShared, heapdesc),
+	/* c.f. shm_toc_allocate as to why BUFFERALIGN is used */
+	return add_size(BUFFERALIGN(sizeof(BTShared)),
 					table_parallelscan_estimate(heap, snapshot));
 }
 
@@ -1672,7 +1682,8 @@ _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2,
 	/* Join parallel scan */
 	indexInfo = BuildIndexInfo(btspool->index);
 	indexInfo->ii_Concurrent = btshared->isconcurrent;
-	scan = table_beginscan_parallel(btspool->heap, &btshared->heapdesc);
+	scan = table_beginscan_parallel(btspool->heap,
+									ParallelTableScanFromBTShared(btshared));
 	reltuples = IndexBuildHeapScan(btspool->heap, btspool->index, indexInfo,
 								   true, _bt_build_callback,
 								   (void *) &buildstate, scan);