author      David Rowley <drowley@postgresql.org>  2020-06-19 17:25:07 +1200
committer   David Rowley <drowley@postgresql.org>  2020-06-19 17:25:07 +1200
commit      bdee4af8e07648008fe522fc5a562db453be5ad7 (patch)
tree        e7917a123732dd41101efa0eb74ccf12a8def19a /src/include/nodes/execnodes.h
parent      5fffa8fce37b981e1a5bb79affce9a856e021265 (diff)
download    postgresql-bdee4af8e07648008fe522fc5a562db453be5ad7.tar.gz
            postgresql-bdee4af8e07648008fe522fc5a562db453be5ad7.zip
Fix EXPLAIN ANALYZE for parallel HashAgg plans
Since 1f39bce02, HashAgg nodes have had the ability to spill to disk when
memory consumption exceeds work_mem. That commit added new properties to
EXPLAIN ANALYZE to show the maximum memory usage and disk usage; however,
it didn't quite go as far as showing that information for parallel workers.
Since workers may have experienced something very different from the main
process, we should show this information per worker, as is done in Sort.

Reviewed-by: Justin Pryzby
Reviewed-by: Jeff Davis
Discussion: https://postgr.es/m/CAApHDvpEKbfZa18mM1TD7qV6PG+w97pwCWq5tVD0dX7e11gRJw@mail.gmail.com
Backpatch-through: 13, where the hashagg spilling code was added.
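As a rough illustration of how the leader side could consume the new per-worker
array, here is a sketch only, not the committed explain.c code: the struct
fields come from the diff below, while the helper name, the zero-batches skip
test, and the output format are assumptions.

#include "postgres.h"

#include <stdio.h>

#include "nodes/execnodes.h"

/* Hypothetical helper: print one line per worker that built a hash table. */
static void
print_hashagg_worker_stats(const SharedAggInfo *shared_info)
{
	for (int n = 0; n < shared_info->num_workers; n++)
	{
		const AggregateInstrumentation *si = &shared_info->sinstrument[n];

		/* a worker that never built a hash table leaves its slot zeroed */
		if (si->hash_batches_used == 0)
			continue;

		/* hash_mem_peak is in bytes; hash_disk_used is already in kB */
		fprintf(stderr,
				"Worker %d: Batches: %d  Memory Usage: %zukB  Disk Usage: %llukB\n",
				n,
				si->hash_batches_used,
				(size_t) ((si->hash_mem_peak + 1023) / 1024),
				(unsigned long long) si->hash_disk_used);
	}
}

The intent, per the commit message, is that each worker's figures are reported
separately from the main process's, as EXPLAIN ANALYZE already does for Sort.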
Diffstat (limited to 'src/include/nodes/execnodes.h')
-rw-r--r--   src/include/nodes/execnodes.h   22
1 file changed, 22 insertions, 0 deletions
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 98e0072b8ad..f5dfa32d55c 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -2102,6 +2102,27 @@ typedef struct GroupState
} GroupState;
/* ---------------------
+ * per-worker aggregate information
+ * ---------------------
+ */
+typedef struct AggregateInstrumentation
+{
+ Size hash_mem_peak; /* peak hash table memory usage */
+ uint64 hash_disk_used; /* kB of disk space used */
+ int hash_batches_used; /* batches used during entire execution */
+} AggregateInstrumentation;
+
+/* ----------------
+ * Shared memory container for per-worker aggregate information
+ * ----------------
+ */
+typedef struct SharedAggInfo
+{
+ int num_workers;
+ AggregateInstrumentation sinstrument[FLEXIBLE_ARRAY_MEMBER];
+} SharedAggInfo;
+
+/* ---------------------
* AggState information
*
* ss.ss_ScanTupleSlot refers to output of underlying plan.
@@ -2190,6 +2211,7 @@ typedef struct AggState
AggStatePerGroup *all_pergroups; /* array of first ->pergroups, than
* ->hash_pergroup */
ProjectionInfo *combinedproj; /* projection machinery */
+ SharedAggInfo *shared_info; /* one entry per worker */
} AggState;
/* ----------------
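On the worker side, the new SharedAggInfo gives each process a slot to publish
into. The following is a minimal sketch of that hand-off, not the committed
nodeAgg.c code: it assumes the AggState counters added by 1f39bce02
(hash_mem_peak, hash_disk_used, hash_batches_used) and PostgreSQL's
ParallelWorkerNumber global; the function name and its placement at node
shutdown are assumptions.

#include "postgres.h"

#include "access/parallel.h"	/* ParallelWorkerNumber */
#include "nodes/execnodes.h"

/* Hypothetical shutdown step: copy a worker's local counters into shared memory. */
static void
save_hashagg_worker_stats(AggState *aggstate)
{
	AggregateInstrumentation *si;

	/* nothing to publish in the leader, or when no instrumentation was set up */
	if (aggstate->shared_info == NULL || ParallelWorkerNumber < 0)
		return;

	si = &aggstate->shared_info->sinstrument[ParallelWorkerNumber];
	si->hash_mem_peak = aggstate->hash_mem_peak;
	si->hash_disk_used = aggstate->hash_disk_used;
	si->hash_batches_used = aggstate->hash_batches_used;
}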