aboutsummaryrefslogtreecommitdiff
path: root/src/backend/executor
diff options
context:
space:
mode:
authorTom Lane <tgl@sss.pgh.pa.us>2021-05-21 14:03:53 -0400
committerTom Lane <tgl@sss.pgh.pa.us>2021-05-21 14:03:53 -0400
commit41c6a5bec25e720d98bd60d77dd5c2939189ed3c (patch)
treec434fc9c8c934c8c83374ee60c46a22e90bd2ff3 /src/backend/executor
parent18c6242b7c6da78341b6745bc9b0bcbca20d556b (diff)
downloadpostgresql-41c6a5bec25e720d98bd60d77dd5c2939189ed3c.tar.gz
postgresql-41c6a5bec25e720d98bd60d77dd5c2939189ed3c.zip
Restore the portal-level snapshot after procedure COMMIT/ROLLBACK.
COMMIT/ROLLBACK necessarily destroys all snapshots within the session. The original implementation of intra-procedure transactions just cavalierly did that, ignoring the fact that this left us executing in a rather different environment than normal. In particular, it turns out that handling of toasted datums depends rather critically on there being an outer ActiveSnapshot: otherwise, when SPI or the core executor pop whatever snapshot they used and return, it's unsafe to dereference any toasted datums that may appear in the query result. It's possible to demonstrate "no known snapshots" and "missing chunk number N for toast value" errors as a result of this oversight. Historically this outer snapshot has been held by the Portal code, and that seems like a good plan to preserve. So add infrastructure to pquery.c to allow re-establishing the Portal-owned snapshot if it's not there anymore, and add enough bookkeeping support that we can tell whether it is or not. We can't, however, just re-establish the Portal snapshot as part of COMMIT/ROLLBACK. As in normal transaction start, acquiring the first snapshot should wait until after SET and LOCK commands. Hence, teach spi.c about doing this at the right time. (Note that this patch doesn't fix the problem for any PLs that try to run intra-procedure transactions without using SPI to execute SQL commands.) This makes SPI's no_snapshots parameter rather a misnomer, so in HEAD, rename that to allow_nonatomic. replication/logical/worker.c also needs some fixes, because it wasn't careful to hold a snapshot open around AFTER trigger execution. That code doesn't use a Portal, which I suspect someday we're gonna have to fix. But for now, just rearrange the order of operations. This includes back-patching the recent addition of finish_estate() to centralize the cleanup logic there. 
This also back-patches commit 2ecfeda3e into v13, to improve the test coverage for worker.c (it was that test that exposed that worker.c's snapshot management is wrong). Per bug #15990 from Andreas Wicht. Back-patch to v11 where intra-procedure COMMIT was added. Discussion: https://postgr.es/m/15990-eee2ac466b11293d@postgresql.org
Diffstat (limited to 'src/backend/executor')
-rw-r--r--src/backend/executor/spi.c73
1 file changed, 49 insertions, 24 deletions
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 8eedb613a18..27849a927cd 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -251,12 +251,8 @@ _SPI_commit(bool chain)
/* Start the actual commit */
_SPI_current->internal_xact = true;
- /*
- * Before committing, pop all active snapshots to avoid error about
- * "snapshot %p still active".
- */
- while (ActiveSnapshotSet())
- PopActiveSnapshot();
+ /* Release snapshots associated with portals */
+ ForgetPortalSnapshots();
if (chain)
SaveTransactionCharacteristics();
@@ -313,6 +309,9 @@ _SPI_rollback(bool chain)
/* Start the actual rollback */
_SPI_current->internal_xact = true;
+ /* Release snapshots associated with portals */
+ ForgetPortalSnapshots();
+
if (chain)
SaveTransactionCharacteristics();
@@ -2105,6 +2104,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
uint64 my_processed = 0;
SPITupleTable *my_tuptable = NULL;
int res = 0;
+ bool allow_nonatomic = plan->no_snapshots; /* legacy API name */
bool pushed_active_snap = false;
ErrorContextCallback spierrcontext;
CachedPlan *cplan = NULL;
@@ -2137,11 +2137,12 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
* In the first two cases, we can just push the snap onto the stack once
* for the whole plan list.
*
- * But if the plan has no_snapshots set to true, then don't manage
- * snapshots at all. The caller should then take care of that.
+ * Note that snapshot != InvalidSnapshot implies an atomic execution
+ * context.
*/
- if (snapshot != InvalidSnapshot && !plan->no_snapshots)
+ if (snapshot != InvalidSnapshot)
{
+ Assert(!allow_nonatomic);
if (read_only)
{
PushActiveSnapshot(snapshot);
@@ -2216,15 +2217,39 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
stmt_list = cplan->stmt_list;
/*
- * In the default non-read-only case, get a new snapshot, replacing
- * any that we pushed in a previous cycle.
+ * If we weren't given a specific snapshot to use, and the statement
+ * list requires a snapshot, set that up.
*/
- if (snapshot == InvalidSnapshot && !read_only && !plan->no_snapshots)
+ if (snapshot == InvalidSnapshot &&
+ (list_length(stmt_list) > 1 ||
+ (list_length(stmt_list) == 1 &&
+ PlannedStmtRequiresSnapshot(linitial_node(PlannedStmt,
+ stmt_list)))))
{
- if (pushed_active_snap)
- PopActiveSnapshot();
- PushActiveSnapshot(GetTransactionSnapshot());
- pushed_active_snap = true;
+ /*
+ * First, ensure there's a Portal-level snapshot. This back-fills
+ * the snapshot stack in case the previous operation was a COMMIT
+ * or ROLLBACK inside a procedure or DO block. (We can't put back
+ * the Portal snapshot any sooner, or we'd break cases like doing
+ * SET or LOCK just after COMMIT.) It's enough to check once per
+ * statement list, since COMMIT/ROLLBACK/CALL/DO can't appear
+ * within a multi-statement list.
+ */
+ EnsurePortalSnapshotExists();
+
+ /*
+ * In the default non-read-only case, get a new per-statement-list
+ * snapshot, replacing any that we pushed in a previous cycle.
+ * Skip it when doing non-atomic execution, though (we rely
+ * entirely on the Portal snapshot in that case).
+ */
+ if (!read_only && !allow_nonatomic)
+ {
+ if (pushed_active_snap)
+ PopActiveSnapshot();
+ PushActiveSnapshot(GetTransactionSnapshot());
+ pushed_active_snap = true;
+ }
}
foreach(lc2, stmt_list)
@@ -2236,6 +2261,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
_SPI_current->processed = 0;
_SPI_current->tuptable = NULL;
+ /* Check for unsupported cases. */
if (stmt->utilityStmt)
{
if (IsA(stmt->utilityStmt, CopyStmt))
@@ -2267,9 +2293,10 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
/*
* If not read-only mode, advance the command counter before each
- * command and update the snapshot.
+ * command and update the snapshot. (But skip it if the snapshot
+ * isn't under our control.)
*/
- if (!read_only && !plan->no_snapshots)
+ if (!read_only && pushed_active_snap)
{
CommandCounterIncrement();
UpdateActiveSnapshotCommandId();
@@ -2303,13 +2330,11 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
ProcessUtilityContext context;
/*
- * If the SPI context is atomic, or we are asked to manage
- * snapshots, then we are in an atomic execution context.
- * Conversely, to propagate a nonatomic execution context, the
- * caller must be in a nonatomic SPI context and manage
- * snapshots itself.
+ * If the SPI context is atomic, or we were not told to allow
+ * nonatomic operations, tell ProcessUtility this is an atomic
+ * execution context.
*/
- if (_SPI_current->atomic || !plan->no_snapshots)
+ if (_SPI_current->atomic || !allow_nonatomic)
context = PROCESS_UTILITY_QUERY;
else
context = PROCESS_UTILITY_QUERY_NONATOMIC;