author     Noah Misch <noah@leadboat.com>   2024-11-02 09:05:00 -0700
committer  Noah Misch <noah@leadboat.com>   2024-11-02 09:05:02 -0700
commit     6f9dd2282e37589bdadf15423b0395ce11735f82 (patch)
tree       f6237ed05d6cb2c3490dc6e7e634103caf01d617
parent     d5be10758b3645e30937509d7056d2dc28eb128e (diff)
Revert "For inplace update, send nontransactional invalidations."
This reverts commit 95c5acb3fc261067ab65ddc0b2dca8e162f09442 (v17) and
counterparts in each other non-master branch.  If released, that commit
would have caused a worst-in-years minor release regression, via
undetected LWLock self-deadlock.  This commit and its self-deadlock fix
warrant more bake time in the master branch.

Reported by Alexander Lakhin.

Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com
-rw-r--r--  src/backend/access/heap/heapam.c               |  43
-rw-r--r--  src/backend/access/transam/xact.c              |  26
-rw-r--r--  src/backend/catalog/index.c                    |  11
-rw-r--r--  src/backend/replication/logical/decode.c       |  26
-rw-r--r--  src/backend/utils/cache/catcache.c             |   7
-rw-r--r--  src/backend/utils/cache/inval.c                | 304
-rw-r--r--  src/backend/utils/cache/syscache.c             |   3
-rw-r--r--  src/include/utils/catcache.h                   |   3
-rw-r--r--  src/include/utils/inval.h                      |   6
-rw-r--r--  src/test/isolation/expected/inplace-inval.out  |  10
-rw-r--r--  src/test/isolation/specs/inplace-inval.spec    |  12
-rw-r--r--  src/tools/pgindent/typedefs.list               |   1

12 files changed, 132 insertions(+), 320 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 6fabc43484e..723e34e4646 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6195,24 +6195,6 @@ heap_inplace_update_and_unlock(Relation relation,
if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
elog(ERROR, "wrong tuple length");
- /*
- * Construct shared cache inval if necessary. Note that because we only
- * pass the new version of the tuple, this mustn't be used for any
- * operations that could change catcache lookup keys. But we aren't
- * bothering with index updates either, so that's true a fortiori.
- */
- CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
- /*
- * Unlink relcache init files as needed. If unlinking, acquire
- * RelCacheInitLock until after associated invalidations. By doing this
- * in advance, if we checkpoint and then crash between inplace
- * XLogInsert() and inval, we don't rely on StartupXLOG() ->
- * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
- * neglect to PANIC on EIO.
- */
- PreInplace_Inval();
-
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
@@ -6256,28 +6238,17 @@ heap_inplace_update_and_unlock(Relation relation,
PageSetLSN(BufferGetPage(buffer), recptr);
}
- LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-
- /*
- * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
- * do this before UnlockTuple().
- *
- * If we're mutating a tuple visible only to this transaction, there's an
- * equivalent transactional inval from the action that created the tuple,
- * and this inval is superfluous.
- */
- AtInplace_Inval();
-
END_CRIT_SECTION();
- UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
- AcceptInvalidationMessages(); /* local processing of just-sent inval */
+ heap_inplace_unlock(relation, oldtup, buffer);
/*
- * Queue a transactional inval. The immediate invalidation we just sent
- * is the only one known to be necessary. To reduce risk from the
- * transition to immediate invalidation, continue sending a transactional
- * invalidation like we've long done. Third-party code might rely on it.
+ * Send out shared cache inval if necessary. Note that because we only
+ * pass the new version of the tuple, this mustn't be used for any
+ * operations that could change catcache lookup keys. But we aren't
+ * bothering with index updates either, so that's true a fortiori.
+ *
+ * XXX ROLLBACK discards the invalidation. See test inplace-inval.spec.
*/
if (!IsBootstrapProcessingMode())
CacheInvalidateHeapTuple(relation, tuple, NULL);
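
(For orientation, the post-revert tail of heap_inplace_update_and_unlock()
reduces to the shape below: a condensed sketch assembled from the added
lines in this hunk, with "..." eliding the byte-copy and WAL-insert logic;
not a verbatim excerpt.)

    START_CRIT_SECTION();
    ...                 /* overwrite tuple bytes, MarkBufferDirty(),
                         * XLogInsert(), PageSetLSN() */
    END_CRIT_SECTION();

    heap_inplace_unlock(relation, oldtup, buffer);

    /*
     * Only the transactional inval remains; a ROLLBACK now discards it,
     * which is the known gap exercised by inplace-inval.spec.
     */
    if (!IsBootstrapProcessingMode())
        CacheInvalidateHeapTuple(relation, tuple, NULL);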
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 91dbfcc0d78..4a2ea4adbaf 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1337,24 +1337,14 @@ RecordTransactionCommit(void)
/*
* Transactions without an assigned xid can contain invalidation
- * messages. While inplace updates do this, this is not known to be
- * necessary; see comment at inplace CacheInvalidateHeapTuple().
- * Extensions might still rely on this capability, and standbys may
- * need to process those invals. We can't emit a commit record
- * without an xid, and we don't want to force assigning an xid,
- * because that'd be problematic for e.g. vacuum. Hence we emit a
- * bespoke record for the invalidations. We don't want to use that in
- * case a commit record is emitted, so they happen synchronously with
- * commits (besides not wanting to emit more WAL records).
- *
- * XXX Every known use of this capability is a defect. Since an XID
- * isn't controlling visibility of the change that prompted invals,
- * other sessions need the inval even if this transactions aborts.
- *
- * ON COMMIT DELETE ROWS does a nontransactional index_build(), which
- * queues a relcache inval, including in transactions without an xid
- * that had read the (empty) table. Standbys don't need any ON COMMIT
- * DELETE ROWS invals, but we've not done the work to withhold them.
+ * messages (e.g. explicit relcache invalidations or catcache
+ * invalidations for inplace updates); standbys need to process those.
+ * We can't emit a commit record without an xid, and we don't want to
+ * force assigning an xid, because that'd be problematic for e.g.
+ * vacuum. Hence we emit a bespoke record for the invalidations. We
+ * don't want to use that in case a commit record is emitted, so they
+ * happen synchronously with commits (besides not wanting to emit more
+ * WAL records).
*/
if (nmsgs != 0)
{
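
(The branch this comment guards emits the bespoke WAL record; it looks
roughly like the sketch below.  LogStandbyInvalidations(), invalMessages,
and wrote_xlog are recalled from the surrounding function, not shown in
this hunk.)

    if (nmsgs != 0)
    {
        /* Bespoke record carrying the invalidations for standbys. */
        LogStandbyInvalidations(nmsgs, invalMessages,
                                RelcacheInitFileInval);
        wrote_xlog = true;
    }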
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index b0ffe9464f4..ec56bb62182 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -2915,19 +2915,12 @@ index_update_stats(Relation rel,
if (dirty)
{
systable_inplace_update_finish(state, tuple);
- /* the above sends transactional and immediate cache inval messages */
+ /* the above sends a cache inval message */
}
else
{
systable_inplace_update_cancel(state);
-
- /*
- * While we didn't change relhasindex, CREATE INDEX needs a
- * transactional inval for when the new index's catalog rows become
- * visible. Other CREATE INDEX and REINDEX code happens to also queue
- * this inval, but keep this in case rare callers rely on this part of
- * our API contract.
- */
+ /* no need to change tuple, but force relcache inval anyway */
CacheInvalidateRelcacheByTuple(tuple);
}
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 70af02cf692..d91055a4409 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -511,19 +511,23 @@ heap_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
/*
* Inplace updates are only ever performed on catalog tuples and
- * can, per definition, not change tuple visibility. Inplace
- * updates don't affect storage or interpretation of table rows,
- * so they don't affect logicalrep_write_tuple() outcomes. Hence,
- * we don't process invalidations from the original operation. If
- * inplace updates did affect those things, invalidations wouldn't
- * make it work, since there are no snapshot-specific versions of
- * inplace-updated values. Since we also don't decode catalog
- * tuples, we're not interested in the record's contents.
+ * can, per definition, not change tuple visibility. Since we
+ * don't decode catalog tuples, we're not interested in the
+ * record's contents.
*
- * WAL contains likely-unnecessary commit-time invals from the
- * CacheInvalidateHeapTuple() call in heap_inplace_update().
- * Excess invalidation is safe.
+ * In-place updates can be used either by XID-bearing transactions
+ * (e.g. in CREATE INDEX CONCURRENTLY) or by XID-less
+ * transactions (e.g. VACUUM). In the former case, the commit
+ * record will include cache invalidations, so we mark the
+ * transaction as catalog modifying here. Currently that's
+ * redundant because the commit will do that as well, but once we
+ * support decoding in-progress relations, this will be important.
*/
+ if (!TransactionIdIsValid(xid))
+ break;
+
+ (void) SnapBuildProcessChange(builder, xid, buf->origptr);
+ ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
break;
case XLOG_HEAP_CONFIRM:
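
(Restored decode logic in context: the enclosing case label is not shown
in this hunk; assuming it is XLOG_HEAP_INPLACE within heap_decode(), the
branch reads as sketched below.)

    case XLOG_HEAP_INPLACE:
        /* XID-less inplace updaters (e.g. VACUUM) leave nothing to track. */
        if (!TransactionIdIsValid(xid))
            break;
        /* XID-bearing ones mark the xact as catalog-modifying. */
        (void) SnapBuildProcessChange(builder, xid, buf->origptr);
        ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
        break;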
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 9f249fdccb1..000e81a2d96 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -2219,8 +2219,7 @@ void
PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
HeapTuple newtuple,
- void (*function) (int, uint32, Oid, void *),
- void *context)
+ void (*function) (int, uint32, Oid))
{
slist_iter iter;
Oid reloid;
@@ -2261,7 +2260,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
- (*function) (ccp->id, hashvalue, dbid, context);
+ (*function) (ccp->id, hashvalue, dbid);
if (newtuple)
{
@@ -2270,7 +2269,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
if (newhashvalue != hashvalue)
- (*function) (ccp->id, newhashvalue, dbid, context);
+ (*function) (ccp->id, newhashvalue, dbid);
}
}
}
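
(Taken with the inval.c hunks below, the restored callback contract is the
three-argument form.  A minimal sketch of both sides, copied in shape from
those hunks:)

    /* Registrar (inval.c): no context pointer; it reaches the file-global
     * transInvalInfo directly. */
    static void
    RegisterCatcacheInvalidation(int cacheId, uint32 hashValue, Oid dbId)
    {
        AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
                                       cacheId, hashValue, dbId);
    }

    /* Caller (also inval.c): */
    PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
                                  RegisterCatcacheInvalidation);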
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 7ad69ee77d2..0008826f67c 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -94,10 +94,6 @@
* worth trying to avoid sending such inval traffic in the future, if those
* problems can be overcome cheaply.
*
- * When making a nontransactional change to a cacheable object, we must
- * likewise send the invalidation immediately, before ending the change's
- * critical section. This includes inplace heap updates, relmap, and smgr.
- *
* When wal_level=logical, write invalidations into WAL at each command end to
* support the decoding of the in-progress transactions. See
* CommandEndInvalidationMessages.
@@ -135,15 +131,13 @@
/*
* Pending requests are stored as ready-to-send SharedInvalidationMessages.
- * We keep the messages themselves in arrays in TopTransactionContext (there
- * are separate arrays for catcache and relcache messages). For transactional
- * messages, control information is kept in a chain of TransInvalidationInfo
- * structs, also allocated in TopTransactionContext. (We could keep a
- * subtransaction's TransInvalidationInfo in its CurTransactionContext; but
- * that's more wasteful not less so, since in very many scenarios it'd be the
- * only allocation in the subtransaction's CurTransactionContext.) For
- * inplace update messages, control information appears in an
- * InvalidationInfo, allocated in CurrentMemoryContext.
+ * We keep the messages themselves in arrays in TopTransactionContext
+ * (there are separate arrays for catcache and relcache messages). Control
+ * information is kept in a chain of TransInvalidationInfo structs, also
+ * allocated in TopTransactionContext. (We could keep a subtransaction's
+ * TransInvalidationInfo in its CurTransactionContext; but that's more
+ * wasteful not less so, since in very many scenarios it'd be the only
+ * allocation in the subtransaction's CurTransactionContext.)
*
* We can store the message arrays densely, and yet avoid moving data around
* within an array, because within any one subtransaction we need only
@@ -154,9 +148,7 @@
* struct. Similarly, we need distinguish messages of prior subtransactions
* from those of the current subtransaction only until the subtransaction
* completes, after which we adjust the array indexes in the parent's
- * TransInvalidationInfo to include the subtransaction's messages. Inplace
- * invalidations don't need a concept of command or subtransaction boundaries,
- * since we send them during the WAL insertion critical section.
+ * TransInvalidationInfo to include the subtransaction's messages.
*
* The ordering of the individual messages within a command's or
* subtransaction's output is not considered significant, although this
@@ -209,7 +201,7 @@ typedef struct InvalidationMsgsGroup
/*----------------
- * Transactional invalidation messages are divided into two groups:
+ * Invalidation messages are divided into two groups:
* 1) events so far in current command, not yet reflected to caches.
* 2) events in previous commands of current transaction; these have
* been reflected to local caches, and must be either broadcast to
@@ -225,36 +217,26 @@ typedef struct InvalidationMsgsGroup
*----------------
*/
-/* fields common to both transactional and inplace invalidation */
-typedef struct InvalidationInfo
-{
- /* Events emitted by current command */
- InvalidationMsgsGroup CurrentCmdInvalidMsgs;
-
- /* init file must be invalidated? */
- bool RelcacheInitFileInval;
-} InvalidationInfo;
-
-/* subclass adding fields specific to transactional invalidation */
typedef struct TransInvalidationInfo
{
- /* Base class */
- struct InvalidationInfo ii;
-
- /* Events emitted by previous commands of this (sub)transaction */
- InvalidationMsgsGroup PriorCmdInvalidMsgs;
-
/* Back link to parent transaction's info */
struct TransInvalidationInfo *parent;
/* Subtransaction nesting depth */
int my_level;
+
+ /* Events emitted by current command */
+ InvalidationMsgsGroup CurrentCmdInvalidMsgs;
+
+ /* Events emitted by previous commands of this (sub)transaction */
+ InvalidationMsgsGroup PriorCmdInvalidMsgs;
+
+ /* init file must be invalidated? */
+ bool RelcacheInitFileInval;
} TransInvalidationInfo;
static TransInvalidationInfo *transInvalInfo = NULL;
-static InvalidationInfo *inplaceInvalInfo = NULL;
-
/* GUC storage */
int debug_discard_caches = 0;
@@ -562,12 +544,9 @@ ProcessInvalidationMessagesMulti(InvalidationMsgsGroup *group,
static void
RegisterCatcacheInvalidation(int cacheId,
uint32 hashValue,
- Oid dbId,
- void *context)
+ Oid dbId)
{
- InvalidationInfo *info = (InvalidationInfo *) context;
-
- AddCatcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs,
+ AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
cacheId, hashValue, dbId);
}
@@ -577,9 +556,10 @@ RegisterCatcacheInvalidation(int cacheId,
* Register an invalidation event for all catcache entries from a catalog.
*/
static void
-RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId)
+RegisterCatalogInvalidation(Oid dbId, Oid catId)
{
- AddCatalogInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, catId);
+ AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
+ dbId, catId);
}
/*
@@ -588,9 +568,10 @@ RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId)
* As above, but register a relcache invalidation event.
*/
static void
-RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
+RegisterRelcacheInvalidation(Oid dbId, Oid relId)
{
- AddRelcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
+ AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
+ dbId, relId);
/*
* Most of the time, relcache invalidation is associated with system
@@ -607,7 +588,7 @@ RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
* as well. Also zap when we are invalidating whole relcache.
*/
if (relId == InvalidOid || RelationIdIsInInitFile(relId))
- info->RelcacheInitFileInval = true;
+ transInvalInfo->RelcacheInitFileInval = true;
}
/*
@@ -617,9 +598,10 @@ RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
* Only needed for catalogs that don't have catcaches.
*/
static void
-RegisterSnapshotInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
+RegisterSnapshotInvalidation(Oid dbId, Oid relId)
{
- AddSnapshotInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
+ AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
+ dbId, relId);
}
/*
@@ -809,18 +791,14 @@ AcceptInvalidationMessages(void)
* PrepareInvalidationState
* Initialize inval data for the current (sub)transaction.
*/
-static InvalidationInfo *
+static void
PrepareInvalidationState(void)
{
TransInvalidationInfo *myInfo;
- Assert(IsTransactionState());
- /* Can't queue transactional message while collecting inplace messages. */
- Assert(inplaceInvalInfo == NULL);
-
if (transInvalInfo != NULL &&
transInvalInfo->my_level == GetCurrentTransactionNestLevel())
- return (InvalidationInfo *) transInvalInfo;
+ return;
myInfo = (TransInvalidationInfo *)
MemoryContextAllocZero(TopTransactionContext,
@@ -843,7 +821,7 @@ PrepareInvalidationState(void)
* counter. This is a convenient place to check for that, as well as
* being important to keep management of the message arrays simple.
*/
- if (NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs) != 0)
+ if (NumMessagesInGroup(&transInvalInfo->CurrentCmdInvalidMsgs) != 0)
elog(ERROR, "cannot start a subtransaction when there are unprocessed inval messages");
/*
@@ -852,8 +830,8 @@ PrepareInvalidationState(void)
* to update them to follow whatever is already in the arrays.
*/
SetGroupToFollow(&myInfo->PriorCmdInvalidMsgs,
- &transInvalInfo->ii.CurrentCmdInvalidMsgs);
- SetGroupToFollow(&myInfo->ii.CurrentCmdInvalidMsgs,
+ &transInvalInfo->CurrentCmdInvalidMsgs);
+ SetGroupToFollow(&myInfo->CurrentCmdInvalidMsgs,
&myInfo->PriorCmdInvalidMsgs);
}
else
@@ -869,41 +847,6 @@ PrepareInvalidationState(void)
}
transInvalInfo = myInfo;
- return (InvalidationInfo *) myInfo;
-}
-
-/*
- * PrepareInplaceInvalidationState
- * Initialize inval data for an inplace update.
- *
- * See previous function for more background.
- */
-static InvalidationInfo *
-PrepareInplaceInvalidationState(void)
-{
- InvalidationInfo *myInfo;
-
- Assert(IsTransactionState());
- /* limit of one inplace update under assembly */
- Assert(inplaceInvalInfo == NULL);
-
- /* gone after WAL insertion CritSection ends, so use current context */
- myInfo = (InvalidationInfo *) palloc0(sizeof(InvalidationInfo));
-
- /* Stash our messages past end of the transactional messages, if any. */
- if (transInvalInfo != NULL)
- SetGroupToFollow(&myInfo->CurrentCmdInvalidMsgs,
- &transInvalInfo->ii.CurrentCmdInvalidMsgs);
- else
- {
- InvalMessageArrays[CatCacheMsgs].msgs = NULL;
- InvalMessageArrays[CatCacheMsgs].maxmsgs = 0;
- InvalMessageArrays[RelCacheMsgs].msgs = NULL;
- InvalMessageArrays[RelCacheMsgs].maxmsgs = 0;
- }
-
- inplaceInvalInfo = myInfo;
- return myInfo;
}
/*
@@ -961,7 +904,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
* after we send the SI messages. However, we need not do anything unless
* we committed.
*/
- *RelcacheInitFileInval = transInvalInfo->ii.RelcacheInitFileInval;
+ *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
/*
* Collect all the pending messages into a single contiguous array of
@@ -972,7 +915,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
* not new ones.
*/
nummsgs = NumMessagesInGroup(&transInvalInfo->PriorCmdInvalidMsgs) +
- NumMessagesInGroup(&transInvalInfo->ii.CurrentCmdInvalidMsgs);
+ NumMessagesInGroup(&transInvalInfo->CurrentCmdInvalidMsgs);
*msgs = msgarray = (SharedInvalidationMessage *)
MemoryContextAlloc(CurTransactionContext,
@@ -985,7 +928,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
msgs,
n * sizeof(SharedInvalidationMessage)),
nmsgs += n));
- ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
+ ProcessMessageSubGroupMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
CatCacheMsgs,
(memcpy(msgarray + nmsgs,
msgs,
@@ -997,7 +940,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
msgs,
n * sizeof(SharedInvalidationMessage)),
nmsgs += n));
- ProcessMessageSubGroupMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
+ ProcessMessageSubGroupMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
RelCacheMsgs,
(memcpy(msgarray + nmsgs,
msgs,
@@ -1084,9 +1027,7 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
void
AtEOXact_Inval(bool isCommit)
{
- inplaceInvalInfo = NULL;
-
- /* Quick exit if no transactional messages */
+ /* Quick exit if no messages */
if (transInvalInfo == NULL)
return;
@@ -1100,16 +1041,16 @@ AtEOXact_Inval(bool isCommit)
* after we send the SI messages. However, we need not do anything
* unless we committed.
*/
- if (transInvalInfo->ii.RelcacheInitFileInval)
+ if (transInvalInfo->RelcacheInitFileInval)
RelationCacheInitFilePreInvalidate();
AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
- &transInvalInfo->ii.CurrentCmdInvalidMsgs);
+ &transInvalInfo->CurrentCmdInvalidMsgs);
ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
SendSharedInvalidMessages);
- if (transInvalInfo->ii.RelcacheInitFileInval)
+ if (transInvalInfo->RelcacheInitFileInval)
RelationCacheInitFilePostInvalidate();
}
else
@@ -1123,44 +1064,6 @@ AtEOXact_Inval(bool isCommit)
}
/*
- * PreInplace_Inval
- * Process queued-up invalidation before inplace update critical section.
- *
- * Tasks belong here if they are safe even if the inplace update does not
- * complete. Currently, this just unlinks a cache file, which can fail. The
- * sum of this and AtInplace_Inval() mirrors AtEOXact_Inval(isCommit=true).
- */
-void
-PreInplace_Inval(void)
-{
- Assert(CritSectionCount == 0);
-
- if (inplaceInvalInfo && inplaceInvalInfo->RelcacheInitFileInval)
- RelationCacheInitFilePreInvalidate();
-}
-
-/*
- * AtInplace_Inval
- * Process queued-up invalidations after inplace update buffer mutation.
- */
-void
-AtInplace_Inval(void)
-{
- Assert(CritSectionCount > 0);
-
- if (inplaceInvalInfo == NULL)
- return;
-
- ProcessInvalidationMessagesMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
- SendSharedInvalidMessages);
-
- if (inplaceInvalInfo->RelcacheInitFileInval)
- RelationCacheInitFilePostInvalidate();
-
- inplaceInvalInfo = NULL;
-}
-
-/*
* AtEOSubXact_Inval
* Process queued-up invalidation messages at end of subtransaction.
*
@@ -1182,20 +1085,9 @@ void
AtEOSubXact_Inval(bool isCommit)
{
int my_level;
- TransInvalidationInfo *myInfo;
+ TransInvalidationInfo *myInfo = transInvalInfo;
- /*
- * Successful inplace update must clear this, but we clear it on abort.
- * Inplace updates allocate this in CurrentMemoryContext, which has
- * lifespan <= subtransaction lifespan. Hence, don't free it explicitly.
- */
- if (isCommit)
- Assert(inplaceInvalInfo == NULL);
- else
- inplaceInvalInfo = NULL;
-
- /* Quick exit if no transactional messages. */
- myInfo = transInvalInfo;
+ /* Quick exit if no messages. */
if (myInfo == NULL)
return;
@@ -1236,12 +1128,12 @@ AtEOSubXact_Inval(bool isCommit)
&myInfo->PriorCmdInvalidMsgs);
/* Must readjust parent's CurrentCmdInvalidMsgs indexes now */
- SetGroupToFollow(&myInfo->parent->ii.CurrentCmdInvalidMsgs,
+ SetGroupToFollow(&myInfo->parent->CurrentCmdInvalidMsgs,
&myInfo->parent->PriorCmdInvalidMsgs);
/* Pending relcache inval becomes parent's problem too */
- if (myInfo->ii.RelcacheInitFileInval)
- myInfo->parent->ii.RelcacheInitFileInval = true;
+ if (myInfo->RelcacheInitFileInval)
+ myInfo->parent->RelcacheInitFileInval = true;
/* Pop the transaction state stack */
transInvalInfo = myInfo->parent;
@@ -1288,7 +1180,7 @@ CommandEndInvalidationMessages(void)
if (transInvalInfo == NULL)
return;
- ProcessInvalidationMessages(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
+ ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
LocalExecuteInvalidationMessage);
/* WAL Log per-command invalidation messages for wal_level=logical */
@@ -1296,21 +1188,26 @@ CommandEndInvalidationMessages(void)
LogLogicalInvalidations();
AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
- &transInvalInfo->ii.CurrentCmdInvalidMsgs);
+ &transInvalInfo->CurrentCmdInvalidMsgs);
}
/*
- * CacheInvalidateHeapTupleCommon
- * Common logic for end-of-command and inplace variants.
+ * CacheInvalidateHeapTuple
+ * Register the given tuple for invalidation at end of command
+ * (ie, current command is creating or outdating this tuple).
+ * Also, detect whether a relcache invalidation is implied.
+ *
+ * For an insert or delete, tuple is the target tuple and newtuple is NULL.
+ * For an update, we are called just once, with tuple being the old tuple
+ * version and newtuple the new version. This allows avoidance of duplicate
+ * effort during an update.
*/
-static void
-CacheInvalidateHeapTupleCommon(Relation relation,
- HeapTuple tuple,
- HeapTuple newtuple,
- InvalidationInfo *(*prepare_callback) (void))
+void
+CacheInvalidateHeapTuple(Relation relation,
+ HeapTuple tuple,
+ HeapTuple newtuple)
{
- InvalidationInfo *info;
Oid tupleRelId;
Oid databaseId;
Oid relationId;
@@ -1334,8 +1231,11 @@ CacheInvalidateHeapTupleCommon(Relation relation,
if (IsToastRelation(relation))
return;
- /* Allocate any required resources. */
- info = prepare_callback();
+ /*
+ * If we're not prepared to queue invalidation messages for this
+ * subtransaction level, get ready now.
+ */
+ PrepareInvalidationState();
/*
* First let the catcache do its thing
@@ -1344,12 +1244,11 @@ CacheInvalidateHeapTupleCommon(Relation relation,
if (RelationInvalidatesSnapshotsOnly(tupleRelId))
{
databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
- RegisterSnapshotInvalidation(info, databaseId, tupleRelId);
+ RegisterSnapshotInvalidation(databaseId, tupleRelId);
}
else
PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
- RegisterCatcacheInvalidation,
- (void *) info);
+ RegisterCatcacheInvalidation);
/*
* Now, is this tuple one of the primary definers of a relcache entry? See
@@ -1422,44 +1321,7 @@ CacheInvalidateHeapTupleCommon(Relation relation,
/*
* Yes. We need to register a relcache invalidation event.
*/
- RegisterRelcacheInvalidation(info, databaseId, relationId);
-}
-
-/*
- * CacheInvalidateHeapTuple
- * Register the given tuple for invalidation at end of command
- * (ie, current command is creating or outdating this tuple) and end of
- * transaction. Also, detect whether a relcache invalidation is implied.
- *
- * For an insert or delete, tuple is the target tuple and newtuple is NULL.
- * For an update, we are called just once, with tuple being the old tuple
- * version and newtuple the new version. This allows avoidance of duplicate
- * effort during an update.
- */
-void
-CacheInvalidateHeapTuple(Relation relation,
- HeapTuple tuple,
- HeapTuple newtuple)
-{
- CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
- PrepareInvalidationState);
-}
-
-/*
- * CacheInvalidateHeapTupleInplace
- * Register the given tuple for nontransactional invalidation pertaining
- * to an inplace update. Also, detect whether a relcache invalidation is
- * implied.
- *
- * Like CacheInvalidateHeapTuple(), but for inplace updates.
- */
-void
-CacheInvalidateHeapTupleInplace(Relation relation,
- HeapTuple tuple,
- HeapTuple newtuple)
-{
- CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
- PrepareInplaceInvalidationState);
+ RegisterRelcacheInvalidation(databaseId, relationId);
}
/*
@@ -1478,13 +1340,14 @@ CacheInvalidateCatalog(Oid catalogId)
{
Oid databaseId;
+ PrepareInvalidationState();
+
if (IsSharedRelation(catalogId))
databaseId = InvalidOid;
else
databaseId = MyDatabaseId;
- RegisterCatalogInvalidation(PrepareInvalidationState(),
- databaseId, catalogId);
+ RegisterCatalogInvalidation(databaseId, catalogId);
}
/*
@@ -1502,14 +1365,15 @@ CacheInvalidateRelcache(Relation relation)
Oid databaseId;
Oid relationId;
+ PrepareInvalidationState();
+
relationId = RelationGetRelid(relation);
if (relation->rd_rel->relisshared)
databaseId = InvalidOid;
else
databaseId = MyDatabaseId;
- RegisterRelcacheInvalidation(PrepareInvalidationState(),
- databaseId, relationId);
+ RegisterRelcacheInvalidation(databaseId, relationId);
}
/*
@@ -1522,8 +1386,9 @@ CacheInvalidateRelcache(Relation relation)
void
CacheInvalidateRelcacheAll(void)
{
- RegisterRelcacheInvalidation(PrepareInvalidationState(),
- InvalidOid, InvalidOid);
+ PrepareInvalidationState();
+
+ RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
}
/*
@@ -1537,13 +1402,14 @@ CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
Oid databaseId;
Oid relationId;
+ PrepareInvalidationState();
+
relationId = classtup->oid;
if (classtup->relisshared)
databaseId = InvalidOid;
else
databaseId = MyDatabaseId;
- RegisterRelcacheInvalidation(PrepareInvalidationState(),
- databaseId, relationId);
+ RegisterRelcacheInvalidation(databaseId, relationId);
}
/*
@@ -1557,6 +1423,8 @@ CacheInvalidateRelcacheByRelid(Oid relid)
{
HeapTuple tup;
+ PrepareInvalidationState();
+
tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for relation %u", relid);
@@ -1746,7 +1614,7 @@ LogLogicalInvalidations(void)
if (transInvalInfo == NULL)
return;
- group = &transInvalInfo->ii.CurrentCmdInvalidMsgs;
+ group = &transInvalInfo->CurrentCmdInvalidMsgs;
nmsgs = NumMessagesInGroup(group);
if (nmsgs > 0)
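
(Assembled from the added lines above, the restored control struct is flat
again: one per (sub)transaction level, with no separate InvalidationInfo
base type.)

    typedef struct TransInvalidationInfo
    {
        struct TransInvalidationInfo *parent;   /* enclosing level's info */
        int         my_level;                   /* subxact nesting depth */
        InvalidationMsgsGroup CurrentCmdInvalidMsgs;   /* current command */
        InvalidationMsgsGroup PriorCmdInvalidMsgs;     /* earlier commands */
        bool        RelcacheInitFileInval;      /* init file must be invalidated? */
    } TransInvalidationInfo;

    static TransInvalidationInfo *transInvalInfo = NULL;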
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index b0cc6a00eea..697b17bdd74 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -951,7 +951,8 @@ SearchSysCacheLocked1(int cacheId,
/*
* If an inplace update just finished, ensure we process the syscache
- * inval.
+ * inval. XXX this is insufficient: the inplace updater may not yet
+ * have reached AtEOXact_Inval(). See test at inplace-inval.spec.
*
* If a heap_update() call just released its LOCKTAG_TUPLE, we'll
* probably find the old tuple and reach "tuple concurrently updated".
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index 9204663a765..a32d7222a99 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -228,8 +228,7 @@ extern void CatCacheInvalidate(CatCache *cache, uint32 hashValue);
extern void PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
HeapTuple newtuple,
- void (*function) (int, uint32, Oid, void *),
- void *context);
+ void (*function) (int, uint32, Oid));
extern void PrintCatCacheLeakWarning(HeapTuple tuple);
extern void PrintCatCacheListLeakWarning(CatCList *list);
diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h
index e807779e26e..14b4eac0630 100644
--- a/src/include/utils/inval.h
+++ b/src/include/utils/inval.h
@@ -28,9 +28,6 @@ extern void AcceptInvalidationMessages(void);
extern void AtEOXact_Inval(bool isCommit);
-extern void PreInplace_Inval(void);
-extern void AtInplace_Inval(void);
-
extern void AtEOSubXact_Inval(bool isCommit);
extern void PostPrepare_Inval(void);
@@ -40,9 +37,6 @@ extern void CommandEndInvalidationMessages(void);
extern void CacheInvalidateHeapTuple(Relation relation,
HeapTuple tuple,
HeapTuple newtuple);
-extern void CacheInvalidateHeapTupleInplace(Relation relation,
- HeapTuple tuple,
- HeapTuple newtuple);
extern void CacheInvalidateCatalog(Oid catalogId);
diff --git a/src/test/isolation/expected/inplace-inval.out b/src/test/isolation/expected/inplace-inval.out
index c35895a8aa7..e68eca5de98 100644
--- a/src/test/isolation/expected/inplace-inval.out
+++ b/src/test/isolation/expected/inplace-inval.out
@@ -1,6 +1,6 @@
Parsed test spec with 3 sessions
-starting permutation: cachefill3 cir1 cic2 ddl3 read1
+starting permutation: cachefill3 cir1 cic2 ddl3
step cachefill3: TABLE newly_indexed;
c
-
@@ -9,14 +9,6 @@ c
step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK;
step cic2: CREATE INDEX i2 ON newly_indexed (c);
step ddl3: ALTER TABLE newly_indexed ADD extra int;
-step read1:
- SELECT relhasindex FROM pg_class WHERE oid = 'newly_indexed'::regclass;
-
-relhasindex
------------
-t
-(1 row)
-
starting permutation: cir1 cic2 ddl3 read1
step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK;
diff --git a/src/test/isolation/specs/inplace-inval.spec b/src/test/isolation/specs/inplace-inval.spec
index b99112ddb88..96954fd86c4 100644
--- a/src/test/isolation/specs/inplace-inval.spec
+++ b/src/test/isolation/specs/inplace-inval.spec
@@ -1,7 +1,7 @@
-# An inplace update had been able to abort before sending the inplace
-# invalidation message to the shared queue. If a heap_update() caller then
-# retrieved its oldtup from a cache, the heap_update() could revert the
-# inplace update.
+# If a heap_update() caller retrieves its oldtup from a cache, it's possible
+# for that cache entry to predate an inplace update, causing loss of that
+# inplace update. This arises because the transaction may abort before
+# sending the inplace invalidation message to the shared queue.
setup
{
@@ -27,12 +27,14 @@ step cachefill3 { TABLE newly_indexed; }
step ddl3 { ALTER TABLE newly_indexed ADD extra int; }
+# XXX shows an extant bug. Adding step read1 at the end would usually print
+# relhasindex=f (not wanted). This does not reach the unwanted behavior under
+# -DCATCACHE_FORCE_RELEASE and friends.
permutation
cachefill3 # populates the pg_class row in the catcache
cir1 # sets relhasindex=true; rollback discards cache inval
cic2 # sees relhasindex=true, skips changing it (so no inval)
ddl3 # cached row as the oldtup of an update, losing relhasindex
- read1 # observe damage
# without cachefill3, no bug
permutation cir1 cic2 ddl3 read1
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index fcaa2e6387b..43570438e99 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -1205,7 +1205,6 @@ InternalGrant
Interval
IntoClause
InvalMessageArray
-InvalidationInfo
InvalidationMsgsGroup
IpcMemoryId
IpcMemoryKey