-rw-r--r--   src/backend/access/heap/heapam.c               |  43
-rw-r--r--   src/backend/access/transam/xact.c              |  26
-rw-r--r--   src/backend/catalog/index.c                    |  11
-rw-r--r--   src/backend/replication/logical/decode.c       |  26
-rw-r--r--   src/backend/utils/cache/catcache.c             |   7
-rw-r--r--   src/backend/utils/cache/inval.c                | 261
-rw-r--r--   src/backend/utils/cache/syscache.c             |   3
-rw-r--r--   src/include/utils/catcache.h                   |   3
-rw-r--r--   src/include/utils/inval.h                      |   6
-rw-r--r--   src/test/isolation/expected/inplace-inval.out  |  10
-rw-r--r--   src/test/isolation/specs/inplace-inval.spec    |  12
11 files changed, 290 insertions(+), 118 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f54609dc8b9..e3a516d5394 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6383,6 +6383,24 @@ heap_inplace_update_and_unlock(Relation relation,
     if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
         elog(ERROR, "wrong tuple length");
 
+    /*
+     * Construct shared cache inval if necessary.  Note that because we only
+     * pass the new version of the tuple, this mustn't be used for any
+     * operations that could change catcache lookup keys.  But we aren't
+     * bothering with index updates either, so that's true a fortiori.
+     */
+    CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
+
+    /*
+     * Unlink relcache init files as needed.  If unlinking, acquire
+     * RelCacheInitLock until after associated invalidations.  By doing this
+     * in advance, if we checkpoint and then crash between inplace
+     * XLogInsert() and inval, we don't rely on StartupXLOG() ->
+     * RelationCacheInitFileRemove().  That uses elevel==LOG, so replay would
+     * neglect to PANIC on EIO.
+     */
+    PreInplace_Inval();
+
     /* NO EREPORT(ERROR) from here till changes are logged */
     START_CRIT_SECTION();
 
@@ -6426,17 +6444,28 @@ heap_inplace_update_and_unlock(Relation relation,
         PageSetLSN(BufferGetPage(buffer), recptr);
     }
 
+    LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+    /*
+     * Send invalidations to shared queue.  SearchSysCacheLocked1() assumes we
+     * do this before UnlockTuple().
+     *
+     * If we're mutating a tuple visible only to this transaction, there's an
+     * equivalent transactional inval from the action that created the tuple,
+     * and this inval is superfluous.
+     */
+    AtInplace_Inval();
+
     END_CRIT_SECTION();
+    UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
 
-    heap_inplace_unlock(relation, oldtup, buffer);
+    AcceptInvalidationMessages();   /* local processing of just-sent inval */
 
     /*
-     * Send out shared cache inval if necessary.  Note that because we only
-     * pass the new version of the tuple, this mustn't be used for any
-     * operations that could change catcache lookup keys.  But we aren't
-     * bothering with index updates either, so that's true a fortiori.
-     *
-     * XXX ROLLBACK discards the invalidation.  See test inplace-inval.spec.
+     * Queue a transactional inval.  The immediate invalidation we just sent
+     * is the only one known to be necessary.  To reduce risk from the
+     * transition to immediate invalidation, continue sending a transactional
+     * invalidation like we've long done.  Third-party code might rely on it.
     */
     if (!IsBootstrapProcessingMode())
         CacheInvalidateHeapTuple(relation, tuple, NULL);
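The ordering established by these hunks is the heart of the fix: the shared-queue invalidation now goes out while the inplace updater still holds the tuple lock, so a concurrent SearchSysCacheLocked1() caller that obtains the lock afterwards must also see the invalidation. The following toy program is not from the patch; it is a minimal pthread model of that "publish inval before unlock" invariant, and all names in it (tuple_lock, pending_invals, reader_cache) are invented for illustration:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /*
     * Toy model: the updater publishes its invalidation to the shared queue
     * BEFORE releasing the lock, so any reader that takes the lock afterwards
     * sees the invalidation and refreshes its stale cache entry.
     */
    static pthread_mutex_t tuple_lock = PTHREAD_MUTEX_INITIALIZER;
    static int shared_catalog_value;    /* the "inplace-updated" tuple */
    static int pending_invals;          /* the "shared inval queue" */

    static void *
    updater(void *arg)
    {
        (void) arg;
        pthread_mutex_lock(&tuple_lock);    /* like LockTuple() */
        shared_catalog_value = 1;           /* mutate the tuple in place */
        pending_invals++;                   /* AtInplace_Inval(): before unlock */
        pthread_mutex_unlock(&tuple_lock);  /* like UnlockTuple() */
        return NULL;
    }

    int
    main(void)
    {
        pthread_t   t;
        int         reader_cache = shared_catalog_value;    /* warm "catcache" */

        pthread_create(&t, NULL, updater, NULL);
        pthread_join(t, NULL);

        pthread_mutex_lock(&tuple_lock);
        if (pending_invals > 0)             /* AcceptInvalidationMessages() */
        {
            pending_invals = 0;
            reader_cache = shared_catalog_value;    /* discard stale entry */
        }
        assert(reader_cache == 1);          /* the inplace update is not lost */
        pthread_mutex_unlock(&tuple_lock);
        printf("reader sees %d\n", reader_cache);
        return 0;
    }

The analogue of the old bug: if updater() released tuple_lock before incrementing pending_invals, the reader could take the lock in that window, see no pending inval, and keep (and later write back) its stale copy.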
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 3f0f711307d..395650d6406 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1276,14 +1276,24 @@ RecordTransactionCommit(void)
 
         /*
          * Transactions without an assigned xid can contain invalidation
-         * messages (e.g. explicit relcache invalidations or catcache
-         * invalidations for inplace updates); standbys need to process those.
-         * We can't emit a commit record without an xid, and we don't want to
-         * force assigning an xid, because that'd be problematic for e.g.
-         * vacuum.  Hence we emit a bespoke record for the invalidations. We
-         * don't want to use that in case a commit record is emitted, so they
-         * happen synchronously with commits (besides not wanting to emit more
-         * WAL records).
+         * messages.  While inplace updates do this, this is not known to be
+         * necessary; see comment at inplace CacheInvalidateHeapTuple().
+         * Extensions might still rely on this capability, and standbys may
+         * need to process those invals.  We can't emit a commit record
+         * without an xid, and we don't want to force assigning an xid,
+         * because that'd be problematic for e.g. vacuum.  Hence we emit a
+         * bespoke record for the invalidations.  We don't want to use that in
+         * case a commit record is emitted, so they happen synchronously with
+         * commits (besides not wanting to emit more WAL records).
+         *
+         * XXX Every known use of this capability is a defect.  Since an XID
+         * isn't controlling visibility of the change that prompted invals,
+         * other sessions need the inval even if this transaction aborts.
+         *
+         * ON COMMIT DELETE ROWS does a nontransactional index_build(), which
+         * queues a relcache inval, including in transactions without an xid
+         * that had read the (empty) table.  Standbys don't need any ON COMMIT
+         * DELETE ROWS invals, but we've not done the work to withhold them.
          */
         if (nmsgs != 0)
         {
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index e4fd1fd10b9..f0dd9621fbc 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -2895,12 +2895,19 @@ index_update_stats(Relation rel,
     if (dirty)
     {
         systable_inplace_update_finish(state, tuple);
-        /* the above sends a cache inval message */
+        /* the above sends transactional and immediate cache inval messages */
     }
     else
     {
         systable_inplace_update_cancel(state);
-        /* no need to change tuple, but force relcache inval anyway */
+
+        /*
+         * While we didn't change relhasindex, CREATE INDEX needs a
+         * transactional inval for when the new index's catalog rows become
+         * visible.  Other CREATE INDEX and REINDEX code happens to also queue
+         * this inval, but keep this in case rare callers rely on this part of
+         * our API contract.
+         */
         CacheInvalidateRelcacheByTuple(tuple);
     }
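For context on the "bespoke record" the rewritten RecordTransactionCommit() comment mentions: in released branches, the code guarded by the `if (nmsgs != 0)` shown above emits the queued messages as a standalone WAL record via LogStandbyInvalidations(). The following is a sketch reproduced from my reading of released branches, not part of this diff:

    /* Sketch of the pre-existing no-xid path the comment describes: with no
     * commit record to piggyback on, emit the invalidations as their own
     * standalone WAL record for standbys to replay. */
    if (nmsgs != 0)
    {
        LogStandbyInvalidations(nmsgs, invalMessages,
                                RelcacheInitFileInval);
        wrote_xlog = true;      /* remember that we wrote WAL after all */
    }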
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 1c0058fc8fd..e1753dd7d53 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -547,23 +547,19 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 
             /*
              * Inplace updates are only ever performed on catalog tuples and
-             * can, per definition, not change tuple visibility.  Since we
-             * don't decode catalog tuples, we're not interested in the
-             * record's contents.
+             * can, per definition, not change tuple visibility.  Inplace
+             * updates don't affect storage or interpretation of table rows,
+             * so they don't affect logicalrep_write_tuple() outcomes.  Hence,
+             * we don't process invalidations from the original operation.  If
+             * inplace updates did affect those things, invalidations wouldn't
+             * make it work, since there are no snapshot-specific versions of
+             * inplace-updated values.  Since we also don't decode catalog
+             * tuples, we're not interested in the record's contents.
              *
-             * In-place updates can be used either by XID-bearing transactions
-             * (e.g. in CREATE INDEX CONCURRENTLY) or by XID-less
-             * transactions (e.g. VACUUM).  In the former case, the commit
-             * record will include cache invalidations, so we mark the
-             * transaction as catalog modifying here.  Currently that's
-             * redundant because the commit will do that as well, but once we
-             * support decoding in-progress relations, this will be important.
+             * WAL contains likely-unnecessary commit-time invals from the
+             * CacheInvalidateHeapTuple() call in heap_inplace_update().
+             * Excess invalidation is safe.
              */
-            if (!TransactionIdIsValid(xid))
-                break;
-
-            SnapBuildProcessChange(builder, xid, buf->origptr);
-            ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
             break;
 
         case XLOG_HEAP_CONFIRM:
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 59790302a07..091b0611cb7 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -2131,7 +2131,8 @@ void
 PrepareToInvalidateCacheTuple(Relation relation,
                               HeapTuple tuple,
                               HeapTuple newtuple,
-                              void (*function) (int, uint32, Oid))
+                              void (*function) (int, uint32, Oid, void *),
+                              void *context)
 {
     slist_iter  iter;
     Oid         reloid;
@@ -2172,7 +2173,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
             hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
             dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
 
-            (*function) (ccp->id, hashvalue, dbid);
+            (*function) (ccp->id, hashvalue, dbid, context);
 
             if (newtuple)
             {
@@ -2181,7 +2182,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
                 newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
 
                 if (newhashvalue != hashvalue)
-                    (*function) (ccp->id, newhashvalue, dbid);
+                    (*function) (ccp->id, newhashvalue, dbid, context);
             }
         }
     }
 }
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index e281a45155c..c793be55a58 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -99,6 +99,10 @@
  * worth trying to avoid sending such inval traffic in the future, if those
  * problems can be overcome cheaply.
  *
+ * When making a nontransactional change to a cacheable object, we must
+ * likewise send the invalidation immediately, before ending the change's
+ * critical section.  This includes inplace heap updates, relmap, and smgr.
+ *
  * When wal_level=logical, write invalidations into WAL at each command end to
  * support the decoding of the in-progress transactions.  See
  * CommandEndInvalidationMessages.
@@ -154,7 +158,7 @@ typedef struct InvalidationListHeader
 } InvalidationListHeader;
 
 /*----------------
- * Invalidation info is divided into two lists:
+ * Transactional invalidation info is divided into two lists:
 *	1) events so far in current command, not yet reflected to caches.
 *	2) events in previous commands of current transaction; these have
 *	   been reflected to local caches, and must be either broadcast to
@@ -170,26 +174,36 @@ typedef struct InvalidationListHeader
 *----------------
 */
 
-typedef struct TransInvalidationInfo
+/* fields common to both transactional and inplace invalidation */
+typedef struct InvalidationInfo
 {
-    /* Back link to parent transaction's info */
-    struct TransInvalidationInfo *parent;
-
-    /* Subtransaction nesting depth */
-    int         my_level;
-
     /* head of current-command event list */
     InvalidationListHeader CurrentCmdInvalidMsgs;
 
+    /* init file must be invalidated? */
+    bool        RelcacheInitFileInval;
+} InvalidationInfo;
+
+/* subclass adding fields specific to transactional invalidation */
+typedef struct TransInvalidationInfo
+{
+    /* Base class */
+    struct InvalidationInfo ii;
+
     /* head of previous-commands event list */
     InvalidationListHeader PriorCmdInvalidMsgs;
 
-    /* init file must be invalidated? */
-    bool        RelcacheInitFileInval;
+    /* Back link to parent transaction's info */
+    struct TransInvalidationInfo *parent;
+
+    /* Subtransaction nesting depth */
+    int         my_level;
 } TransInvalidationInfo;
 
 static TransInvalidationInfo *transInvalInfo = NULL;
 
+static InvalidationInfo *inplaceInvalInfo = NULL;
+
 static SharedInvalidationMessage *SharedInvalidMessagesArray;
 static int  numSharedInvalidMessagesArray;
 static int  maxSharedInvalidMessagesArray;
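Two mechanical changes meet here: PrepareToInvalidateCacheTuple() now threads a caller-supplied `void *context` through to its callback, and inval.c splits a common InvalidationInfo "base class" out of TransInvalidationInfo, embedding it as the first member `ii`. Together they let one callback serve either the transactional or the inplace message list. Below is a standalone sketch of the combined idiom; the names (InfoBase, TransInfo, register_inval) are invented, standing in for InvalidationInfo, TransInvalidationInfo, and RegisterCatcacheInvalidation:

    #include <stdio.h>

    /* "Base class": the fields both invalidation flavors share. */
    typedef struct InfoBase
    {
        int     nmsgs;              /* stands in for CurrentCmdInvalidMsgs */
    } InfoBase;

    /* "Subclass": the base must be the FIRST member, so a pointer to the
     * struct is also a valid pointer to its base. */
    typedef struct TransInfo
    {
        InfoBase    ii;
        int         my_level;       /* transactional-only state */
    } TransInfo;

    /* The callback receives an explicit destination instead of always
     * writing to a global like transInvalInfo. */
    static void
    register_inval(int cacheId, unsigned int hashValue, void *context)
    {
        InfoBase   *info = (InfoBase *) context;    /* either flavor works */

        info->nmsgs++;
        printf("queued inval for cache %d (hash %u)\n", cacheId, hashValue);
    }

    int
    main(void)
    {
        TransInfo   trans = {{0}, 1};
        InfoBase    inplace = {0};

        register_inval(42, 0xdeadbeefU, &trans);    /* upcast via 1st member */
        register_inval(42, 0xdeadbeefU, &inplace);
        printf("trans=%d inplace=%d\n", trans.ii.nmsgs, inplace.nmsgs);
        return 0;
    }

Because `ii` is the first member, a TransInfo pointer and a pointer to its base are interchangeable (C guarantees no padding before the first member), which is exactly the cast PrepareInvalidationState() performs below when it returns `(InvalidationInfo *) transInvalInfo`.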
@@ -505,9 +519,12 @@ ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr,
 static void
 RegisterCatcacheInvalidation(int cacheId,
                              uint32 hashValue,
-                             Oid dbId)
+                             Oid dbId,
+                             void *context)
 {
-    AddCatcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
+    InvalidationInfo *info = (InvalidationInfo *) context;
+
+    AddCatcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs,
                                    cacheId, hashValue, dbId);
 }
 
@@ -517,10 +534,9 @@ RegisterCatcacheInvalidation(int cacheId,
  * Register an invalidation event for all catcache entries from a catalog.
  */
 static void
-RegisterCatalogInvalidation(Oid dbId, Oid catId)
+RegisterCatalogInvalidation(InvalidationInfo *info, Oid dbId, Oid catId)
 {
-    AddCatalogInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
-                                  dbId, catId);
+    AddCatalogInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, catId);
 }
 
 /*
@@ -529,10 +545,9 @@ RegisterCatalogInvalidation(Oid dbId, Oid catId)
  * As above, but register a relcache invalidation event.
  */
 static void
-RegisterRelcacheInvalidation(Oid dbId, Oid relId)
+RegisterRelcacheInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
 {
-    AddRelcacheInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
-                                   dbId, relId);
+    AddRelcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
 
     /*
      * Most of the time, relcache invalidation is associated with system
@@ -549,7 +564,7 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId)
      * as well.  Also zap when we are invalidating whole relcache.
      */
     if (relId == InvalidOid || RelationIdIsInInitFile(relId))
-        transInvalInfo->RelcacheInitFileInval = true;
+        info->RelcacheInitFileInval = true;
 }
 
 /*
@@ -559,10 +574,9 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId)
  * Only needed for catalogs that don't have catcaches.
  */
 static void
-RegisterSnapshotInvalidation(Oid dbId, Oid relId)
+RegisterSnapshotInvalidation(InvalidationInfo *info, Oid dbId, Oid relId)
 {
-    AddSnapshotInvalidationMessage(&transInvalInfo->CurrentCmdInvalidMsgs,
-                                   dbId, relId);
+    AddSnapshotInvalidationMessage(&info->CurrentCmdInvalidMsgs, dbId, relId);
 }
 
 /*
@@ -752,14 +766,18 @@ AcceptInvalidationMessages(void)
 * PrepareInvalidationState
 *		Initialize inval lists for the current (sub)transaction.
 */
-static void
+static InvalidationInfo *
 PrepareInvalidationState(void)
 {
     TransInvalidationInfo *myInfo;
 
+    Assert(IsTransactionState());
+    /* Can't queue transactional message while collecting inplace messages. */
+    Assert(inplaceInvalInfo == NULL);
+
     if (transInvalInfo != NULL &&
         transInvalInfo->my_level == GetCurrentTransactionNestLevel())
-        return;
+        return (InvalidationInfo *) transInvalInfo;
 
     myInfo = (TransInvalidationInfo *)
         MemoryContextAllocZero(TopTransactionContext,
@@ -775,6 +793,29 @@ PrepareInvalidationState(void)
            myInfo->my_level > transInvalInfo->my_level);
     transInvalInfo = myInfo;
+    return (InvalidationInfo *) myInfo;
+}
+
+/*
+ * PrepareInplaceInvalidationState
+ *		Initialize inval data for an inplace update.
+ *
+ * See previous function for more background.
+ */
+static InvalidationInfo *
+PrepareInplaceInvalidationState(void)
+{
+    InvalidationInfo *myInfo;
+
+    Assert(IsTransactionState());
+    /* limit of one inplace update under assembly */
+    Assert(inplaceInvalInfo == NULL);
+
+    /* gone after WAL insertion CritSection ends, so use current context */
+    myInfo = (InvalidationInfo *) palloc0(sizeof(InvalidationInfo));
+
+    inplaceInvalInfo = myInfo;
+    return myInfo;
 }
 
 /*
@@ -870,7 +911,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
      * after we send the SI messages.  However, we need not do anything unless
      * we committed.
      */
-    *RelcacheInitFileInval = transInvalInfo->RelcacheInitFileInval;
+    *RelcacheInitFileInval = transInvalInfo->ii.RelcacheInitFileInval;
 
     /*
      * Walk through TransInvalidationInfo to collect all the messages into a
@@ -882,7 +923,7 @@ xactGetCommittedInvalidationMessages(SharedInvalidationMessage **msgs,
      */
     oldcontext = MemoryContextSwitchTo(CurTransactionContext);
 
-    ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
+    ProcessInvalidationMessagesMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
                                      MakeSharedInvalidMessagesArray);
     ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
                                      MakeSharedInvalidMessagesArray);
@@ -972,7 +1013,9 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
 void
 AtEOXact_Inval(bool isCommit)
 {
-    /* Quick exit if no messages */
+    inplaceInvalInfo = NULL;
+
+    /* Quick exit if no transactional messages */
     if (transInvalInfo == NULL)
         return;
 
@@ -986,16 +1029,16 @@ AtEOXact_Inval(bool isCommit)
          * after we send the SI messages.  However, we need not do anything
          * unless we committed.
          */
-        if (transInvalInfo->RelcacheInitFileInval)
+        if (transInvalInfo->ii.RelcacheInitFileInval)
             RelationCacheInitFilePreInvalidate();
 
         AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
-                                   &transInvalInfo->CurrentCmdInvalidMsgs);
+                                   &transInvalInfo->ii.CurrentCmdInvalidMsgs);
 
         ProcessInvalidationMessagesMulti(&transInvalInfo->PriorCmdInvalidMsgs,
                                          SendSharedInvalidMessages);
 
-        if (transInvalInfo->RelcacheInitFileInval)
+        if (transInvalInfo->ii.RelcacheInitFileInval)
             RelationCacheInitFilePostInvalidate();
     }
     else
@@ -1011,6 +1054,45 @@
 }
 
 /*
+ * PreInplace_Inval
+ *		Process queued-up invalidation before inplace update critical section.
+ *
+ * Tasks belong here if they are safe even if the inplace update does not
+ * complete.  Currently, this just unlinks a cache file, which can fail.  The
+ * sum of this and AtInplace_Inval() mirrors AtEOXact_Inval(isCommit=true).
+ */
+void
+PreInplace_Inval(void)
+{
+    Assert(CritSectionCount == 0);
+
+    if (inplaceInvalInfo && inplaceInvalInfo->RelcacheInitFileInval)
+        RelationCacheInitFilePreInvalidate();
+}
+
+/*
+ * AtInplace_Inval
+ *		Process queued-up invalidations after inplace update buffer mutation.
+ */
+void
+AtInplace_Inval(void)
+{
+    Assert(CritSectionCount > 0);
+
+    if (inplaceInvalInfo == NULL)
+        return;
+
+    ProcessInvalidationMessagesMulti(&inplaceInvalInfo->CurrentCmdInvalidMsgs,
+                                     SendSharedInvalidMessages);
+
+    if (inplaceInvalInfo->RelcacheInitFileInval)
+        RelationCacheInitFilePostInvalidate();
+
+    inplaceInvalInfo = NULL;
+    /* inplace doesn't use SharedInvalidMessagesArray */
+}
+
+/*
  * AtEOSubXact_Inval
  *		Process queued-up invalidation messages at end of subtransaction.
 *
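PreInplace_Inval() and AtInplace_Inval() split the work by failure semantics: unlinking the relcache init file can fail, so it runs before the critical section while erroring out is still harmless, and sending the messages must then succeed, so it runs inside the critical section using only no-fail operations. A toy illustration of that discipline, with invented names (prep_fallible, the /tmp path) rather than PostgreSQL code:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    /* Step that may fail: do it while aborting is still harmless, like
     * PreInplace_Inval() unlinking the relcache init file. */
    static int
    prep_fallible(const char *path)
    {
        if (unlink(path) != 0 && errno != ENOENT)
        {
            fprintf(stderr, "could not unlink %s: %s\n",
                    path, strerror(errno));
            return -1;          /* abort; nothing has been mutated yet */
        }
        return 0;
    }

    int
    main(void)
    {
        int     catalog = 0;        /* stands in for the shared buffer */
        int     inval_queue = 0;    /* stands in for the SI message queue */

        if (prep_fallible("/tmp/toy_relcache_init") != 0)
            return EXIT_FAILURE;

        /* "Critical section": from here on, only steps that cannot fail. */
        catalog = 1;                /* the inplace mutation (plus WAL, really) */
        inval_queue++;              /* AtInplace_Inval(): plain stores only */
        /* end critical section */

        printf("catalog=%d, queued invals=%d\n", catalog, inval_queue);
        return EXIT_SUCCESS;
    }

If the phases ran in the opposite order and the unlink failed mid-critical-section, the only recourse would be a PANIC; doing the fallible part first is what lets the comment promise "safe even if the inplace update does not complete".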
@@ -1032,9 +1114,20 @@ void
 AtEOSubXact_Inval(bool isCommit)
 {
     int         my_level;
-    TransInvalidationInfo *myInfo = transInvalInfo;
+    TransInvalidationInfo *myInfo;
 
-    /* Quick exit if no messages. */
+    /*
+     * Successful inplace update must clear this, but we clear it on abort.
+     * Inplace updates allocate this in CurrentMemoryContext, which has
+     * lifespan <= subtransaction lifespan.  Hence, don't free it explicitly.
+     */
+    if (isCommit)
+        Assert(inplaceInvalInfo == NULL);
+    else
+        inplaceInvalInfo = NULL;
+
+    /* Quick exit if no transactional messages. */
+    myInfo = transInvalInfo;
     if (myInfo == NULL)
         return;
 
@@ -1068,8 +1161,8 @@
                                    &myInfo->PriorCmdInvalidMsgs);
 
         /* Pending relcache inval becomes parent's problem too */
-        if (myInfo->RelcacheInitFileInval)
-            myInfo->parent->RelcacheInitFileInval = true;
+        if (myInfo->ii.RelcacheInitFileInval)
+            myInfo->parent->ii.RelcacheInitFileInval = true;
 
         /* Pop the transaction state stack */
         transInvalInfo = myInfo->parent;
@@ -1116,7 +1209,7 @@ CommandEndInvalidationMessages(void)
     if (transInvalInfo == NULL)
         return;
 
-    ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
+    ProcessInvalidationMessages(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
                                 LocalExecuteInvalidationMessage);
 
     /* WAL Log per-command invalidation messages for wal_level=logical */
@@ -1124,26 +1217,21 @@ CommandEndInvalidationMessages(void)
         LogLogicalInvalidations();
 
     AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
-                               &transInvalInfo->CurrentCmdInvalidMsgs);
+                               &transInvalInfo->ii.CurrentCmdInvalidMsgs);
 }
 
 /*
- * CacheInvalidateHeapTuple
- *		Register the given tuple for invalidation at end of command
- *		(ie, current command is creating or outdating this tuple).
- *		Also, detect whether a relcache invalidation is implied.
- *
- * For an insert or delete, tuple is the target tuple and newtuple is NULL.
- * For an update, we are called just once, with tuple being the old tuple
- * version and newtuple the new version.  This allows avoidance of duplicate
- * effort during an update.
+ * CacheInvalidateHeapTupleCommon
+ *		Common logic for end-of-command and inplace variants.
 */
-void
-CacheInvalidateHeapTuple(Relation relation,
-                         HeapTuple tuple,
-                         HeapTuple newtuple)
+static void
+CacheInvalidateHeapTupleCommon(Relation relation,
+                               HeapTuple tuple,
+                               HeapTuple newtuple,
+                               InvalidationInfo *(*prepare_callback) (void))
 {
+    InvalidationInfo *info;
     Oid         tupleRelId;
     Oid         databaseId;
     Oid         relationId;
@@ -1167,11 +1255,8 @@ CacheInvalidateHeapTuple(Relation relation,
     if (IsToastRelation(relation))
         return;
 
-    /*
-     * If we're not prepared to queue invalidation messages for this
-     * subtransaction level, get ready now.
-     */
-    PrepareInvalidationState();
+    /* Allocate any required resources. */
+    info = prepare_callback();
 
     /*
     * First let the catcache do its thing
     */
     tupleRelId = RelationGetRelid(relation);
     if (RelationInvalidatesSnapshotsOnly(tupleRelId))
     {
         databaseId = IsSharedRelation(tupleRelId) ? InvalidOid : MyDatabaseId;
-        RegisterSnapshotInvalidation(databaseId, tupleRelId);
+        RegisterSnapshotInvalidation(info, databaseId, tupleRelId);
     }
     else
         PrepareToInvalidateCacheTuple(relation, tuple, newtuple,
-                                      RegisterCatcacheInvalidation);
+                                      RegisterCatcacheInvalidation,
+                                      (void *) info);
 
     /*
     * Now, is this tuple one of the primary definers of a relcache entry? See
@@ -1257,7 +1343,44 @@
     /*
     * Yes.  We need to register a relcache invalidation event.
     */
-    RegisterRelcacheInvalidation(databaseId, relationId);
+    RegisterRelcacheInvalidation(info, databaseId, relationId);
+}
+
+/*
+ * CacheInvalidateHeapTuple
+ *		Register the given tuple for invalidation at end of command
+ *		(ie, current command is creating or outdating this tuple) and end of
+ *		transaction.  Also, detect whether a relcache invalidation is implied.
+ *
+ * For an insert or delete, tuple is the target tuple and newtuple is NULL.
+ * For an update, we are called just once, with tuple being the old tuple
+ * version and newtuple the new version.  This allows avoidance of duplicate
+ * effort during an update.
+ */
+void
+CacheInvalidateHeapTuple(Relation relation,
+                         HeapTuple tuple,
+                         HeapTuple newtuple)
+{
+    CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
+                                   PrepareInvalidationState);
+}
+
+/*
+ * CacheInvalidateHeapTupleInplace
+ *		Register the given tuple for nontransactional invalidation pertaining
+ *		to an inplace update.  Also, detect whether a relcache invalidation is
+ *		implied.
+ *
+ * Like CacheInvalidateHeapTuple(), but for inplace updates.
+ */
+void
+CacheInvalidateHeapTupleInplace(Relation relation,
+                                HeapTuple tuple,
+                                HeapTuple newtuple)
+{
+    CacheInvalidateHeapTupleCommon(relation, tuple, newtuple,
+                                   PrepareInplaceInvalidationState);
 }
 
 /*
@@ -1276,14 +1399,13 @@ CacheInvalidateCatalog(Oid catalogId)
 {
     Oid         databaseId;
 
-    PrepareInvalidationState();
-
     if (IsSharedRelation(catalogId))
         databaseId = InvalidOid;
     else
         databaseId = MyDatabaseId;
 
-    RegisterCatalogInvalidation(databaseId, catalogId);
+    RegisterCatalogInvalidation(PrepareInvalidationState(),
+                                databaseId, catalogId);
 }
 
 /*
@@ -1301,15 +1423,14 @@ CacheInvalidateRelcache(Relation relation)
     Oid         databaseId;
     Oid         relationId;
 
-    PrepareInvalidationState();
-
     relationId = RelationGetRelid(relation);
     if (relation->rd_rel->relisshared)
         databaseId = InvalidOid;
     else
         databaseId = MyDatabaseId;
 
-    RegisterRelcacheInvalidation(databaseId, relationId);
+    RegisterRelcacheInvalidation(PrepareInvalidationState(),
+                                 databaseId, relationId);
 }
 
 /*
@@ -1322,9 +1443,8 @@
 void
 CacheInvalidateRelcacheAll(void)
 {
-    PrepareInvalidationState();
-
-    RegisterRelcacheInvalidation(InvalidOid, InvalidOid);
+    RegisterRelcacheInvalidation(PrepareInvalidationState(),
+                                 InvalidOid, InvalidOid);
 }
 
 /*
@@ -1338,14 +1458,13 @@ CacheInvalidateRelcacheByTuple(HeapTuple classTuple)
     Oid         databaseId;
     Oid         relationId;
 
-    PrepareInvalidationState();
-
     relationId = classtup->oid;
     if (classtup->relisshared)
         databaseId = InvalidOid;
     else
         databaseId = MyDatabaseId;
-    RegisterRelcacheInvalidation(databaseId, relationId);
+    RegisterRelcacheInvalidation(PrepareInvalidationState(),
+                                 databaseId, relationId);
 }
 
 /*
@@ -1359,8 +1478,6 @@ CacheInvalidateRelcacheByRelid(Oid relid)
 {
     HeapTuple   tup;
 
-    PrepareInvalidationState();
-
     tup = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
     if (!HeapTupleIsValid(tup))
         elog(ERROR, "cache lookup failed for relation %u", relid);
@@ -1549,7 +1666,7 @@ LogLogicalInvalidations()
     if (transInvalInfo == NULL)
         return;
 
-    ProcessInvalidationMessagesMulti(&transInvalInfo->CurrentCmdInvalidMsgs,
+    ProcessInvalidationMessagesMulti(&transInvalInfo->ii.CurrentCmdInvalidMsgs,
                                      MakeSharedInvalidMessagesArray);
 
     Assert(!(numSharedInvalidMessagesArray > 0 &&
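CacheInvalidateHeapTupleCommon() selects its destination through a prepare callback rather than, say, a boolean flag: each wrapper stays a one-liner, and the common path cannot forget to set up state, since it must call the callback to obtain its InvalidationInfo at all. The shape, reduced to a runnable toy with invented names (Info, prepare_trans, prepare_inplace, invalidate_common):

    #include <stdio.h>

    /* Toy of the prepare_callback pattern: the wrapper chooses which
     * state-initializer the common code uses. */
    typedef struct Info { int nmsgs; } Info;

    static Info transInfo;
    static Info inplaceInfo;

    static Info *
    prepare_trans(void)
    {
        return &transInfo;      /* real code: PrepareInvalidationState() */
    }

    static Info *
    prepare_inplace(void)
    {
        return &inplaceInfo;    /* real: PrepareInplaceInvalidationState() */
    }

    static void
    invalidate_common(Info *(*prepare) (void))
    {
        Info *info = prepare(); /* allocate/select required resources */

        info->nmsgs++;          /* register the invalidation */
    }

    int
    main(void)
    {
        invalidate_common(prepare_trans);   /* CacheInvalidateHeapTuple() */
        invalidate_common(prepare_inplace); /* ...HeapTupleInplace() */
        printf("trans=%d inplace=%d\n", transInfo.nmsgs, inplaceInfo.nmsgs);
        return 0;
    }

Usage mirrors the two wrappers above: CacheInvalidateHeapTuple() passes PrepareInvalidationState, while CacheInvalidateHeapTupleInplace() passes PrepareInplaceInvalidationState.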
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 7d573b6e0ef..ce5d2e87173 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -1260,8 +1260,7 @@ SearchSysCacheLocked1(int cacheId,
 
         /*
          * If an inplace update just finished, ensure we process the syscache
-         * inval.  XXX this is insufficient: the inplace updater may not yet
-         * have reached AtEOXact_Inval().  See test at inplace-inval.spec.
+         * inval.
          *
          * If a heap_update() call just released its LOCKTAG_TUPLE, we'll
          * probably find the old tuple and reach "tuple concurrently updated".
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index ddc2762eb3f..c4fd4d1232b 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -223,7 +223,8 @@ extern void CatCacheInvalidate(CatCache *cache, uint32 hashValue);
 extern void PrepareToInvalidateCacheTuple(Relation relation,
                                           HeapTuple tuple,
                                           HeapTuple newtuple,
-                                          void (*function) (int, uint32, Oid));
+                                          void (*function) (int, uint32, Oid, void *),
+                                          void *context);
 
 extern void PrintCatCacheLeakWarning(HeapTuple tuple);
 extern void PrintCatCacheListLeakWarning(CatCList *list);
diff --git a/src/include/utils/inval.h b/src/include/utils/inval.h
index 877e66c63c8..c89ba720d83 100644
--- a/src/include/utils/inval.h
+++ b/src/include/utils/inval.h
@@ -28,6 +28,9 @@ extern void AcceptInvalidationMessages(void);
 
 extern void AtEOXact_Inval(bool isCommit);
 
+extern void PreInplace_Inval(void);
+extern void AtInplace_Inval(void);
+
 extern void AtEOSubXact_Inval(bool isCommit);
 
 extern void PostPrepare_Inval(void);
@@ -37,6 +40,9 @@ extern void CommandEndInvalidationMessages(void);
 extern void CacheInvalidateHeapTuple(Relation relation,
                                      HeapTuple tuple,
                                      HeapTuple newtuple);
+extern void CacheInvalidateHeapTupleInplace(Relation relation,
+                                            HeapTuple tuple,
+                                            HeapTuple newtuple);
 
 extern void CacheInvalidateCatalog(Oid catalogId);
diff --git a/src/test/isolation/expected/inplace-inval.out b/src/test/isolation/expected/inplace-inval.out
index e68eca5de98..c35895a8aa7 100644
--- a/src/test/isolation/expected/inplace-inval.out
+++ b/src/test/isolation/expected/inplace-inval.out
@@ -1,6 +1,6 @@
 Parsed test spec with 3 sessions
 
-starting permutation: cachefill3 cir1 cic2 ddl3
+starting permutation: cachefill3 cir1 cic2 ddl3 read1
 step cachefill3: TABLE newly_indexed;
 c
 -
@@ -9,6 +9,14 @@ c
 step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK;
 step cic2: CREATE INDEX i2 ON newly_indexed (c);
 step ddl3: ALTER TABLE newly_indexed ADD extra int;
+step read1: 
+	SELECT relhasindex FROM pg_class WHERE oid = 'newly_indexed'::regclass;
+
+relhasindex
+-----------
+t          
+(1 row)
+
 
 starting permutation: cir1 cic2 ddl3 read1
 step cir1: BEGIN; CREATE INDEX i1 ON newly_indexed (c); ROLLBACK;
diff --git a/src/test/isolation/specs/inplace-inval.spec b/src/test/isolation/specs/inplace-inval.spec
index 96954fd86c4..b99112ddb88 100644
--- a/src/test/isolation/specs/inplace-inval.spec
+++ b/src/test/isolation/specs/inplace-inval.spec
@@ -1,7 +1,7 @@
-# If a heap_update() caller retrieves its oldtup from a cache, it's possible
-# for that cache entry to predate an inplace update, causing loss of that
-# inplace update.  This arises because the transaction may abort before
-# sending the inplace invalidation message to the shared queue.
+# An inplace update had been able to abort before sending the inplace
+# invalidation message to the shared queue.  If a heap_update() caller then
+# retrieved its oldtup from a cache, the heap_update() could revert the
+# inplace update.
 
 setup
 {
@@ -27,14 +27,12 @@ step cachefill3	{ TABLE newly_indexed; }
 step ddl3		{ ALTER TABLE newly_indexed ADD extra int; }
 
 
-# XXX shows an extant bug.  Adding step read1 at the end would usually print
-# relhasindex=f (not wanted).  This does not reach the unwanted behavior under
-# -DCATCACHE_FORCE_RELEASE and friends.
 permutation
 	cachefill3	# populates the pg_class row in the catcache
 	cir1		# sets relhasindex=true; rollback discards cache inval
 	cic2		# sees relhasindex=true, skips changing it (so no inval)
 	ddl3		# cached row as the oldtup of an update, losing relhasindex
+	read1		# observe damage
 
 # without cachefill3, no bug
 permutation cir1 cic2 ddl3 read1