-rw-r--r--  src/backend/access/heap/README.tuplock               |  42
-rw-r--r--  src/backend/access/heap/heapam.c                     | 150
-rw-r--r--  src/backend/access/index/genam.c                     |   4
-rw-r--r--  src/backend/catalog/aclchk.c                         |   9
-rw-r--r--  src/backend/catalog/catalog.c                        |   9
-rw-r--r--  src/backend/commands/dbcommands.c                    |  14
-rw-r--r--  src/backend/commands/indexcmds.c                     |   7
-rw-r--r--  src/backend/commands/tablecmds.c                     |  28
-rw-r--r--  src/backend/executor/execMain.c                      |   7
-rw-r--r--  src/backend/executor/execReplication.c               |   7
-rw-r--r--  src/backend/executor/nodeModifyTable.c               |  30
-rw-r--r--  src/backend/utils/cache/relcache.c                   |   9
-rw-r--r--  src/backend/utils/cache/syscache.c                   | 117
-rw-r--r--  src/include/nodes/execnodes.h                        |   3
-rw-r--r--  src/include/storage/lockdefs.h                       |   2
-rw-r--r--  src/include/utils/syscache.h                         |   5
-rw-r--r--  src/test/isolation/expected/intra-grant-inplace.out  |  14
-rw-r--r--  src/test/isolation/specs/eval-plan-qual.spec         |   2
-rw-r--r--  src/test/isolation/specs/intra-grant-inplace.spec    |  12
19 files changed, 437 insertions(+), 34 deletions(-)
diff --git a/src/backend/access/heap/README.tuplock b/src/backend/access/heap/README.tuplock
index ddb2defd28b..818cd7f9806 100644
--- a/src/backend/access/heap/README.tuplock
+++ b/src/backend/access/heap/README.tuplock
@@ -154,6 +154,48 @@ The following infomask bits are applicable:
We currently never set the HEAP_XMAX_COMMITTED when the HEAP_XMAX_IS_MULTI bit
is set.
+Locking to write inplace-updated tables
+---------------------------------------
+
+If IsInplaceUpdateRelation() returns true for a table, the table is a system
+catalog that receives systable_inplace_update_begin() calls. Preparing a
+heap_update() of these tables follows additional locking rules, to ensure we
+don't lose the effects of an inplace update. In particular, consider a moment
+when a backend has fetched the old tuple to modify, not yet having called
+heap_update(). Another backend's inplace update starting then can't conclude
+until the heap_update() places its new tuple in a buffer. We enforce that
+using locktags as follows. While DDL code is the main audience, the executor
+follows these rules to make e.g. "MERGE INTO pg_class" safer. Locking rules
+are per-catalog:
+
+ pg_class systable_inplace_update_begin() callers: before the call, acquire a
+ lock on the relation in mode ShareUpdateExclusiveLock or stricter. If the
+ update targets a row of RELKIND_INDEX (but not RELKIND_PARTITIONED_INDEX),
+ that lock must be on the table. Locking the index rel is not necessary.
+ (This allows VACUUM to overwrite per-index pg_class while holding a lock on
+ the table alone.) systable_inplace_update_begin() acquires and releases
+ LOCKTAG_TUPLE in InplaceUpdateTupleLock, an alias for ExclusiveLock, on each
+ tuple it overwrites.
+
+ pg_class heap_update() callers: before copying the tuple to modify, take a
+ lock on the tuple, a ShareUpdateExclusiveLock on the relation, or a
+ ShareRowExclusiveLock or stricter on the relation.
+
+ SearchSysCacheLocked1() is one convenient way to acquire the tuple lock.
+ Most heap_update() callers already hold a suitable lock on the relation for
+ other reasons and can skip the tuple lock. If you do acquire the tuple
+ lock, release it immediately after the update.
+
+
+ pg_database: before copying the tuple to modify, all updaters of pg_database
+ rows acquire LOCKTAG_TUPLE. (Few updaters acquire LOCKTAG_OBJECT on the
+ database OID, so it wasn't worth extending that as a second option.)
+
+Ideally, DDL might want to perform permissions checks before LockTuple(), as
+we do with RangeVarGetRelidExtended() callbacks. We typically don't bother.
+LOCKTAG_TUPLE acquirers release it after each row, so the potential
+inconvenience is lower.
+
Reading inplace-updated columns
-------------------------------
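
As an illustration of the pg_database rule above (not part of this patch): a minimal sketch of a heap_update() caller that follows it, using the SearchSysCacheLockedCopy1() helper added below in syscache.c. The function name set_datallowconn and the column it touches are invented for the example; the point is the fetch-locked / update / unlock ordering.

#include "postgres.h"

#include "access/htup_details.h"
#include "access/table.h"
#include "catalog/indexing.h"
#include "catalog/pg_database.h"
#include "storage/lmgr.h"
#include "utils/syscache.h"

/* Hypothetical helper: flip datallowconn for one pg_database row. */
static void
set_datallowconn(Oid dboid, bool allowconn)
{
	Relation	rel = table_open(DatabaseRelationId, RowExclusiveLock);
	HeapTuple	tup;
	ItemPointerData otid;

	/* Fetch a modifiable copy with its LOCKTAG_TUPLE already held. */
	tup = SearchSysCacheLockedCopy1(DATABASEOID, ObjectIdGetDatum(dboid));
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for database %u", dboid);
	otid = tup->t_self;

	((Form_pg_database) GETSTRUCT(tup))->datallowconn = allowconn;
	CatalogTupleUpdate(rel, &otid, tup);

	/* Release the tuple lock as soon as the update has installed the new tuple. */
	UnlockTuple(rel, &otid, InplaceUpdateTupleLock);
	heap_freetuple(tup);
	table_close(rel, RowExclusiveLock);
}

The unlock-after-update ordering is what closes the window the README describes: an inplace updater blocked in LockTuple() cannot start until the new tuple is in a buffer.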
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 1843153b40a..a7267fbb3be 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -52,6 +52,8 @@
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
+#include "catalog/pg_database.h"
+#include "catalog/pg_database_d.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
@@ -78,6 +80,12 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
Buffer newbuf, HeapTuple oldtup,
HeapTuple newtup, HeapTuple old_key_tuple,
bool all_visible_cleared, bool new_all_visible_cleared);
+#ifdef USE_ASSERT_CHECKING
+static void check_lock_if_inplace_updateable_rel(Relation relation,
+ ItemPointer otid,
+ HeapTuple newtup);
+static void check_inplace_rel_lock(HeapTuple oldtup);
+#endif
static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
Bitmapset *interesting_cols,
Bitmapset *external_cols,
@@ -119,6 +127,8 @@ static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_re
* heavyweight lock mode and MultiXactStatus values to use for any particular
* tuple lock strength.
*
+ * These interact with InplaceUpdateTupleLock, an alias for ExclusiveLock.
+ *
* Don't look at lockstatus/updstatus directly! Use get_mxact_status_for_lock
* instead.
*/
@@ -3250,6 +3260,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
errmsg("cannot update tuples during a parallel operation")));
+#ifdef USE_ASSERT_CHECKING
+ check_lock_if_inplace_updateable_rel(relation, otid, newtup);
+#endif
+
/*
* Fetch the list of attributes to be checked for various operations.
*
@@ -4095,6 +4109,128 @@ l2:
return TM_Ok;
}
+#ifdef USE_ASSERT_CHECKING
+/*
+ * Confirm adequate lock held during heap_update(), per rules from
+ * README.tuplock section "Locking to write inplace-updated tables".
+ */
+static void
+check_lock_if_inplace_updateable_rel(Relation relation,
+ ItemPointer otid,
+ HeapTuple newtup)
+{
+ /* LOCKTAG_TUPLE acceptable for any catalog */
+ switch (RelationGetRelid(relation))
+ {
+ case RelationRelationId:
+ case DatabaseRelationId:
+ {
+ LOCKTAG tuptag;
+
+ SET_LOCKTAG_TUPLE(tuptag,
+ relation->rd_lockInfo.lockRelId.dbId,
+ relation->rd_lockInfo.lockRelId.relId,
+ ItemPointerGetBlockNumber(otid),
+ ItemPointerGetOffsetNumber(otid));
+ if (LockHeldByMe(&tuptag, InplaceUpdateTupleLock))
+ return;
+ }
+ break;
+ default:
+ Assert(!IsInplaceUpdateRelation(relation));
+ return;
+ }
+
+ switch (RelationGetRelid(relation))
+ {
+ case RelationRelationId:
+ {
+ /* LOCKTAG_TUPLE or LOCKTAG_RELATION ok */
+ Form_pg_class classForm = (Form_pg_class) GETSTRUCT(newtup);
+ Oid relid = classForm->oid;
+ Oid dbid;
+ LOCKTAG tag;
+
+ if (IsSharedRelation(relid))
+ dbid = InvalidOid;
+ else
+ dbid = MyDatabaseId;
+
+ if (classForm->relkind == RELKIND_INDEX)
+ {
+ Relation irel = index_open(relid, AccessShareLock);
+
+ SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
+ index_close(irel, AccessShareLock);
+ }
+ else
+ SET_LOCKTAG_RELATION(tag, dbid, relid);
+
+ if (!LockHeldByMe(&tag, ShareUpdateExclusiveLock) &&
+ !LockOrStrongerHeldByMe(&tag, ShareRowExclusiveLock))
+ elog(WARNING,
+ "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
+ NameStr(classForm->relname),
+ relid,
+ classForm->relkind,
+ ItemPointerGetBlockNumber(otid),
+ ItemPointerGetOffsetNumber(otid));
+ }
+ break;
+ case DatabaseRelationId:
+ {
+ /* LOCKTAG_TUPLE required */
+ Form_pg_database dbForm = (Form_pg_database) GETSTRUCT(newtup);
+
+ elog(WARNING,
+ "missing lock on database \"%s\" (OID %u) @ TID (%u,%u)",
+ NameStr(dbForm->datname),
+ dbForm->oid,
+ ItemPointerGetBlockNumber(otid),
+ ItemPointerGetOffsetNumber(otid));
+ }
+ break;
+ }
+}
+
+/*
+ * Confirm adequate relation lock held, per rules from README.tuplock section
+ * "Locking to write inplace-updated tables".
+ */
+static void
+check_inplace_rel_lock(HeapTuple oldtup)
+{
+ Form_pg_class classForm = (Form_pg_class) GETSTRUCT(oldtup);
+ Oid relid = classForm->oid;
+ Oid dbid;
+ LOCKTAG tag;
+
+ if (IsSharedRelation(relid))
+ dbid = InvalidOid;
+ else
+ dbid = MyDatabaseId;
+
+ if (classForm->relkind == RELKIND_INDEX)
+ {
+ Relation irel = index_open(relid, AccessShareLock);
+
+ SET_LOCKTAG_RELATION(tag, dbid, irel->rd_index->indrelid);
+ index_close(irel, AccessShareLock);
+ }
+ else
+ SET_LOCKTAG_RELATION(tag, dbid, relid);
+
+ if (!LockOrStrongerHeldByMe(&tag, ShareUpdateExclusiveLock))
+ elog(WARNING,
+ "missing lock for relation \"%s\" (OID %u, relkind %c) @ TID (%u,%u)",
+ NameStr(classForm->relname),
+ relid,
+ classForm->relkind,
+ ItemPointerGetBlockNumber(&oldtup->t_self),
+ ItemPointerGetOffsetNumber(&oldtup->t_self));
+}
+#endif
+
/*
* Check if the specified attribute's values are the same. Subroutine for
* HeapDetermineColumnsInfo.
@@ -6120,15 +6256,21 @@ heap_inplace_lock(Relation relation,
TM_Result result;
bool ret;
+#ifdef USE_ASSERT_CHECKING
+ if (RelationGetRelid(relation) == RelationRelationId)
+ check_inplace_rel_lock(oldtup_ptr);
+#endif
+
Assert(BufferIsValid(buffer));
+ LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*----------
* Interpret HeapTupleSatisfiesUpdate() like heap_update() does, except:
*
* - wait unconditionally
- * - no tuple locks
+ * - already locked tuple above, since inplace needs that unconditionally
* - don't recheck header after wait: simpler to defer to next iteration
* - don't try to continue even if the updater aborts: likewise
* - no crosscheck
@@ -6212,7 +6354,10 @@ heap_inplace_lock(Relation relation,
* don't bother optimizing that.
*/
if (!ret)
+ {
+ UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
InvalidateCatalogSnapshot();
+ }
return ret;
}
@@ -6221,6 +6366,8 @@ heap_inplace_lock(Relation relation,
*
* The tuple cannot change size, and therefore its header fields and null
* bitmap (if any) don't change either.
+ *
+ * Since we hold LOCKTAG_TUPLE, no updater has a local copy of this tuple.
*/
void
heap_inplace_update_and_unlock(Relation relation,
@@ -6304,6 +6451,7 @@ heap_inplace_unlock(Relation relation,
HeapTuple oldtup, Buffer buffer)
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+ UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
}
/*
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 7741145733d..d2f5bf94a07 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -752,7 +752,9 @@ systable_endscan_ordered(SysScanDesc sysscan)
*
* Overwriting violates both MVCC and transactional safety, so the uses of
* this function in Postgres are extremely limited. Nonetheless we find some
- * places to use it. Standard flow:
+ * places to use it. See README.tuplock section "Locking to write
+ * inplace-updated tables" and later sections for expectations of readers and
+ * writers of a table that gets inplace updates. Standard flow:
*
* ... [any slow preparation not requiring oldtup] ...
* systable_inplace_update_begin([...], &tup, &inplace_state);
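
For reference (not part of the patch), a sketch of that standard flow as an inplace updater of pg_class might use it, in the spirit of vac_update_relstats(). It assumes the systable_inplace_update_finish()/_cancel() companions to _begin(); the helper name overwrite_reltuples is invented, and the caller is assumed to already hold at least ShareUpdateExclusiveLock on the target relation per the new README.tuplock rules.

#include "postgres.h"

#include "access/genam.h"
#include "access/htup_details.h"
#include "access/skey.h"
#include "access/stratnum.h"
#include "catalog/pg_class.h"
#include "utils/fmgroids.h"
#include "utils/rel.h"

/* Hypothetical helper: overwrite reltuples for one pg_class row in place. */
static void
overwrite_reltuples(Relation pg_class_rel, Oid relid, float4 newreltuples)
{
	ScanKeyData key[1];
	HeapTuple	tup;
	void	   *inplace_state;
	Form_pg_class pgcform;

	ScanKeyInit(&key[0],
				Anum_pg_class_oid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(relid));

	/* Returns with the buffer exclusive-locked and LOCKTAG_TUPLE held. */
	systable_inplace_update_begin(pg_class_rel, ClassOidIndexId, true,
								  NULL, 1, key, &tup, &inplace_state);
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "could not find tuple for relation %u", relid);

	pgcform = (Form_pg_class) GETSTRUCT(tup);
	if (pgcform->reltuples != newreltuples)
	{
		pgcform->reltuples = newreltuples;
		systable_inplace_update_finish(inplace_state, tup);
	}
	else
		systable_inplace_update_cancel(inplace_state);

	heap_freetuple(tup);
}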
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index cb6cbee5949..47202d7c2d9 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -68,6 +68,7 @@
#include "nodes/makefuncs.h"
#include "parser/parse_func.h"
#include "parser/parse_type.h"
+#include "storage/lmgr.h"
#include "utils/acl.h"
#include "utils/aclchk_internal.h"
#include "utils/builtins.h"
@@ -1779,7 +1780,7 @@ ExecGrant_Relation(InternalGrant *istmt)
HeapTuple tuple;
ListCell *cell_colprivs;
- tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relOid));
+ tuple = SearchSysCacheLocked1(RELOID, ObjectIdGetDatum(relOid));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", relOid);
pg_class_tuple = (Form_pg_class) GETSTRUCT(tuple);
@@ -1995,6 +1996,7 @@ ExecGrant_Relation(InternalGrant *istmt)
values, nulls, replaces);
CatalogTupleUpdate(relation, &newtuple->t_self, newtuple);
+ UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
/* Update initial privileges for extensions */
recordExtensionInitPriv(relOid, RelationRelationId, 0, new_acl);
@@ -2007,6 +2009,8 @@ ExecGrant_Relation(InternalGrant *istmt)
pfree(new_acl);
}
+ else
+ UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
/*
* Handle column-level privileges, if any were specified or implied.
@@ -2116,7 +2120,7 @@ ExecGrant_Database(InternalGrant *istmt)
Oid *newmembers;
HeapTuple tuple;
- tuple = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(datId));
+ tuple = SearchSysCacheLocked1(DATABASEOID, ObjectIdGetDatum(datId));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for database %u", datId);
@@ -2185,6 +2189,7 @@ ExecGrant_Database(InternalGrant *istmt)
nulls, replaces);
CatalogTupleUpdate(relation, &newtuple->t_self, newtuple);
+ UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
/* Update the shared dependency ACL info */
updateAclDependencies(DatabaseRelationId, pg_database_tuple->oid, 0,
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index b54622895f6..98221150c7b 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -138,6 +138,15 @@ IsCatalogRelationOid(Oid relid)
/*
* IsInplaceUpdateRelation
* True iff core code performs inplace updates on the relation.
+ *
+ * This is used for assertions and for making the executor follow the
+ * locking protocol described at README.tuplock section "Locking to write
+ * inplace-updated tables". Extensions may inplace-update other heap
+ * tables, but concurrent SQL UPDATE on the same table may overwrite
+ * those modifications.
+ *
+ * The executor can assume these are not partitions or partitioned and
+ * have no triggers.
*/
bool
IsInplaceUpdateRelation(Relation relation)
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index d3c7cae6b2f..57e342a61e8 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -1053,6 +1053,7 @@ RenameDatabase(const char *oldname, const char *newname)
{
Oid db_id;
HeapTuple newtup;
+ ItemPointerData otid;
Relation rel;
int notherbackends;
int npreparedxacts;
@@ -1124,11 +1125,13 @@ RenameDatabase(const char *oldname, const char *newname)
errdetail_busy_db(notherbackends, npreparedxacts)));
/* rename */
- newtup = SearchSysCacheCopy1(DATABASEOID, ObjectIdGetDatum(db_id));
+ newtup = SearchSysCacheLockedCopy1(DATABASEOID, ObjectIdGetDatum(db_id));
if (!HeapTupleIsValid(newtup))
elog(ERROR, "cache lookup failed for database %u", db_id);
+ otid = newtup->t_self;
namestrcpy(&(((Form_pg_database) GETSTRUCT(newtup))->datname), newname);
- CatalogTupleUpdate(rel, &newtup->t_self, newtup);
+ CatalogTupleUpdate(rel, &otid, newtup);
+ UnlockTuple(rel, &otid, InplaceUpdateTupleLock);
InvokeObjectPostAlterHook(DatabaseRelationId, db_id, 0);
@@ -1372,6 +1375,7 @@ movedb(const char *dbname, const char *tblspcname)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist", dbname)));
+ LockTuple(pgdbrel, &oldtuple->t_self, InplaceUpdateTupleLock);
MemSet(new_record, 0, sizeof(new_record));
MemSet(new_record_nulls, false, sizeof(new_record_nulls));
@@ -1384,6 +1388,7 @@ movedb(const char *dbname, const char *tblspcname)
new_record,
new_record_nulls, new_record_repl);
CatalogTupleUpdate(pgdbrel, &oldtuple->t_self, newtuple);
+ UnlockTuple(pgdbrel, &oldtuple->t_self, InplaceUpdateTupleLock);
InvokeObjectPostAlterHook(DatabaseRelationId, db_id, 0);
@@ -1620,6 +1625,7 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist", stmt->dbname)));
+ LockTuple(rel, &tuple->t_self, InplaceUpdateTupleLock);
datform = (Form_pg_database) GETSTRUCT(tuple);
dboid = datform->oid;
@@ -1673,6 +1679,7 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel)
newtuple = heap_modify_tuple(tuple, RelationGetDescr(rel), new_record,
new_record_nulls, new_record_repl);
CatalogTupleUpdate(rel, &tuple->t_self, newtuple);
+ UnlockTuple(rel, &tuple->t_self, InplaceUpdateTupleLock);
InvokeObjectPostAlterHook(DatabaseRelationId, dboid, 0);
@@ -1783,6 +1790,8 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to change owner of database")));
+ LockTuple(rel, &tuple->t_self, InplaceUpdateTupleLock);
+
memset(repl_null, false, sizeof(repl_null));
memset(repl_repl, false, sizeof(repl_repl));
@@ -1807,6 +1816,7 @@ AlterDatabaseOwner(const char *dbname, Oid newOwnerId)
newtuple = heap_modify_tuple(tuple, RelationGetDescr(rel), repl_val, repl_null, repl_repl);
CatalogTupleUpdate(rel, &newtuple->t_self, newtuple);
+ UnlockTuple(rel, &tuple->t_self, InplaceUpdateTupleLock);
heap_freetuple(newtuple);
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 48b6c55e55d..99c536891a7 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -4299,14 +4299,17 @@ update_relispartition(Oid relationId, bool newval)
{
HeapTuple tup;
Relation classRel;
+ ItemPointerData otid;
classRel = table_open(RelationRelationId, RowExclusiveLock);
- tup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relationId));
+ tup = SearchSysCacheLockedCopy1(RELOID, ObjectIdGetDatum(relationId));
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for relation %u", relationId);
+ otid = tup->t_self;
Assert(((Form_pg_class) GETSTRUCT(tup))->relispartition != newval);
((Form_pg_class) GETSTRUCT(tup))->relispartition = newval;
- CatalogTupleUpdate(classRel, &tup->t_self, tup);
+ CatalogTupleUpdate(classRel, &otid, tup);
+ UnlockTuple(classRel, &otid, InplaceUpdateTupleLock);
heap_freetuple(tup);
table_close(classRel, RowExclusiveLock);
}
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 1646e9e3789..2c68d89f379 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -3322,6 +3322,7 @@ SetRelationTableSpace(Relation rel,
{
Relation pg_class;
HeapTuple tuple;
+ ItemPointerData otid;
Form_pg_class rd_rel;
Oid reloid = RelationGetRelid(rel);
@@ -3330,9 +3331,10 @@ SetRelationTableSpace(Relation rel,
/* Get a modifiable copy of the relation's pg_class row. */
pg_class = table_open(RelationRelationId, RowExclusiveLock);
- tuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(reloid));
+ tuple = SearchSysCacheLockedCopy1(RELOID, ObjectIdGetDatum(reloid));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", reloid);
+ otid = tuple->t_self;
rd_rel = (Form_pg_class) GETSTRUCT(tuple);
/* Update the pg_class row. */
@@ -3340,7 +3342,8 @@ SetRelationTableSpace(Relation rel,
InvalidOid : newTableSpaceId;
if (OidIsValid(newRelFileNode))
rd_rel->relfilenode = newRelFileNode;
- CatalogTupleUpdate(pg_class, &tuple->t_self, tuple);
+ CatalogTupleUpdate(pg_class, &otid, tuple);
+ UnlockTuple(pg_class, &otid, InplaceUpdateTupleLock);
/*
* Record dependency on tablespace. This is only required for relations
@@ -3834,6 +3837,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal, bo
{
Relation targetrelation;
Relation relrelation; /* for RELATION relation */
+ ItemPointerData otid;
HeapTuple reltup;
Form_pg_class relform;
Oid namespaceId;
@@ -3856,7 +3860,8 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal, bo
*/
relrelation = table_open(RelationRelationId, RowExclusiveLock);
- reltup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(myrelid));
+ reltup = SearchSysCacheLockedCopy1(RELOID, ObjectIdGetDatum(myrelid));
if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", myrelid);
+ otid = reltup->t_self;
relform = (Form_pg_class) GETSTRUCT(reltup);
@@ -3883,7 +3888,8 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal, bo
*/
namestrcpy(&(relform->relname), newrelname);
- CatalogTupleUpdate(relrelation, &reltup->t_self, reltup);
+ CatalogTupleUpdate(relrelation, &otid, reltup);
+ UnlockTuple(relrelation, &otid, InplaceUpdateTupleLock);
InvokeObjectPostAlterHookArg(RelationRelationId, myrelid, 0,
InvalidOid, is_internal);
@@ -13856,7 +13862,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
/* Fetch heap tuple */
relid = RelationGetRelid(rel);
- tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
+ tuple = SearchSysCacheLocked1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", relid);
@@ -13959,6 +13965,7 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
repl_val, repl_null, repl_repl);
CatalogTupleUpdate(pgclass, &newtuple->t_self, newtuple);
+ UnlockTuple(pgclass, &tuple->t_self, InplaceUpdateTupleLock);
InvokeObjectPostAlterHook(RelationRelationId, RelationGetRelid(rel), 0);
@@ -16156,7 +16163,8 @@ AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
ObjectAddress thisobj;
bool already_done = false;
- classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid));
+ /* no rel lock for relkind=c so use LOCKTAG_TUPLE */
+ classTup = SearchSysCacheLockedCopy1(RELOID, ObjectIdGetDatum(relOid));
if (!HeapTupleIsValid(classTup))
elog(ERROR, "cache lookup failed for relation %u", relOid);
classForm = (Form_pg_class) GETSTRUCT(classTup);
@@ -16175,6 +16183,8 @@ AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
already_done = object_address_present(&thisobj, objsMoved);
if (!already_done && oldNspOid != newNspOid)
{
+ ItemPointerData otid = classTup->t_self;
+
/* check for duplicate name (more friendly than unique-index failure) */
if (get_relname_relid(NameStr(classForm->relname),
newNspOid) != InvalidOid)
@@ -16187,7 +16197,9 @@ AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
/* classTup is a copy, so OK to scribble on */
classForm->relnamespace = newNspOid;
- CatalogTupleUpdate(classRel, &classTup->t_self, classTup);
+ CatalogTupleUpdate(classRel, &otid, classTup);
+ UnlockTuple(classRel, &otid, InplaceUpdateTupleLock);
+
/* Update dependency on schema if caller said so */
if (hasDependEntry &&
@@ -16199,6 +16211,8 @@ AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
elog(ERROR, "failed to change schema dependency for relation \"%s\"",
NameStr(classForm->relname));
}
+ else
+ UnlockTuple(classRel, &classTup->t_self, InplaceUpdateTupleLock);
if (!already_done)
{
add_exact_object_address(&thisobj, objsMoved);
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 17a5ce61736..150d369d055 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -43,6 +43,7 @@
#include "access/tableam.h"
#include "access/transam.h"
#include "access/xact.h"
+#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "catalog/pg_publication.h"
#include "commands/matview.h"
@@ -997,6 +998,10 @@ CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
TriggerDesc *trigDesc = resultRel->trigdesc;
FdwRoutine *fdwroutine;
+ /* Expect a fully-formed ResultRelInfo from InitResultRelInfo(). */
+ Assert(resultRelInfo->ri_needLockTagTuple ==
+ IsInplaceUpdateRelation(resultRel));
+
switch (resultRel->rd_rel->relkind)
{
case RELKIND_RELATION:
@@ -1205,6 +1210,8 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_NumIndices = 0;
resultRelInfo->ri_IndexRelationDescs = NULL;
resultRelInfo->ri_IndexRelationInfo = NULL;
+ resultRelInfo->ri_needLockTagTuple =
+ IsInplaceUpdateRelation(resultRelationDesc);
/* make a copy so as not to depend on relcache info not changing... */
resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
if (resultRelInfo->ri_TrigDesc)
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index dec65abaa98..8134dea406f 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -19,6 +19,7 @@
#include "access/tableam.h"
#include "access/transam.h"
#include "access/xact.h"
+#include "catalog/catalog.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/nodeModifyTable.h"
@@ -483,8 +484,12 @@ ExecSimpleRelationUpdate(ResultRelInfo *resultRelInfo,
Relation rel = resultRelInfo->ri_RelationDesc;
ItemPointer tid = &(searchslot->tts_tid);
- /* For now we support only tables. */
+ /*
+ * We support only non-system tables, with
+ * check_publication_add_relation() accountable.
+ */
Assert(rel->rd_rel->relkind == RELKIND_RELATION);
+ Assert(!IsCatalogRelation(rel));
CheckCmdReplicaIdentity(rel, CMD_UPDATE);
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 1006f450258..214482d193d 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -1825,6 +1825,7 @@ ExecUpdate(ModifyTableState *mtstate,
}
else
{
+ ItemPointerData lockedtid;
LockTupleMode lockmode;
bool partition_constraint_failed;
bool update_indexes;
@@ -1837,6 +1838,7 @@ ExecUpdate(ModifyTableState *mtstate,
* to do them again.)
*/
lreplace:
+ lockedtid = *tupleid;
/*
* Constraints and GENERATED expressions might reference the tableoid
@@ -2014,6 +2016,14 @@ lreplace:
if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
ExecInitUpdateProjection(mtstate, resultRelInfo);
+ if (resultRelInfo->ri_needLockTagTuple)
+ {
+ UnlockTuple(resultRelationDesc,
+ &lockedtid, InplaceUpdateTupleLock);
+ LockTuple(resultRelationDesc,
+ tupleid, InplaceUpdateTupleLock);
+ }
+
/* Fetch the most recent version of old tuple. */
oldSlot = resultRelInfo->ri_oldTupleSlot;
if (!table_tuple_fetch_row_version(resultRelationDesc,
@@ -2143,6 +2153,14 @@ ExecOnConflictUpdate(ModifyTableState *mtstate,
TransactionId xmin;
bool isnull;
+ /*
+ * Parse analysis should have blocked ON CONFLICT for all system
+ * relations, which includes these. There's no fundamental obstacle to
+ * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
+ * ExecUpdate() caller.
+ */
+ Assert(!resultRelInfo->ri_needLockTagTuple);
+
/* Determine lock mode to use */
lockmode = ExecUpdateLockMode(estate, resultRelInfo);
@@ -2502,6 +2520,7 @@ ExecModifyTable(PlanState *pstate)
ItemPointerData tuple_ctid;
HeapTupleData oldtupdata;
HeapTuple oldtuple;
+ bool tuplock;
CHECK_FOR_INTERRUPTS();
@@ -2701,6 +2720,8 @@ ExecModifyTable(PlanState *pstate)
estate, node->canSetTag);
break;
case CMD_UPDATE:
+ tuplock = false;
+
/* Initialize projection info if first time for this table */
if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
ExecInitUpdateProjection(node, resultRelInfo);
@@ -2712,6 +2733,7 @@ ExecModifyTable(PlanState *pstate)
oldSlot = resultRelInfo->ri_oldTupleSlot;
if (oldtuple != NULL)
{
+ Assert(!resultRelInfo->ri_needLockTagTuple);
/* Use the wholerow junk attr as the old tuple. */
ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
}
@@ -2721,6 +2743,11 @@ ExecModifyTable(PlanState *pstate)
Relation relation = resultRelInfo->ri_RelationDesc;
Assert(tupleid != NULL);
+ if (resultRelInfo->ri_needLockTagTuple)
+ {
+ LockTuple(relation, tupleid, InplaceUpdateTupleLock);
+ tuplock = true;
+ }
if (!table_tuple_fetch_row_version(relation, tupleid,
SnapshotAny,
oldSlot))
@@ -2733,6 +2760,9 @@ ExecModifyTable(PlanState *pstate)
slot = ExecUpdate(node, resultRelInfo, tupleid, oldtuple, slot,
planSlot, &node->mt_epqstate, estate,
node->canSetTag);
+ if (tuplock)
+ UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
+ InplaceUpdateTupleLock);
break;
case CMD_DELETE:
slot = ExecDelete(node, resultRelInfo, tupleid, oldtuple,
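
Taken together, the nodeModifyTable.c hunks amount to the following per-row protocol for CMD_UPDATE on an inplace-updated catalog. The condensed sketch below is illustrative only (update_one_catalog_row is not a real function); the actual code interleaves this with EPQ handling, which is why ExecUpdate() re-takes the lock at lreplace when the target TID changes.

#include "postgres.h"

#include "access/tableam.h"
#include "nodes/execnodes.h"
#include "storage/lmgr.h"
#include "utils/snapmgr.h"

/* Hypothetical condensation of the executor's new locking protocol. */
static void
update_one_catalog_row(ResultRelInfo *resultRelInfo,
					   ItemPointer tupleid,
					   TupleTableSlot *oldSlot)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	bool		tuplock = false;

	/* Lock the tuple before reading the old row version. */
	if (resultRelInfo->ri_needLockTagTuple)
	{
		LockTuple(rel, tupleid, InplaceUpdateTupleLock);
		tuplock = true;
	}

	if (!table_tuple_fetch_row_version(rel, tupleid, SnapshotAny, oldSlot))
		elog(ERROR, "failed to fetch tuple being updated");

	/* ... ExecUpdate() runs here, still holding LOCKTAG_TUPLE ... */

	if (tuplock)
		UnlockTuple(rel, tupleid, InplaceUpdateTupleLock);
}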
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 6c7a5a04fcb..01ff1528da9 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -3693,6 +3693,7 @@ RelationSetNewRelfilenode(Relation relation, char persistence)
{
Oid newrelfilenode;
Relation pg_class;
+ ItemPointerData otid;
HeapTuple tuple;
Form_pg_class classform;
MultiXactId minmulti = InvalidMultiXactId;
@@ -3708,11 +3709,12 @@ RelationSetNewRelfilenode(Relation relation, char persistence)
*/
pg_class = table_open(RelationRelationId, RowExclusiveLock);
- tuple = SearchSysCacheCopy1(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)));
+ tuple = SearchSysCacheLockedCopy1(RELOID,
+ ObjectIdGetDatum(RelationGetRelid(relation)));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "could not find tuple for relation %u",
RelationGetRelid(relation));
+ otid = tuple->t_self;
classform = (Form_pg_class) GETSTRUCT(tuple);
/*
@@ -3811,9 +3813,10 @@ RelationSetNewRelfilenode(Relation relation, char persistence)
classform->relminmxid = minmulti;
classform->relpersistence = persistence;
- CatalogTupleUpdate(pg_class, &tuple->t_self, tuple);
+ CatalogTupleUpdate(pg_class, &otid, tuple);
}
+ UnlockTuple(pg_class, &otid, InplaceUpdateTupleLock);
heap_freetuple(tuple);
table_close(pg_class, RowExclusiveLock);
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index e4dc4ee34ee..7d573b6e0ef 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -74,7 +74,10 @@
#include "catalog/pg_type.h"
#include "catalog/pg_user_mapping.h"
#include "lib/qunique.h"
+#include "miscadmin.h"
+#include "storage/lmgr.h"
#include "utils/catcache.h"
+#include "utils/inval.h"
#include "utils/rel.h"
#include "utils/syscache.h"
@@ -1178,6 +1181,98 @@ ReleaseSysCache(HeapTuple tuple)
}
/*
+ * SearchSysCacheLocked1
+ *
+ * Combine SearchSysCache1() with acquiring a LOCKTAG_TUPLE at mode
+ * InplaceUpdateTupleLock. This is a tool for complying with the
+ * README.tuplock section "Locking to write inplace-updated tables". After
+ * the caller's heap_update(), it should UnlockTuple(InplaceUpdateTupleLock)
+ * and ReleaseSysCache().
+ *
+ * The returned tuple may be the subject of an uncommitted update, so this
+ * doesn't prevent the "tuple concurrently updated" error.
+ */
+HeapTuple
+SearchSysCacheLocked1(int cacheId,
+ Datum key1)
+{
+ ItemPointerData tid;
+ LOCKTAG tag;
+ Oid dboid =
+ SysCache[cacheId]->cc_relisshared ? InvalidOid : MyDatabaseId;
+ Oid reloid = cacheinfo[cacheId].reloid;
+
+ /*----------
+ * Since inplace updates may happen just before our LockTuple(), we must
+ * return content acquired after LockTuple() of the TID we return. If we
+ * just fetched twice instead of looping, the following sequence would
+ * defeat our locking:
+ *
+ * GRANT: SearchSysCache1() = TID (1,5)
+ * GRANT: LockTuple(pg_class, (1,5))
+ * [no more inplace update of (1,5) until we release the lock]
+ * CLUSTER: SearchSysCache1() = TID (1,5)
+ * CLUSTER: heap_update() = TID (1,8)
+ * CLUSTER: COMMIT
+ * GRANT: SearchSysCache1() = TID (1,8)
+ * GRANT: return (1,8) from SearchSysCacheLocked1()
+ * VACUUM: SearchSysCache1() = TID (1,8)
+ * VACUUM: LockTuple(pg_class, (1,8)) # two TIDs now locked for one rel
+ * VACUUM: inplace update
+ * GRANT: heap_update() = (1,9) # lose inplace update
+ *
+ * In the happy case, this takes two fetches, one to determine the TID to
+ * lock and another to get the content and confirm the TID didn't change.
+ *
+ * This is valid even if the row gets updated to a new TID, the old TID
+ * becomes LP_UNUSED, and the row gets updated back to its old TID. We'd
+ * still hold the right LOCKTAG_TUPLE and a copy of the row captured after
+ * the LOCKTAG_TUPLE.
+ */
+ ItemPointerSetInvalid(&tid);
+ for (;;)
+ {
+ HeapTuple tuple;
+ LOCKMODE lockmode = InplaceUpdateTupleLock;
+
+ tuple = SearchSysCache1(cacheId, key1);
+ if (ItemPointerIsValid(&tid))
+ {
+ if (!HeapTupleIsValid(tuple))
+ {
+ LockRelease(&tag, lockmode, false);
+ return tuple;
+ }
+ if (ItemPointerEquals(&tid, &tuple->t_self))
+ return tuple;
+ LockRelease(&tag, lockmode, false);
+ }
+ else if (!HeapTupleIsValid(tuple))
+ return tuple;
+
+ tid = tuple->t_self;
+ ReleaseSysCache(tuple);
+ /* like: LockTuple(rel, &tid, lockmode) */
+ SET_LOCKTAG_TUPLE(tag, dboid, reloid,
+ ItemPointerGetBlockNumber(&tid),
+ ItemPointerGetOffsetNumber(&tid));
+ (void) LockAcquire(&tag, lockmode, false, false);
+
+ /*
+ * If an inplace update just finished, ensure we process the syscache
+ * inval. XXX this is insufficient: the inplace updater may not yet
+ * have reached AtEOXact_Inval(). See test at inplace-inval.spec.
+ *
+ * If a heap_update() call just released its LOCKTAG_TUPLE, we'll
+ * probably find the old tuple and reach "tuple concurrently updated".
+ * If that heap_update() aborts, our LOCKTAG_TUPLE blocks inplace
+ * updates while our caller works.
+ */
+ AcceptInvalidationMessages();
+ }
+}
+
+/*
* SearchSysCacheCopy
*
* A convenience routine that does SearchSysCache and (if successful)
@@ -1204,6 +1299,28 @@ SearchSysCacheCopy(int cacheId,
}
/*
+ * SearchSysCacheLockedCopy1
+ *
+ * Meld SearchSysCacheLocked1() with SearchSysCacheCopy(). After the
+ * caller's heap_update(), it should UnlockTuple(InplaceUpdateTupleLock) and
+ * heap_freetuple().
+ */
+HeapTuple
+SearchSysCacheLockedCopy1(int cacheId,
+ Datum key1)
+{
+ HeapTuple tuple,
+ newtuple;
+
+ tuple = SearchSysCacheLocked1(cacheId, key1);
+ if (!HeapTupleIsValid(tuple))
+ return tuple;
+ newtuple = heap_copytuple(tuple);
+ ReleaseSysCache(tuple);
+ return newtuple;
+}
+
+/*
* SearchSysCacheExists
*
* A convenience routine that just probes to see if a tuple can be found.
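
A hedged usage sketch for the new pg_class path, patterned on the ExecGrant_Relation() and ATExecSetRelOptions() changes in this patch; set_relhasindex and the column it flips are invented for illustration.

#include "postgres.h"

#include "access/htup_details.h"
#include "access/table.h"
#include "catalog/indexing.h"
#include "catalog/pg_class.h"
#include "storage/lmgr.h"
#include "utils/syscache.h"

/* Hypothetical helper: set relhasindex for one pg_class row via heap_update(). */
static void
set_relhasindex(Oid relid, bool hasindex)
{
	Relation	pg_class = table_open(RelationRelationId, RowExclusiveLock);
	HeapTuple	tuple;
	HeapTuple	newtuple;

	/* Returns the cache tuple with its LOCKTAG_TUPLE held. */
	tuple = SearchSysCacheLocked1(RELOID, ObjectIdGetDatum(relid));
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "cache lookup failed for relation %u", relid);

	newtuple = heap_copytuple(tuple);
	((Form_pg_class) GETSTRUCT(newtuple))->relhasindex = hasindex;
	CatalogTupleUpdate(pg_class, &newtuple->t_self, newtuple);

	/* Unlock the old TID only after heap_update() has installed the new tuple. */
	UnlockTuple(pg_class, &tuple->t_self, InplaceUpdateTupleLock);
	ReleaseSysCache(tuple);
	heap_freetuple(newtuple);
	table_close(pg_class, RowExclusiveLock);
}

Note that heap_copytuple() preserves t_self, so CatalogTupleUpdate() sees the old TID while UnlockTuple() later releases the lock on that same TID, as in the call sites changed above.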
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 8d1566e8d4c..0cf2b458311 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -524,6 +524,9 @@ typedef struct ResultRelInfo
/* for use by copyfrom.c when performing multi-inserts */
struct CopyMultiInsertBuffer *ri_CopyMultiInsertBuffer;
+
+ /* updates do LockTuple() before oldtup read; see README.tuplock */
+ bool ri_needLockTagTuple;
} ResultRelInfo;
/*
diff --git a/src/include/storage/lockdefs.h b/src/include/storage/lockdefs.h
index f99ad0eff60..c1fffc0964b 100644
--- a/src/include/storage/lockdefs.h
+++ b/src/include/storage/lockdefs.h
@@ -47,6 +47,8 @@ typedef int LOCKMODE;
#define MaxLockMode 8
+/* See README.tuplock section "Locking to write inplace-updated tables" */
+#define InplaceUpdateTupleLock ExclusiveLock
/* WAL representation of an AccessExclusiveLock on a table */
typedef struct xl_standby_lock
diff --git a/src/include/utils/syscache.h b/src/include/utils/syscache.h
index d74a348600c..27c892d24c5 100644
--- a/src/include/utils/syscache.h
+++ b/src/include/utils/syscache.h
@@ -135,9 +135,14 @@ extern HeapTuple SearchSysCache4(int cacheId,
extern void ReleaseSysCache(HeapTuple tuple);
+extern HeapTuple SearchSysCacheLocked1(int cacheId,
+ Datum key1);
+
/* convenience routines */
extern HeapTuple SearchSysCacheCopy(int cacheId,
Datum key1, Datum key2, Datum key3, Datum key4);
+extern HeapTuple SearchSysCacheLockedCopy1(int cacheId,
+ Datum key1);
extern bool SearchSysCacheExists(int cacheId,
Datum key1, Datum key2, Datum key3, Datum key4);
extern Oid GetSysCacheOid(int cacheId, AttrNumber oidcol,
diff --git a/src/test/isolation/expected/intra-grant-inplace.out b/src/test/isolation/expected/intra-grant-inplace.out
index fe26984c0e0..b5fe8b06f76 100644
--- a/src/test/isolation/expected/intra-grant-inplace.out
+++ b/src/test/isolation/expected/intra-grant-inplace.out
@@ -100,7 +100,7 @@ f
step addk2: ALTER TABLE intra_grant_inplace ADD PRIMARY KEY (c);
step c2: COMMIT;
-starting permutation: b3 sfu3 b1 grant1 read2 as3 addk2 r3 c1 read2
+starting permutation: b3 sfu3 b1 grant1 read2 addk2 r3 c1 read2
step b3: BEGIN ISOLATION LEVEL READ COMMITTED;
step sfu3:
SELECT relhasindex FROM pg_class
@@ -124,7 +124,6 @@ relhasindex
f
(1 row)
-step as3: LOCK TABLE intra_grant_inplace IN ACCESS SHARE MODE;
step addk2: ALTER TABLE intra_grant_inplace ADD PRIMARY KEY (c); <waiting ...>
step r3: ROLLBACK;
step grant1: <... completed>
@@ -155,9 +154,11 @@ step b1: BEGIN;
step grant1:
GRANT SELECT ON intra_grant_inplace TO PUBLIC;
<waiting ...>
-step addk2: ALTER TABLE intra_grant_inplace ADD PRIMARY KEY (c);
-step c2: COMMIT;
+step addk2: ALTER TABLE intra_grant_inplace ADD PRIMARY KEY (c); <waiting ...>
+step addk2: <... completed>
+ERROR: deadlock detected
step grant1: <... completed>
+step c2: COMMIT;
step c1: COMMIT;
step read2:
SELECT relhasindex FROM pg_class
@@ -195,9 +196,8 @@ relhasindex
f
(1 row)
-s4: WARNING: got: tuple concurrently updated
-step revoke4: <... completed>
step r3: ROLLBACK;
+step revoke4: <... completed>
starting permutation: b1 drop1 b3 sfu3 revoke4 c1 r3
step b1: BEGIN;
@@ -224,6 +224,6 @@ relhasindex
-----------
(0 rows)
-s4: WARNING: got: tuple concurrently deleted
+s4: WARNING: got: cache lookup failed for relation REDACTED
step revoke4: <... completed>
step r3: ROLLBACK;
diff --git a/src/test/isolation/specs/eval-plan-qual.spec b/src/test/isolation/specs/eval-plan-qual.spec
index b58eb60eb04..8173cb7aa6c 100644
--- a/src/test/isolation/specs/eval-plan-qual.spec
+++ b/src/test/isolation/specs/eval-plan-qual.spec
@@ -190,7 +190,7 @@ step simplepartupdate_noroute {
update parttbl set b = 2 where c = 1 returning *;
}
-# test system class updates
+# test system class LockTuple()
step sys1 {
UPDATE pg_class SET reltuples = 123 WHERE oid = 'accounts'::regclass;
diff --git a/src/test/isolation/specs/intra-grant-inplace.spec b/src/test/isolation/specs/intra-grant-inplace.spec
index d07ed3bb2cc..2992c85b44d 100644
--- a/src/test/isolation/specs/intra-grant-inplace.spec
+++ b/src/test/isolation/specs/intra-grant-inplace.spec
@@ -14,6 +14,7 @@ teardown
# heap_update()
session s1
+setup { SET deadlock_timeout = '100s'; }
step b1 { BEGIN; }
step grant1 {
GRANT SELECT ON intra_grant_inplace TO PUBLIC;
@@ -25,6 +26,7 @@ step c1 { COMMIT; }
# inplace update
session s2
+setup { SET deadlock_timeout = '10ms'; }
step read2 {
SELECT relhasindex FROM pg_class
WHERE oid = 'intra_grant_inplace'::regclass;
@@ -48,7 +50,6 @@ step sfu3 {
SELECT relhasindex FROM pg_class
WHERE oid = 'intra_grant_inplace'::regclass FOR UPDATE;
}
-step as3 { LOCK TABLE intra_grant_inplace IN ACCESS SHARE MODE; }
step r3 { ROLLBACK; }
# Additional heap_update()
@@ -74,8 +75,6 @@ step keyshr5 {
teardown { ROLLBACK; }
-# XXX extant bugs: permutation comments refer to planned future LockTuple()
-
permutation
b1
grant1
@@ -118,7 +117,6 @@ permutation
b1
grant1(r3) # acquire LockTuple(), await sfu3 xmax
read2
- as3 # XXX temporary until patch adds locking to addk2
addk2(c1) # block in LockTuple() behind grant1
r3 # unblock grant1; addk2 now awaits grant1 xmax
c1
@@ -128,8 +126,8 @@ permutation
b2
sfnku2
b1
- grant1(c2) # acquire LockTuple(), await sfnku2 xmax
- addk2 # block in LockTuple() behind grant1 = deadlock
+ grant1(addk2) # acquire LockTuple(), await sfnku2 xmax
+ addk2(*) # block in LockTuple() behind grant1 = deadlock
c2
c1
read2
@@ -140,7 +138,7 @@ permutation
grant1
b3
sfu3(c1) # acquire LockTuple(), await grant1 xmax
- revoke4(sfu3) # block in LockTuple() behind sfu3
+ revoke4(r3) # block in LockTuple() behind sfu3
c1
r3 # revoke4 unlocks old tuple and finds new