Diffstat (limited to 'src/backend/access/heap/heapam.c')
-rw-r--r--  src/backend/access/heap/heapam.c  43
1 file changed, 36 insertions(+), 7 deletions(-)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index f54609dc8b9..e3a516d5394 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6383,6 +6383,24 @@ heap_inplace_update_and_unlock(Relation relation,
if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
elog(ERROR, "wrong tuple length");
+ /*
+ * Construct shared cache inval if necessary. Note that because we only
+ * pass the new version of the tuple, this mustn't be used for any
+ * operations that could change catcache lookup keys. But we aren't
+ * bothering with index updates either, so that's true a fortiori.
+ */
+ CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
+
+ /*
+ * Unlink relcache init files as needed. If unlinking, acquire
+ * RelCacheInitLock until after associated invalidations. By doing this
+ * in advance, if we checkpoint and then crash between inplace
+ * XLogInsert() and inval, we don't rely on StartupXLOG() ->
+ * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
+ * neglect to PANIC on EIO.
+ */
+ PreInplace_Inval();
+
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
@@ -6426,17 +6444,28 @@ heap_inplace_update_and_unlock(Relation relation,
PageSetLSN(BufferGetPage(buffer), recptr);
}
+ LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+
+ /*
+ * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
+ * do this before UnlockTuple().
+ *
+ * If we're mutating a tuple visible only to this transaction, there's an
+ * equivalent transactional inval from the action that created the tuple,
+ * and this inval is superfluous.
+ */
+ AtInplace_Inval();
+
END_CRIT_SECTION();
+ UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
- heap_inplace_unlock(relation, oldtup, buffer);
+ AcceptInvalidationMessages(); /* local processing of just-sent inval */
/*
- * Send out shared cache inval if necessary. Note that because we only
- * pass the new version of the tuple, this mustn't be used for any
- * operations that could change catcache lookup keys. But we aren't
- * bothering with index updates either, so that's true a fortiori.
- *
- * XXX ROLLBACK discards the invalidation. See test inplace-inval.spec.
+ * Queue a transactional inval. The immediate invalidation we just sent
+ * is the only one known to be necessary. To reduce risk from the
+ * transition to immediate invalidation, continue sending a transactional
+ * invalidation like we've long done. Third-party code might rely on it.
*/
if (!IsBootstrapProcessingMode())
CacheInvalidateHeapTuple(relation, tuple, NULL);
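
For orientation, the sequence this patch establishes in heap_inplace_update_and_unlock() can be condensed as follows. This is only a sketch paraphrasing the hunks above, not the full function: the length checks, tuple overwrite, and XLogInsert() details are elided, the wrapper name heap_inplace_update_and_unlock_sketch is invented for illustration, and the #include list is a best guess at the backend headers these calls come from.

    #include "postgres.h"

    #include "access/heapam.h"
    #include "miscadmin.h"
    #include "storage/bufmgr.h"
    #include "storage/lmgr.h"
    #include "utils/inval.h"

    /* Condensed sketch of the tail of heap_inplace_update_and_unlock() */
    static void
    heap_inplace_update_and_unlock_sketch(Relation relation,
                                          HeapTuple oldtup,
                                          HeapTuple tuple,
                                          Buffer buffer)
    {
        /* ... length checks and tuple setup elided (uses oldtup) ... */

        /* Build the shared inval message before the critical section. */
        CacheInvalidateHeapTupleInplace(relation, tuple, NULL);

        /* Unlink relcache init files, holding RelCacheInitLock, in advance. */
        PreInplace_Inval();

        START_CRIT_SECTION();
        /* ... overwrite the tuple in place, MarkBufferDirty(), XLogInsert() ... */

        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

        /* Shared queue receives the inval before the tuple lock is released. */
        AtInplace_Inval();
        END_CRIT_SECTION();
        UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
        AcceptInvalidationMessages();   /* local processing of just-sent inval */

        /* Transactional inval retained for third-party code. */
        if (!IsBootstrapProcessingMode())
            CacheInvalidateHeapTuple(relation, tuple, NULL);
    }

The key ordering property, per the comments in the hunks above, is that the immediate invalidation reaches the shared queue before UnlockTuple() releases InplaceUpdateTupleLock, which is what SearchSysCacheLocked1() relies on.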