| author | Tom Lane <tgl@sss.pgh.pa.us> | 2008-03-13 18:00:32 +0000 |
|---|---|---|
| committer | Tom Lane <tgl@sss.pgh.pa.us> | 2008-03-13 18:00:32 +0000 |
| commit | 3e701a04febb0c423d36ce0c47721a4c276439a2 (patch) | |
| tree | dcb58aee46d3c608f91f2685b97ff6f3ef0d39e0 /src/backend/utils/cache/inval.c | |
| parent | f4bce7e086288a0287e86196768cd7256a94f07c (diff) | |
| download | postgresql-3e701a04febb0c423d36ce0c47721a4c276439a2.tar.gz postgresql-3e701a04febb0c423d36ce0c47721a4c276439a2.zip | |
Fix heap_page_prune's problem with failing to send cache invalidation
messages if the calling transaction aborts later on. Collapsing out line
pointer redirects is a done deal as soon as we complete the page update,
so syscache *must* be notified even if the VACUUM FULL as a whole doesn't
complete. To fix, add some functionality to inval.c to allow the pending
inval messages to be sent immediately while heap_page_prune is still
running. The implementation is a bit chintzy: it will only work in the
context of VACUUM FULL. But that's all we need now, and it can always be
extended later if needed. Per my trouble report of a week ago.
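
For orientation, the caller-side pattern described above might look roughly like the sketch below. This is only an illustration under assumptions: BeginNonTransactionalInvalidation() and EndNonTransactionalInvalidation() are the functions added by this commit, but the surrounding function name, its parameters, and the elided pruning work are hypothetical placeholders, not the actual heap_page_prune() code.

```c
/*
 * Sketch only: a hypothetical VACUUM FULL pruning step bracketed by the
 * two new inval.c calls.  The function name and parameters here are
 * illustrative; they are not the real heap_page_prune() signature.
 */
static void
prune_page_for_vacuum_full(Relation rel, Buffer buf)
{
    /* Asserts that nothing is queued yet; changes no invalidation state */
    BeginNonTransactionalInvalidation();

    /*
     * ... collapse line-pointer redirects on the page, queuing the
     * resulting cache invalidation messages as a side effect ...
     */

    /*
     * Send the queued messages locally and to the shared inval queue right
     * away, so they take effect even if this transaction later aborts.
     */
    EndNonTransactionalInvalidation();
}
```

Because the Begin call only asserts preconditions and changes no state, an elog(ERROR) between the two calls simply leaves the queued invals to be thrown away, as the new header comments in the diff below spell out.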
Diffstat (limited to 'src/backend/utils/cache/inval.c')
-rw-r--r-- | src/backend/utils/cache/inval.c | 95 |
1 files changed, 94 insertions, 1 deletions
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 033ae3a2d62..ccb002f0f5d 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -80,7 +80,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.83 2008/01/01 19:45:53 momjian Exp $
+ *    $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.84 2008/03/13 18:00:32 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -964,6 +964,99 @@ CommandEndInvalidationMessages(void)
                                 &transInvalInfo->CurrentCmdInvalidMsgs);
 }
 
+
+/*
+ * BeginNonTransactionalInvalidation
+ *        Prepare for invalidation messages for nontransactional updates.
+ *
+ * A nontransactional invalidation is one that must be sent whether or not
+ * the current transaction eventually commits. We arrange for all invals
+ * queued between this call and EndNonTransactionalInvalidation() to be sent
+ * immediately when the latter is called.
+ *
+ * Currently, this is only used by heap_page_prune(), and only when it is
+ * invoked during VACUUM FULL's first pass over a table. We expect therefore
+ * that we are not inside a subtransaction and there are no already-pending
+ * invalidations. This could be relaxed by setting up a new nesting level of
+ * invalidation data, but for now there's no need. Note that heap_page_prune
+ * knows that this function does not change any state, and therefore there's
+ * no need to worry about cleaning up if there's an elog(ERROR) before
+ * reaching EndNonTransactionalInvalidation (the invals will just be thrown
+ * away if that happens).
+ */
+void
+BeginNonTransactionalInvalidation(void)
+{
+    /* Must be at top of stack */
+    Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
+
+    /* Must not have any previously-queued activity */
+    Assert(transInvalInfo->PriorCmdInvalidMsgs.cclist == NULL);
+    Assert(transInvalInfo->PriorCmdInvalidMsgs.rclist == NULL);
+    Assert(transInvalInfo->CurrentCmdInvalidMsgs.cclist == NULL);
+    Assert(transInvalInfo->CurrentCmdInvalidMsgs.rclist == NULL);
+    Assert(transInvalInfo->RelcacheInitFileInval == false);
+}
+
+/*
+ * EndNonTransactionalInvalidation
+ *        Process queued-up invalidation messages for nontransactional updates.
+ *
+ * We expect to find messages in CurrentCmdInvalidMsgs only (else there
+ * was a CommandCounterIncrement within the "nontransactional" update).
+ * We must process them locally and send them out to the shared invalidation
+ * message queue.
+ *
+ * We must also reset the lists to empty and explicitly free memory (we can't
+ * rely on end-of-transaction cleanup for that).
+ */
+void
+EndNonTransactionalInvalidation(void)
+{
+    InvalidationChunk *chunk;
+    InvalidationChunk *next;
+
+    /* Must be at top of stack */
+    Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
+
+    /* Must not have any prior-command messages */
+    Assert(transInvalInfo->PriorCmdInvalidMsgs.cclist == NULL);
+    Assert(transInvalInfo->PriorCmdInvalidMsgs.rclist == NULL);
+
+    /*
+     * At present, this function is only used for CTID-changing updates;
+     * since the relcache init file doesn't store any tuple CTIDs, we
+     * don't have to invalidate it.  That might not be true forever
+     * though, in which case we'd need code similar to AtEOXact_Inval.
+     */
+
+    /* Send out the invals */
+    ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
+                                LocalExecuteInvalidationMessage);
+    ProcessInvalidationMessages(&transInvalInfo->CurrentCmdInvalidMsgs,
+                                SendSharedInvalidMessage);
+
+    /* Clean up and release memory */
+    for (chunk = transInvalInfo->CurrentCmdInvalidMsgs.cclist;
+         chunk != NULL;
+         chunk = next)
+    {
+        next = chunk->next;
+        pfree(chunk);
+    }
+    for (chunk = transInvalInfo->CurrentCmdInvalidMsgs.rclist;
+         chunk != NULL;
+         chunk = next)
+    {
+        next = chunk->next;
+        pfree(chunk);
+    }
+    transInvalInfo->CurrentCmdInvalidMsgs.cclist = NULL;
+    transInvalInfo->CurrentCmdInvalidMsgs.rclist = NULL;
+    transInvalInfo->RelcacheInitFileInval = false;
+}
+
+
 /*
  * CacheInvalidateHeapTuple
  *        Register the given tuple for invalidation at end of command