Diffstat (limited to 'src/backend/storage')
-rw-r--r--  src/backend/storage/buffer/bufmgr.c    58
-rw-r--r--  src/backend/storage/buffer/freelist.c  45
-rw-r--r--  src/backend/storage/lmgr/proc.c         4
3 files changed, 74 insertions, 33 deletions
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 1889941eda1..a1b588b95c1 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -968,7 +968,6 @@ void
MarkBufferDirty(Buffer buffer)
{
volatile BufferDesc *bufHdr;
- bool dirtied = false;
if (!BufferIsValid(buffer))
elog(ERROR, "bad buffer ID: %d", buffer);
@@ -989,26 +988,20 @@ MarkBufferDirty(Buffer buffer)
Assert(bufHdr->refcount > 0);
- if (!(bufHdr->flags & BM_DIRTY))
- dirtied = true;
-
- bufHdr->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
-
- UnlockBufHdr(bufHdr);
-
/*
- * If the buffer was not dirty already, do vacuum accounting, and
- * nudge bgwriter.
+ * If the buffer was not dirty already, do vacuum accounting.
*/
- if (dirtied)
+ if (!(bufHdr->flags & BM_DIRTY))
{
VacuumPageDirty++;
pgBufferUsage.shared_blks_dirtied++;
if (VacuumCostActive)
VacuumCostBalance += VacuumCostPageDirty;
- if (ProcGlobal->bgwriterLatch)
- SetLatch(ProcGlobal->bgwriterLatch);
}
+
+ bufHdr->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
+
+ UnlockBufHdr(bufHdr);
}
/*
@@ -1331,9 +1324,11 @@ BufferSync(int flags)
*
* This is called periodically by the background writer process.
*
- * Returns true if the clocksweep has been "lapped", so that there's nothing
- * to do. Also returns true if there's nothing to do because bgwriter was
- * effectively disabled by setting bgwriter_lru_maxpages to 0.
+ * Returns true if it's appropriate for the bgwriter process to go into
+ * low-power hibernation mode. (This happens if the strategy clock sweep
+ * has been "lapped" and no buffer allocations have occurred recently,
+ * or if the bgwriter has been effectively disabled by setting
+ * bgwriter_lru_maxpages to 0.)
*/
bool
BgBufferSync(void)
@@ -1375,6 +1370,10 @@ BgBufferSync(void)
int num_written;
int reusable_buffers;
+ /* Variables for final smoothed_density update */
+ long new_strategy_delta;
+ uint32 new_recent_alloc;
+
/*
* Find out where the freelist clock sweep currently is, and how many
* buffer allocations have happened since our last call.
@@ -1598,21 +1597,23 @@ BgBufferSync(void)
* which is helpful because a long memory isn't as desirable on the
* density estimates.
*/
- strategy_delta = bufs_to_lap - num_to_scan;
- recent_alloc = reusable_buffers - reusable_buffers_est;
- if (strategy_delta > 0 && recent_alloc > 0)
+ new_strategy_delta = bufs_to_lap - num_to_scan;
+ new_recent_alloc = reusable_buffers - reusable_buffers_est;
+ if (new_strategy_delta > 0 && new_recent_alloc > 0)
{
- scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
+ scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
smoothed_density += (scans_per_alloc - smoothed_density) /
smoothing_samples;
#ifdef BGW_DEBUG
elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
- recent_alloc, strategy_delta, scans_per_alloc, smoothed_density);
+ new_recent_alloc, new_strategy_delta,
+ scans_per_alloc, smoothed_density);
#endif
}
- return (bufs_to_lap == 0);
+ /* Return true if OK to hibernate */
+ return (bufs_to_lap == 0 && recent_alloc == 0);
}
/*
@@ -2385,24 +2386,17 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
if ((bufHdr->flags & (BM_DIRTY | BM_JUST_DIRTIED)) !=
(BM_DIRTY | BM_JUST_DIRTIED))
{
- bool dirtied = false;
-
LockBufHdr(bufHdr);
Assert(bufHdr->refcount > 0);
if (!(bufHdr->flags & BM_DIRTY))
- dirtied = true;
- bufHdr->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
- UnlockBufHdr(bufHdr);
-
- if (dirtied)
{
+ /* Do vacuum cost accounting */
VacuumPageDirty++;
if (VacuumCostActive)
VacuumCostBalance += VacuumCostPageDirty;
- /* The bgwriter may need to be woken. */
- if (ProcGlobal->bgwriterLatch)
- SetLatch(ProcGlobal->bgwriterLatch);
}
+ bufHdr->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
+ UnlockBufHdr(bufHdr);
}
}
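
An aside on the density bookkeeping in the BgBufferSync() hunk above: smoothed_density is a recursive (exponential moving) average, where each new sample moves the estimate 1/smoothing_samples of the way toward the latest observation. Below is a small worked illustration, not part of this patch, using made-up numbers and smoothing_samples = 16:

    #include <stdio.h>

    /* Worked illustration only -- not part of this patch. */
    int
    main(void)
    {
        float   smoothed_density = 2.0f;    /* previous estimate */
        float   scans_per_alloc = 10.0f;    /* latest observation */
        float   smoothing_samples = 16;

        smoothed_density += (scans_per_alloc - smoothed_density) / smoothing_samples;

        /* Prints 2.50: the estimate moved (10.0 - 2.0) / 16 = 0.5 toward the
         * new sample, so old observations decay with a roughly 16-sample memory. */
        printf("%.2f\n", smoothed_density);
        return 0;
    }
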
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 3e62448386d..76a4beca699 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -41,6 +41,11 @@ typedef struct
*/
uint32 completePasses; /* Complete cycles of the clock sweep */
uint32 numBufferAllocs; /* Buffers allocated since last reset */
+
+ /*
+ * Notification latch, or NULL if none. See StrategyNotifyBgWriter.
+ */
+ Latch *bgwriterLatch;
} BufferStrategyControl;
/* Pointers to shared state */
@@ -107,6 +112,7 @@ volatile BufferDesc *
StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
{
volatile BufferDesc *buf;
+ Latch *bgwriterLatch;
int trycounter;
/*
@@ -135,6 +141,21 @@ StrategyGetBuffer(BufferAccessStrategy strategy, bool *lock_held)
StrategyControl->numBufferAllocs++;
/*
+ * If bgwriterLatch is set, we need to waken the bgwriter, but we should
+ * not do so while holding BufFreelistLock; so release and re-grab. This
+ * is annoyingly tedious, but it happens at most once per bgwriter cycle,
+ * so the performance hit is minimal.
+ */
+ bgwriterLatch = StrategyControl->bgwriterLatch;
+ if (bgwriterLatch)
+ {
+ StrategyControl->bgwriterLatch = NULL;
+ LWLockRelease(BufFreelistLock);
+ SetLatch(bgwriterLatch);
+ LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
+ }
+
+ /*
* Try to get a buffer from the freelist. Note that the freeNext fields
* are considered to be protected by the BufFreelistLock not the
* individual buffer spinlocks, so it's OK to manipulate them without
@@ -269,6 +290,27 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
return result;
}
+/*
+ * StrategyNotifyBgWriter -- set or clear allocation notification latch
+ *
+ * If bgwriterLatch isn't NULL, the next invocation of StrategyGetBuffer will
+ * set that latch. Pass NULL to clear the pending notification before it
+ * happens. This feature is used by the bgwriter process to wake itself up
+ * from hibernation, and is not meant for anybody else to use.
+ */
+void
+StrategyNotifyBgWriter(Latch *bgwriterLatch)
+{
+ /*
+ * We acquire the BufFreelistLock just to ensure that the store appears
+ * atomic to StrategyGetBuffer. The bgwriter should call this rather
+ * infrequently, so there's no performance penalty from being safe.
+ */
+ LWLockAcquire(BufFreelistLock, LW_EXCLUSIVE);
+ StrategyControl->bgwriterLatch = bgwriterLatch;
+ LWLockRelease(BufFreelistLock);
+}
+
/*
* StrategyShmemSize
@@ -344,6 +386,9 @@ StrategyInitialize(bool init)
/* Clear statistics */
StrategyControl->completePasses = 0;
StrategyControl->numBufferAllocs = 0;
+
+ /* No pending notification */
+ StrategyControl->bgwriterLatch = NULL;
}
else
Assert(!init);
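
Taken together with the BgBufferSync() change in bufmgr.c, these freelist.c hooks let the background writer sleep for long stretches when there is nothing to clean and be woken by the first subsequent buffer allocation. The fragment below is a minimal sketch of how a bgwriter main loop might use them; it is not part of this patch, and the header list, the 10-second hibernation timeout, and the use of MyProc->procLatch and BgWriterDelay are illustrative assumptions.

    /* Hypothetical bgwriter loop fragment; header locations are approximate. */
    #include "postgres.h"
    #include "storage/buf_internals.h"
    #include "storage/bufmgr.h"
    #include "storage/latch.h"
    #include "storage/proc.h"

    #define BGWRITER_HIBERNATE_MS   10000L  /* assumed long sleep interval */

    for (;;)
    {
        bool    can_hibernate;

        /* Clear any pending wakeup before doing this cycle's work. */
        ResetLatch(&MyProc->procLatch);

        can_hibernate = BgBufferSync();

        if (can_hibernate)
        {
            /*
             * Nothing to do: ask StrategyGetBuffer() to set our latch at the
             * next buffer allocation, then sleep for a long interval.
             */
            StrategyNotifyBgWriter(&MyProc->procLatch);
            (void) WaitLatch(&MyProc->procLatch,
                             WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
                             BGWRITER_HIBERNATE_MS);
            /* Cancel the request in case the timeout fired first. */
            StrategyNotifyBgWriter(NULL);
        }
        else
            (void) WaitLatch(&MyProc->procLatch,
                             WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
                             (long) BgWriterDelay);     /* bgwriter_delay, in ms */
    }

Because StrategyGetBuffer() clears bgwriterLatch under BufFreelistLock before setting it, each registered notification fires at most once, and StrategyNotifyBgWriter(NULL) discards a registration that never fired.
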
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 8e309f8a0b4..031e91d14c5 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -186,7 +186,6 @@ InitProcGlobal(void)
ProcGlobal->startupProc = NULL;
ProcGlobal->startupProcPid = 0;
ProcGlobal->startupBufferPinWaitBufId = -1;
- ProcGlobal->bgwriterLatch = NULL;
ProcGlobal->walwriterLatch = NULL;
ProcGlobal->checkpointerLatch = NULL;
@@ -627,6 +626,9 @@ HaveNFreeProcs(int n)
return (n <= 0);
}
+/*
+ * Check if the current process is awaiting a lock.
+ */
bool
IsWaitingForLock(void)
{