path: root/src/backend/storage/buffer/bufmgr.c
author    Tom Lane <tgl@sss.pgh.pa.us>  2007-09-25 22:11:48 +0000
committer Tom Lane <tgl@sss.pgh.pa.us>  2007-09-25 22:11:48 +0000
commit    7a315a09dc0f7a1f903b89022a2a833c1719145b (patch)
tree      56bbb50df14a7caa843411446b291d2acbf117f9 /src/backend/storage/buffer/bufmgr.c
parent    6f5c38dcd0dfb391bcbaa683a96f4a62c574d95a (diff)
Dept. of second thoughts: fix loop in BgBufferSync so that the exit when
bgwriter_lru_maxpages is exceeded leaves the loop variables in the expected state. In the original coding, we'd fail to advance next_to_clean, causing that buffer to be probably-uselessly rechecked next time, and also have an off-by-one idea of the number of buffers scanned.
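
To see why the ordering matters, here is a minimal, self-contained toy model of the corrected loop (hypothetical demo code, not PostgreSQL source: NBUFFERS, max_writes, and the printf stand in for NBuffers, bgwriter_lru_maxpages, and the DEBUG1 elog; the real loop calls SyncOneBuffer as shown in the diff below). Because next_to_clean and num_to_scan are advanced before any possible early exit, every exit path leaves them consistent, and the number of buffers scanned is simply bufs_to_lap - num_to_scan with no correction term.

    /*
     * Toy model (hypothetical, not PostgreSQL source) of the corrected
     * BgBufferSync loop bookkeeping: advance the sweep position and the
     * scan counter first, then decide whether to stop.
     */
    #include <stdio.h>

    #define NBUFFERS 16             /* stand-in for NBuffers */

    int
    main(void)
    {
        int     bufs_to_lap = 10;   /* buffers we intend to scan */
        int     num_to_scan = bufs_to_lap;
        int     next_to_clean = 14; /* arbitrary sweep starting point */
        int     next_passes = 0;
        int     num_written = 0;
        int     max_writes = 3;     /* stand-in for bgwriter_lru_maxpages */

        while (num_to_scan > 0)
        {
            /* advance position and count before any possible early exit */
            if (++next_to_clean >= NBUFFERS)
            {
                next_to_clean = 0;
                next_passes++;
            }
            num_to_scan--;

            /* pretend every buffer was dirty and got written */
            if (++num_written >= max_writes)
                break;              /* early exit, as when maxpages is hit */
        }

        /* scanned count is exactly bufs_to_lap - num_to_scan on every path */
        printf("scanned=%d next_to_clean=%d passes=%d\n",
               bufs_to_lap - num_to_scan, next_to_clean, next_passes);
        return 0;
    }

In the original coding, the post-decrement in the while condition plus the bottom-of-loop advance meant that a break on the write limit skipped the advance of next_to_clean entirely; the "- 1" in the old scanned-count expressions compensated for the extra decrement at the loop test, but was still off by one on that break path, which is what the patch below straightens out.
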
Diffstat (limited to 'src/backend/storage/buffer/bufmgr.c')
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 6ba935a09af..ce7c54deeec 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.225 2007/09/25 20:03:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.226 2007/09/25 22:11:48 tgl Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1324,10 +1324,17 @@ BgBufferSync(void)
reusable_buffers = reusable_buffers_est;
/* Execute the LRU scan */
- while (num_to_scan-- > 0 && reusable_buffers < upcoming_alloc_est)
+ while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
{
int buffer_state = SyncOneBuffer(next_to_clean, true);
+ if (++next_to_clean >= NBuffers)
+ {
+ next_to_clean = 0;
+ next_passes++;
+ }
+ num_to_scan--;
+
if (buffer_state & BUF_WRITTEN)
{
reusable_buffers++;
@@ -1339,12 +1346,6 @@ BgBufferSync(void)
}
else if (buffer_state & BUF_REUSABLE)
reusable_buffers++;
-
- if (++next_to_clean >= NBuffers)
- {
- next_to_clean = 0;
- next_passes++;
- }
}
BgWriterStats.m_buf_written_clean += num_written;
@@ -1353,7 +1354,7 @@ BgBufferSync(void)
elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
smoothed_density, reusable_buffers_est, upcoming_alloc_est,
- bufs_to_lap - num_to_scan - 1,
+ bufs_to_lap - num_to_scan,
num_written,
reusable_buffers - reusable_buffers_est);
#endif
@@ -1366,7 +1367,7 @@ BgBufferSync(void)
* scanning, which is helpful because a long memory isn't as desirable
* on the density estimates.
*/
- strategy_delta = bufs_to_lap - num_to_scan - 1;
+ strategy_delta = bufs_to_lap - num_to_scan;
recent_alloc = reusable_buffers - reusable_buffers_est;
if (strategy_delta > 0 && recent_alloc > 0)
{