author    Heikki Linnakangas <heikki.linnakangas@iki.fi>  2014-10-01 16:37:15 +0300
committer Heikki Linnakangas <heikki.linnakangas@iki.fi>  2014-10-01 16:42:26 +0300
commit    5fa6c81a43d047a070eff1f711b90c084c6d3e31 (patch)
tree      3fd2ae5c05c00a456ec712dd3b7dc04a4e90ec9f /src/backend/access/transam/xlog.c
parent    a39e78b710eb588e102aedd2828611d7bc74714b (diff)
Remove num_xloginsert_locks GUC, replace with a #define
I left the GUC in place for the beta period, so that people could experiment with different values. No-one's come up with any data showing that a different value would be better under some circumstances, so rather than try to document the GUC for users, let's just hard-code the current value, 8.
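
As a quick orientation before the hunks, here is a minimal, self-contained sketch of the net effect of the patch, condensed from the diff below rather than copied from xlog.c. pick_insert_lock is a hypothetical helper used only for illustration; the real logic lives inline in WALInsertLockAcquire.

#include <stdio.h>

/* Compile-time constant that replaces the num_xloginsert_locks GUC. */
#define NUM_XLOGINSERT_LOCKS 8

/*
 * Hypothetical helper (not in xlog.c): it condenses the two selection rules
 * the patch rewrites in WALInsertLockAcquire. The first attempt maps the
 * backend's pgprocno onto a lock; each retry after contention moves
 * round-robin to the next lock.
 */
static int
pick_insert_lock(int pgprocno, int previous)
{
    if (previous == -1)
        return pgprocno % NUM_XLOGINSERT_LOCKS;   /* first attempt */
    return (previous + 1) % NUM_XLOGINSERT_LOCKS; /* retry: next lock */
}

int
main(void)
{
    int first = pick_insert_lock(42, -1);   /* 42 % 8 = 2 */

    printf("first pick: %d, retry: %d\n", first, pick_insert_lock(42, first));
    return 0;
}
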
Diffstat (limited to 'src/backend/access/transam/xlog.c')
-rw-r--r--  src/backend/access/transam/xlog.c  34
1 file changed, 20 insertions, 14 deletions
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 981d4e55274..5a4dbb9c532 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -91,13 +91,19 @@ int sync_method = DEFAULT_SYNC_METHOD;
int wal_level = WAL_LEVEL_MINIMAL;
int CommitDelay = 0; /* precommit delay in microseconds */
int CommitSiblings = 5; /* # concurrent xacts needed to sleep */
-int num_xloginsert_locks = 8;
#ifdef WAL_DEBUG
bool XLOG_DEBUG = false;
#endif
/*
+ * Number of WAL insertion locks to use. A higher value allows more insertions
+ * to happen concurrently, but adds some CPU overhead to flushing the WAL,
+ * which needs to iterate all the locks.
+ */
+#define NUM_XLOGINSERT_LOCKS 8
+
+/*
* XLOGfileslop is the maximum number of preallocated future XLOG segments.
* When we are done with an old XLOG segment file, we will recycle it as a
* future XLOG segment as long as there aren't already XLOGfileslop future
@@ -1095,9 +1101,9 @@ begin:;
* inserter acquires an insertion lock. In addition to just indicating that
* an insertion is in progress, the lock tells others how far the inserter
* has progressed. There is a small fixed number of insertion locks,
- * determined by the num_xloginsert_locks GUC. When an inserter crosses a
- * page boundary, it updates the value stored in the lock to how far it
- * has inserted, to allow the previous buffer to be flushed.
+ * determined by NUM_XLOGINSERT_LOCKS. When an inserter crosses a page
+ * boundary, it updates the value stored in the lock to how far it has
+ * inserted, to allow the previous buffer to be flushed.
*
* Holding onto an insertion lock also protects RedoRecPtr and
* fullPageWrites from changing until the insertion is finished.
@@ -1578,7 +1584,7 @@ WALInsertLockAcquire(void)
static int lockToTry = -1;
if (lockToTry == -1)
- lockToTry = MyProc->pgprocno % num_xloginsert_locks;
+ lockToTry = MyProc->pgprocno % NUM_XLOGINSERT_LOCKS;
MyLockNo = lockToTry;
/*
@@ -1598,7 +1604,7 @@ WALInsertLockAcquire(void)
* than locks, it still helps to distribute the inserters evenly
* across the locks.
*/
- lockToTry = (lockToTry + 1) % num_xloginsert_locks;
+ lockToTry = (lockToTry + 1) % NUM_XLOGINSERT_LOCKS;
}
}
@@ -1617,7 +1623,7 @@ WALInsertLockAcquireExclusive(void)
* than any real XLogRecPtr value, to make sure that no-one blocks waiting
* on those.
*/
- for (i = 0; i < num_xloginsert_locks - 1; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS - 1; i++)
{
LWLockAcquireWithVar(&WALInsertLocks[i].l.lock,
&WALInsertLocks[i].l.insertingAt,
@@ -1640,7 +1646,7 @@ WALInsertLockRelease(void)
{
int i;
- for (i = 0; i < num_xloginsert_locks; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
LWLockRelease(&WALInsertLocks[i].l.lock);
holdingAllLocks = false;
@@ -1664,8 +1670,8 @@ WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt)
* We use the last lock to mark our actual position, see comments in
* WALInsertLockAcquireExclusive.
*/
- LWLockUpdateVar(&WALInsertLocks[num_xloginsert_locks - 1].l.lock,
- &WALInsertLocks[num_xloginsert_locks - 1].l.insertingAt,
+ LWLockUpdateVar(&WALInsertLocks[NUM_XLOGINSERT_LOCKS - 1].l.lock,
+ &WALInsertLocks[NUM_XLOGINSERT_LOCKS - 1].l.insertingAt,
insertingAt);
}
else
@@ -1732,7 +1738,7 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto)
* out for any insertion that's still in progress.
*/
finishedUpto = reservedUpto;
- for (i = 0; i < num_xloginsert_locks; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
{
XLogRecPtr insertingat = InvalidXLogRecPtr;
@@ -4752,7 +4758,7 @@ XLOGShmemSize(void)
size = sizeof(XLogCtlData);
/* WAL insertion locks, plus alignment */
- size = add_size(size, mul_size(sizeof(WALInsertLockPadded), num_xloginsert_locks + 1));
+ size = add_size(size, mul_size(sizeof(WALInsertLockPadded), NUM_XLOGINSERT_LOCKS + 1));
/* xlblocks array */
size = add_size(size, mul_size(sizeof(XLogRecPtr), XLOGbuffers));
/* extra alignment padding for XLOG I/O buffers */
@@ -4829,7 +4835,7 @@ XLOGShmemInit(void)
((uintptr_t) allocptr) % sizeof(WALInsertLockPadded);
WALInsertLocks = XLogCtl->Insert.WALInsertLocks =
(WALInsertLockPadded *) allocptr;
- allocptr += sizeof(WALInsertLockPadded) * num_xloginsert_locks;
+ allocptr += sizeof(WALInsertLockPadded) * NUM_XLOGINSERT_LOCKS;
XLogCtl->Insert.WALInsertLockTrancheId = LWLockNewTrancheId();
@@ -4838,7 +4844,7 @@ XLOGShmemInit(void)
XLogCtl->Insert.WALInsertLockTranche.array_stride = sizeof(WALInsertLockPadded);
LWLockRegisterTranche(XLogCtl->Insert.WALInsertLockTrancheId, &XLogCtl->Insert.WALInsertLockTranche);
- for (i = 0; i < num_xloginsert_locks; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
{
LWLockInitialize(&WALInsertLocks[i].l.lock,
XLogCtl->Insert.WALInsertLockTrancheId);
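
The new comment introduced by this patch notes that flushing the WAL must iterate over all the insertion locks, which is why the count stays small and fixed. Below is a standalone sketch of that flush-side scan, under the assumption of a plain array of insertingAt positions; the real WaitXLogInsertionsToFinish reads them through LWLockWaitForVar, and finished_up_to is a hypothetical name used only here.

#include <stdio.h>
#include <stdint.h>

#define NUM_XLOGINSERT_LOCKS 8

typedef uint64_t XLogRecPtr;
#define InvalidXLogRecPtr ((XLogRecPtr) 0)

/* Hypothetical stand-in for the per-lock insertingAt positions. */
static XLogRecPtr insertingAt[NUM_XLOGINSERT_LOCKS];

/*
 * Find how far all in-progress insertions have definitely completed.
 * Every lock must be examined, so the work grows linearly with
 * NUM_XLOGINSERT_LOCKS -- the flush-side CPU overhead the new comment
 * trades against insert concurrency.
 */
static XLogRecPtr
finished_up_to(XLogRecPtr reservedUpto)
{
    XLogRecPtr finishedUpto = reservedUpto;

    for (int i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
    {
        XLogRecPtr ins = insertingAt[i];

        /* InvalidXLogRecPtr means no insertion is in progress on this lock */
        if (ins != InvalidXLogRecPtr && ins < finishedUpto)
            finishedUpto = ins;
    }
    return finishedUpto;
}

int
main(void)
{
    insertingAt[3] = 1000;  /* one inserter has only reached position 1000 */
    printf("flush can proceed up to %llu\n",
           (unsigned long long) finished_up_to(2000));
    return 0;
}

Doubling NUM_XLOGINSERT_LOCKS would double the work in this loop on every flush, which is exactly the overhead the hard-coded value of 8 keeps bounded.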