-rw-r--r--  src/backend/access/transam/xlog.c   34
-rw-r--r--  src/backend/utils/misc/guc.c        11
-rw-r--r--  src/include/access/xlog.h             1
3 files changed, 20 insertions, 26 deletions
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 981d4e55274..5a4dbb9c532 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -91,13 +91,19 @@ int sync_method = DEFAULT_SYNC_METHOD;
int wal_level = WAL_LEVEL_MINIMAL;
int CommitDelay = 0; /* precommit delay in microseconds */
int CommitSiblings = 5; /* # concurrent xacts needed to sleep */
-int num_xloginsert_locks = 8;
#ifdef WAL_DEBUG
bool XLOG_DEBUG = false;
#endif
/*
+ * Number of WAL insertion locks to use. A higher value allows more insertions
+ * to happen concurrently, but adds some CPU overhead to flushing the WAL,
+ * which needs to iterate all the locks.
+ */
+#define NUM_XLOGINSERT_LOCKS 8
+
+/*
* XLOGfileslop is the maximum number of preallocated future XLOG segments.
* When we are done with an old XLOG segment file, we will recycle it as a
* future XLOG segment as long as there aren't already XLOGfileslop future
@@ -1095,9 +1101,9 @@ begin:;
* inserter acquires an insertion lock. In addition to just indicating that
* an insertion is in progress, the lock tells others how far the inserter
* has progressed. There is a small fixed number of insertion locks,
- * determined by the num_xloginsert_locks GUC. When an inserter crosses a
- * page boundary, it updates the value stored in the lock to how far it
- * has inserted, to allow the previous buffer to be flushed.
+ * determined by NUM_XLOGINSERT_LOCKS. When an inserter crosses a page
+ * boundary, it updates the value stored in the lock to how far it has
+ * inserted, to allow the previous buffer to be flushed.
*
* Holding onto an insertion lock also protects RedoRecPtr and
* fullPageWrites from changing until the insertion is finished.
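
(Illustrative aside, not part of the patch.) The comment above describes the insertion protocol: an inserter that crosses a page boundary publishes how far it has written in its lock, so earlier pages can be flushed without waiting for the whole record to finish. A minimal sketch of that idea follows; TOY_PAGE_SIZE and the plain insertingAt variable are stand-ins for the real page size and the value kept in the lock.

/*
 * Toy model, not PostgreSQL code: an inserter holding an insertion lock
 * publishes its progress each time the record it is writing crosses a
 * page boundary.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 8192

static uint64_t insertingAt;    /* progress visible to would-be flushers */

static void write_record(uint64_t start, uint64_t len)
{
    uint64_t end = start + len;
    uint64_t boundary = ((start / TOY_PAGE_SIZE) + 1) * TOY_PAGE_SIZE;

    /* Each page boundary crossed lets earlier pages be flushed by others. */
    while (boundary < end)
    {
        insertingAt = boundary;
        printf("crossed page boundary, published insertingAt = %llu\n",
               (unsigned long long) insertingAt);
        boundary += TOY_PAGE_SIZE;
    }
    /* the record bytes themselves would be copied into WAL buffers here */
}

int main(void)
{
    write_record(8000, 20000);  /* a record spanning several pages */
    return 0;
}
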
@@ -1578,7 +1584,7 @@ WALInsertLockAcquire(void)
static int lockToTry = -1;
if (lockToTry == -1)
- lockToTry = MyProc->pgprocno % num_xloginsert_locks;
+ lockToTry = MyProc->pgprocno % NUM_XLOGINSERT_LOCKS;
MyLockNo = lockToTry;
/*
@@ -1598,7 +1604,7 @@ WALInsertLockAcquire(void)
* than locks, it still helps to distribute the inserters evenly
* across the locks.
*/
- lockToTry = (lockToTry + 1) % num_xloginsert_locks;
+ lockToTry = (lockToTry + 1) % NUM_XLOGINSERT_LOCKS;
}
}
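
(Illustrative aside, not part of the patch.) WALInsertLockAcquire maps each backend onto one of the NUM_XLOGINSERT_LOCKS locks via pgprocno modulo the lock count, and advances to the next lock when its first choice is contended. A toy model of that selection is below; the try_acquire() stub is invented, and unlike the real code (which falls back to a blocking acquire) this sketch simply probes until a free lock is found.

/*
 * Toy model, not PostgreSQL code: choosing a WAL insertion lock by hashing
 * the backend's pgprocno and walking round-robin on contention.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_XLOGINSERT_LOCKS 8

static bool try_acquire(int lockno)
{
    return (lockno % 2) == 1;   /* pretend even-numbered locks are busy */
}

static int choose_insert_lock(int pgprocno)
{
    int lockToTry = pgprocno % NUM_XLOGINSERT_LOCKS;

    /* Keep moving to the next lock until one can be taken. */
    while (!try_acquire(lockToTry))
        lockToTry = (lockToTry + 1) % NUM_XLOGINSERT_LOCKS;
    return lockToTry;
}

int main(void)
{
    for (int proc = 0; proc < 4; proc++)
        printf("backend %d -> insertion lock %d\n",
               proc, choose_insert_lock(proc));
    return 0;
}
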
@@ -1617,7 +1623,7 @@ WALInsertLockAcquireExclusive(void)
* than any real XLogRecPtr value, to make sure that no-one blocks waiting
* on those.
*/
- for (i = 0; i < num_xloginsert_locks - 1; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS - 1; i++)
{
LWLockAcquireWithVar(&WALInsertLocks[i].l.lock,
&WALInsertLocks[i].l.insertingAt,
@@ -1640,7 +1646,7 @@ WALInsertLockRelease(void)
{
int i;
- for (i = 0; i < num_xloginsert_locks; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
LWLockRelease(&WALInsertLocks[i].l.lock);
holdingAllLocks = false;
@@ -1664,8 +1670,8 @@ WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt)
* We use the last lock to mark our actual position, see comments in
* WALInsertLockAcquireExclusive.
*/
- LWLockUpdateVar(&WALInsertLocks[num_xloginsert_locks - 1].l.lock,
- &WALInsertLocks[num_xloginsert_locks - 1].l.insertingAt,
+ LWLockUpdateVar(&WALInsertLocks[NUM_XLOGINSERT_LOCKS - 1].l.lock,
+ &WALInsertLocks[NUM_XLOGINSERT_LOCKS - 1].l.insertingAt,
insertingAt);
}
else
@@ -1732,7 +1738,7 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto)
* out for any insertion that's still in progress.
*/
finishedUpto = reservedUpto;
- for (i = 0; i < num_xloginsert_locks; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
{
XLogRecPtr insertingat = InvalidXLogRecPtr;
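
(Illustrative aside, not part of the patch.) This hunk is the flush-side loop that the NUM_XLOGINSERT_LOCKS comment refers to: WaitXLogInsertionsToFinish must look at every insertion lock to learn how far it may safely flush, which is why a higher lock count adds flush overhead. A simplified, non-waiting model of that scan follows; the slot array and the rule "0 means idle" are inventions of the sketch.

/*
 * Toy model, not PostgreSQL code: the flusher inspects every insertion
 * slot and takes the minimum in-progress position, so the cost of the
 * scan grows with NUM_XLOGINSERT_LOCKS.  The real code also waits for
 * slow inserters rather than just lowering the result.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_XLOGINSERT_LOCKS 8

static uint64_t insertingAt[NUM_XLOGINSERT_LOCKS];  /* 0 = slot idle */

static uint64_t wait_insertions_to_finish(uint64_t reservedUpto)
{
    uint64_t finishedUpto = reservedUpto;

    for (int i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
    {
        uint64_t pos = insertingAt[i];

        /* An insertion still in progress behind this point limits the flush. */
        if (pos != 0 && pos < finishedUpto)
            finishedUpto = pos;
    }
    return finishedUpto;
}

int main(void)
{
    insertingAt[2] = 900;
    insertingAt[5] = 950;
    printf("can flush up to %llu\n",
           (unsigned long long) wait_insertions_to_finish(1000));
    return 0;
}
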
@@ -4752,7 +4758,7 @@ XLOGShmemSize(void)
size = sizeof(XLogCtlData);
/* WAL insertion locks, plus alignment */
- size = add_size(size, mul_size(sizeof(WALInsertLockPadded), num_xloginsert_locks + 1));
+ size = add_size(size, mul_size(sizeof(WALInsertLockPadded), NUM_XLOGINSERT_LOCKS + 1));
/* xlblocks array */
size = add_size(size, mul_size(sizeof(XLogRecPtr), XLOGbuffers));
/* extra alignment padding for XLOG I/O buffers */
@@ -4829,7 +4835,7 @@ XLOGShmemInit(void)
((uintptr_t) allocptr) %sizeof(WALInsertLockPadded);
WALInsertLocks = XLogCtl->Insert.WALInsertLocks =
(WALInsertLockPadded *) allocptr;
- allocptr += sizeof(WALInsertLockPadded) * num_xloginsert_locks;
+ allocptr += sizeof(WALInsertLockPadded) * NUM_XLOGINSERT_LOCKS;
XLogCtl->Insert.WALInsertLockTrancheId = LWLockNewTrancheId();
@@ -4838,7 +4844,7 @@ XLOGShmemInit(void)
XLogCtl->Insert.WALInsertLockTranche.array_stride = sizeof(WALInsertLockPadded);
LWLockRegisterTranche(XLogCtl->Insert.WALInsertLockTrancheId, &XLogCtl->Insert.WALInsertLockTranche);
- for (i = 0; i < num_xloginsert_locks; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
{
LWLockInitialize(&WALInsertLocks[i].l.lock,
XLogCtl->Insert.WALInsertLockTrancheId);
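
(Illustrative aside, not part of the patch.) The two hunks above size and carve up shared memory for the lock array: room is reserved for NUM_XLOGINSERT_LOCKS padded structs plus one extra, so the pointer can be rounded up to a multiple of the padded size without overrunning the allocation. A standalone sketch of that arithmetic follows; the 128-byte pad and the use of malloc in place of shared memory are assumptions of the sketch.

/*
 * Toy calculation, not PostgreSQL code: reserve N + 1 padded lock structs,
 * then round the start pointer up to the next multiple of the padded size.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_XLOGINSERT_LOCKS 8

typedef union
{
    struct
    {
        long        lock;        /* stand-in for the LWLock itself */
        uint64_t    insertingAt;
    }           l;
    char        pad[128];        /* assumed full cache-line padding */
} ToyWALInsertLockPadded;

int main(void)
{
    size_t      size = sizeof(ToyWALInsertLockPadded) * (NUM_XLOGINSERT_LOCKS + 1);
    char       *region = malloc(size);
    char       *allocptr = region;

    /* Round up to the next multiple of the padded struct size. */
    allocptr += sizeof(ToyWALInsertLockPadded) -
        ((uintptr_t) allocptr) % sizeof(ToyWALInsertLockPadded);

    printf("reserved %zu bytes, lock array starts at offset %td\n",
           size, allocptr - region);
    free(region);
    return 0;
}
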
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index d2083142589..8111b93074a 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -2144,17 +2144,6 @@ static struct config_int ConfigureNamesInt[] =
},
{
- {"xloginsert_locks", PGC_POSTMASTER, WAL_SETTINGS,
- gettext_noop("Sets the number of locks used for concurrent xlog insertions."),
- NULL,
- GUC_NOT_IN_SAMPLE
- },
- &num_xloginsert_locks,
- 8, 1, 1000,
- NULL, NULL, NULL
- },
-
- {
/* see max_connections */
{"max_wal_senders", PGC_POSTMASTER, REPLICATION_SENDING,
gettext_noop("Sets the maximum number of simultaneously running WAL sender processes."),
diff --git a/src/include/access/xlog.h b/src/include/access/xlog.h
index 0b7bfa5f30e..0f068d91f38 100644
--- a/src/include/access/xlog.h
+++ b/src/include/access/xlog.h
@@ -194,7 +194,6 @@ extern bool EnableHotStandby;
extern bool fullPageWrites;
extern bool wal_log_hints;
extern bool log_checkpoints;
-extern int num_xloginsert_locks;
/* WAL levels */
typedef enum WalLevel