int wal_level = WAL_LEVEL_MINIMAL;
int CommitDelay = 0; /* precommit delay in microseconds */
int CommitSiblings = 5; /* # concurrent xacts needed to sleep */
-int num_xloginsert_locks = 8;
#ifdef WAL_DEBUG
bool XLOG_DEBUG = false;
#endif
+/*
+ * Number of WAL insertion locks to use. A higher value allows more insertions
+ * to happen concurrently, but adds some CPU overhead to flushing the WAL,
+ * which needs to iterate all the locks.
+ */
+#define NUM_XLOGINSERT_LOCKS 8
+
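
For context on the tradeoff this comment describes: each lock slot is padded to a full cache line, so raising the constant costs memory and flush-time iteration, not just contention behavior. A standalone sketch of the layout idea follows; the field types and the 64-byte line size are illustrative stand-ins, not PostgreSQL's exact definitions:

#include <stdint.h>
#include <stdio.h>

#define NUM_XLOGINSERT_LOCKS 8
#define CACHE_LINE_SIZE 64			/* stand-in for PG_CACHE_LINE_SIZE */

typedef struct
{
	int			lock;				/* stand-in for LWLock */
	uint64_t	insertingAt;		/* stand-in for XLogRecPtr */
} WALInsertLock;

typedef union
{
	WALInsertLock l;
	char		pad[CACHE_LINE_SIZE];	/* keep slots on separate cache lines */
} WALInsertLockPadded;

int
main(void)
{
	printf("one slot: %zu bytes, whole array: %zu bytes\n",
		   sizeof(WALInsertLockPadded),
		   NUM_XLOGINSERT_LOCKS * sizeof(WALInsertLockPadded));
	return 0;
}
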
/*
* XLOGfileslop is the maximum number of preallocated future XLOG segments.
* When we are done with an old XLOG segment file, we will recycle it as a
* inserter acquires an insertion lock. In addition to just indicating that
* an insertion is in progress, the lock tells others how far the inserter
* has progressed. There is a small fixed number of insertion locks,
- * determined by the num_xloginsert_locks GUC. When an inserter crosses a
- * page boundary, it updates the value stored in the lock to how far it
- * has inserted, to allow the previous buffer to be flushed.
+ * determined by NUM_XLOGINSERT_LOCKS. When an inserter crosses a page
+ * boundary, it updates the value stored in the lock to how far it has
+ * inserted, to allow the previous buffer to be flushed.
*
* Holding onto an insertion lock also protects RedoRecPtr and
* fullPageWrites from changing until the insertion is finished.
static int lockToTry = -1;
if (lockToTry == -1)
- lockToTry = MyProc->pgprocno % num_xloginsert_locks;
+ lockToTry = MyProc->pgprocno % NUM_XLOGINSERT_LOCKS;
MyLockNo = lockToTry;
/*
* than locks, it still helps to distribute the inserters evenly
* across the locks.
*/
- lockToTry = (lockToTry + 1) % num_xloginsert_locks;
+ lockToTry = (lockToTry + 1) % NUM_XLOGINSERT_LOCKS;
}
}
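
The effect of the modulo scheme above is easy to see in isolation. In this toy program (an illustration only; MyProc->pgprocno is reduced to a plain loop counter) each backend hashes to a starting slot, and a backend that finds its slot contended rotates to the next one:

#include <stdio.h>

#define NUM_XLOGINSERT_LOCKS 8

int
main(void)
{
	int			pgprocno;

	for (pgprocno = 0; pgprocno < 16; pgprocno++)
	{
		int			lockToTry = pgprocno % NUM_XLOGINSERT_LOCKS;

		printf("backend %2d starts at lock %d, retries at lock %d\n",
			   pgprocno, lockToTry,
			   (lockToTry + 1) % NUM_XLOGINSERT_LOCKS);
	}
	return 0;
}
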
* than any real XLogRecPtr value, to make sure that no-one blocks waiting
* on those.
*/
- for (i = 0; i < num_xloginsert_locks - 1; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS - 1; i++)
{
LWLockAcquireWithVar(&WALInsertLocks[i].l.lock,
&WALInsertLocks[i].l.insertingAt,
{
int i;
- for (i = 0; i < num_xloginsert_locks; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
LWLockRelease(&WALInsertLocks[i].l.lock);
holdingAllLocks = false;
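
The exclusive path is the flip side of the cheap shared path: a caller that needs to lock out all insertions must sweep every slot, which is part of why the constant is kept small. A minimal model of that acquire-all/release-all pairing, with plain booleans standing in for LWLockAcquireWithVar and LWLockRelease:

#include <stdbool.h>
#include <stdio.h>

#define NUM_XLOGINSERT_LOCKS 8

static bool locks[NUM_XLOGINSERT_LOCKS];
static bool holdingAllLocks;

static void
acquire_all(void)
{
	int			i;

	for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
		locks[i] = true;		/* stands in for LWLockAcquireWithVar */
	holdingAllLocks = true;
}

static void
release_all(void)
{
	int			i;

	for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
		locks[i] = false;		/* stands in for LWLockRelease */
	holdingAllLocks = false;
}

int
main(void)
{
	acquire_all();
	printf("holding all locks: %d\n", holdingAllLocks);
	release_all();
	printf("holding all locks: %d\n", holdingAllLocks);
	return 0;
}
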
* We use the last lock to mark our actual position, see comments in
* WALInsertLockAcquireExclusive.
*/
- LWLockUpdateVar(&WALInsertLocks[num_xloginsert_locks - 1].l.lock,
- &WALInsertLocks[num_xloginsert_locks - 1].l.insertingAt,
+ LWLockUpdateVar(&WALInsertLocks[NUM_XLOGINSERT_LOCKS - 1].l.lock,
+ &WALInsertLocks[NUM_XLOGINSERT_LOCKS - 1].l.insertingAt,
insertingAt);
}
else
* out for any insertion that's still in progress.
*/
finishedUpto = reservedUpto;
- for (i = 0; i < num_xloginsert_locks; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
{
XLogRecPtr insertingat = InvalidXLogRecPtr;
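
This scan is the flush-side cost mentioned in the new NUM_XLOGINSERT_LOCKS comment: the caller starts from reservedUpto and lowers it to the oldest position any in-progress inserter has advertised. A self-contained model of that reduction, with made-up LSN values in place of the per-lock insertingAt variables:

#include <stdint.h>
#include <stdio.h>

#define NUM_XLOGINSERT_LOCKS 8

typedef uint64_t XLogRecPtr;	/* stand-in */
#define InvalidXLogRecPtr ((XLogRecPtr) 0)

int
main(void)
{
	/* 0 means "that slot's holder has not reported a position" */
	XLogRecPtr	insertingAt[NUM_XLOGINSERT_LOCKS] =
	{0, 0x1200, 0, 0x1180, 0, 0, 0x1300, 0};
	XLogRecPtr	reservedUpto = 0x1400;
	XLogRecPtr	finishedUpto = reservedUpto;
	int			i;

	for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
	{
		XLogRecPtr	insertingat = insertingAt[i];

		if (insertingat != InvalidXLogRecPtr && insertingat < finishedUpto)
			finishedUpto = insertingat;
	}
	printf("safe to flush up to %llX\n", (unsigned long long) finishedUpto);
	return 0;
}
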
size = sizeof(XLogCtlData);
/* WAL insertion locks, plus alignment */
- size = add_size(size, mul_size(sizeof(WALInsertLockPadded), num_xloginsert_locks + 1));
+ size = add_size(size, mul_size(sizeof(WALInsertLockPadded), NUM_XLOGINSERT_LOCKS + 1));
/* xlblocks array */
size = add_size(size, mul_size(sizeof(XLogRecPtr), XLOGbuffers));
/* extra alignment padding for XLOG I/O buffers */
((uintptr_t) allocptr) % sizeof(WALInsertLockPadded);
WALInsertLocks = XLogCtl->Insert.WALInsertLocks =
(WALInsertLockPadded *) allocptr;
- allocptr += sizeof(WALInsertLockPadded) * num_xloginsert_locks;
+ allocptr += sizeof(WALInsertLockPadded) * NUM_XLOGINSERT_LOCKS;
XLogCtl->Insert.WALInsertLockTrancheId = LWLockNewTrancheId();
XLogCtl->Insert.WALInsertLockTranche.array_stride = sizeof(WALInsertLockPadded);
LWLockRegisterTranche(XLogCtl->Insert.WALInsertLockTrancheId, &XLogCtl->Insert.WALInsertLockTranche);
- for (i = 0; i < num_xloginsert_locks; i++)
+ for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
{
LWLockInitialize(&WALInsertLocks[i].l.lock,
XLogCtl->Insert.WALInsertLockTrancheId);
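
One detail worth spelling out from the allocation above: the size calculation reserves NUM_XLOGINSERT_LOCKS + 1 slots because the raw shared-memory pointer need not be aligned to sizeof(WALInsertLockPadded), and rounding it up to the next boundary can consume up to one full slot. A self-contained model of that arithmetic; the buffer, the 40-byte header, and the 64-byte line size are made-up values for illustration:

#include <stdint.h>
#include <stdio.h>

#define NUM_XLOGINSERT_LOCKS 8
#define CACHE_LINE_SIZE 64		/* stand-in for PG_CACHE_LINE_SIZE */

typedef union
{
	char		pad[CACHE_LINE_SIZE];
} WALInsertLockPadded;

int
main(void)
{
	/* header + (N + 1) slots, mirroring the add_size/mul_size budget */
	static char buf[40 + (NUM_XLOGINSERT_LOCKS + 1) * sizeof(WALInsertLockPadded)];
	char	   *allocptr = buf + 40;	/* pretend a 40-byte struct precedes */

	/* the same rounding step as in the hunk above */
	allocptr += sizeof(WALInsertLockPadded) -
		((uintptr_t) allocptr) % sizeof(WALInsertLockPadded);

	printf("aligned: %d, bytes to spare after the array: %ld\n",
		   (int) (((uintptr_t) allocptr) % sizeof(WALInsertLockPadded) == 0),
		   (long) ((buf + sizeof(buf)) -
				   (allocptr + NUM_XLOGINSERT_LOCKS * sizeof(WALInsertLockPadded))));
	return 0;
}
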