/*
 * CLOGShmemInit — initialize the commit-log SLRU area in shared memory.
 *
 * NOTE(review): this is a diff hunk, not plain source. The SLRU's display
 * name changes from "CLOG Ctl" to "clog" because, after this patch, the
 * name is also used (via strlcpy into lwlock_tranche_name) as the LWLock
 * tranche name for the per-buffer locks — so it must be short and
 * identifier-like, and fit in SLRU_MAX_NAME_LENGTH.
 */
CLOGShmemInit(void)
{
ClogCtl->PagePrecedes = CLOGPagePrecedes;
- SimpleLruInit(ClogCtl, "CLOG Ctl", CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE,
+ SimpleLruInit(ClogCtl, "clog", CLOGShmemBuffers(), CLOG_LSNS_PER_PAGE,
CLogControlLock, "pg_clog");
}
bool found;
CommitTsCtl->PagePrecedes = CommitTsPagePrecedes;
- SimpleLruInit(CommitTsCtl, "CommitTs Ctl", CommitTsShmemBuffers(), 0,
+ SimpleLruInit(CommitTsCtl, "commit_timestamp", CommitTsShmemBuffers(), 0,
CommitTsControlLock, "pg_commit_ts");
commitTsShared = ShmemInitStruct("CommitTs shared",
MultiXactMemberCtl->PagePrecedes = MultiXactMemberPagePrecedes;
SimpleLruInit(MultiXactOffsetCtl,
- "MultiXactOffset Ctl", NUM_MXACTOFFSET_BUFFERS, 0,
+ "multixact_offset", NUM_MXACTOFFSET_BUFFERS, 0,
MultiXactOffsetControlLock, "pg_multixact/offsets");
SimpleLruInit(MultiXactMemberCtl,
- "MultiXactMember Ctl", NUM_MXACTMEMBER_BUFFERS, 0,
+ "multixact_member", NUM_MXACTMEMBER_BUFFERS, 0,
MultiXactMemberControlLock, "pg_multixact/members");
/* Initialize our shared state struct */
sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
- sz += MAXALIGN(nslots * sizeof(LWLock *)); /* buffer_locks[] */
+ sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
if (nlsns > 0)
sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
offset += MAXALIGN(nslots * sizeof(int));
shared->page_lru_count = (int *) (ptr + offset);
offset += MAXALIGN(nslots * sizeof(int));
- shared->buffer_locks = (LWLock **) (ptr + offset);
- offset += MAXALIGN(nslots * sizeof(LWLock *));
if (nlsns > 0)
{
offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));
}
+ /* Initialize LWLocks */
+ shared->buffer_locks = (LWLockPadded *) ShmemAlloc(sizeof(LWLockPadded) * nslots);
+
+ Assert(strlen(name) + 1 < SLRU_MAX_NAME_LENGTH);
+ strlcpy(shared->lwlock_tranche_name, name, SLRU_MAX_NAME_LENGTH);
+ shared->lwlock_tranche_id = LWLockNewTrancheId();
+ shared->lwlock_tranche.name = shared->lwlock_tranche_name;
+ shared->lwlock_tranche.array_base = shared->buffer_locks;
+ shared->lwlock_tranche.array_stride = sizeof(LWLockPadded);
+
ptr += BUFFERALIGN(offset);
for (slotno = 0; slotno < nslots; slotno++)
{
+ LWLockInitialize(&shared->buffer_locks[slotno].lock,
+ shared->lwlock_tranche_id);
+
shared->page_buffer[slotno] = ptr;
shared->page_status[slotno] = SLRU_PAGE_EMPTY;
shared->page_dirty[slotno] = false;
shared->page_lru_count[slotno] = 0;
- shared->buffer_locks[slotno] = LWLockAssign();
ptr += BLCKSZ;
}
}
else
Assert(found);
+ /* Register SLRU tranche in the main tranches array */
+ LWLockRegisterTranche(shared->lwlock_tranche_id, &shared->lwlock_tranche);
+
/*
* Initialize the unshared control struct, including directory path. We
* assume caller set PagePrecedes.
/* See notes at top of file */
LWLockRelease(shared->ControlLock);
- LWLockAcquire(shared->buffer_locks[slotno], LW_SHARED);
- LWLockRelease(shared->buffer_locks[slotno]);
+ LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED);
+ LWLockRelease(&shared->buffer_locks[slotno].lock);
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
/*
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
{
- if (LWLockConditionalAcquire(shared->buffer_locks[slotno], LW_SHARED))
+ if (LWLockConditionalAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED))
{
/* indeed, the I/O must have failed */
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
shared->page_status[slotno] = SLRU_PAGE_VALID;
shared->page_dirty[slotno] = true;
}
- LWLockRelease(shared->buffer_locks[slotno]);
+ LWLockRelease(&shared->buffer_locks[slotno].lock);
}
}
}
shared->page_dirty[slotno] = false;
/* Acquire per-buffer lock (cannot deadlock, see notes at top) */
- LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
+ LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
/* Release control lock while doing I/O */
LWLockRelease(shared->ControlLock);
shared->page_status[slotno] = ok ? SLRU_PAGE_VALID : SLRU_PAGE_EMPTY;
- LWLockRelease(shared->buffer_locks[slotno]);
+ LWLockRelease(&shared->buffer_locks[slotno].lock);
/* Now it's okay to ereport if we failed */
if (!ok)
shared->page_dirty[slotno] = false;
/* Acquire per-buffer lock (cannot deadlock, see notes at top) */
- LWLockAcquire(shared->buffer_locks[slotno], LW_EXCLUSIVE);
+ LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
/* Release control lock while doing I/O */
LWLockRelease(shared->ControlLock);
shared->page_status[slotno] = SLRU_PAGE_VALID;
- LWLockRelease(shared->buffer_locks[slotno]);
+ LWLockRelease(&shared->buffer_locks[slotno].lock);
/* Now it's okay to ereport if we failed */
if (!ok)
SUBTRANSShmemInit(void)
{
SubTransCtl->PagePrecedes = SubTransPagePrecedes;
- SimpleLruInit(SubTransCtl, "SUBTRANS Ctl", NUM_SUBTRANS_BUFFERS, 0,
+ SimpleLruInit(SubTransCtl, "subtrans", NUM_SUBTRANS_BUFFERS, 0,
SubtransControlLock, "pg_subtrans");
/* Override default assumption that writes should be fsync'd */
SubTransCtl->do_fsync = false;
* Set up SLRU management of the pg_notify data.
*/
AsyncCtl->PagePrecedes = asyncQueuePagePrecedes;
- SimpleLruInit(AsyncCtl, "Async Ctl", NUM_ASYNC_BUFFERS, 0,
+ SimpleLruInit(AsyncCtl, "async", NUM_ASYNC_BUFFERS, 0,
AsyncCtlLock, "pg_notify");
/* Override default assumption that writes should be fsync'd */
AsyncCtl->do_fsync = false;
*/
#include "postgres.h"
-#include "access/clog.h"
-#include "access/commit_ts.h"
-#include "access/multixact.h"
-#include "access/subtrans.h"
-#include "commands/async.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "postmaster/postmaster.h"
/* proc.c needs one for each backend or auxiliary process */
numLocks += MaxBackends + NUM_AUXILIARY_PROCS;
- /* clog.c needs one per CLOG buffer */
- numLocks += CLOGShmemBuffers();
-
- /* commit_ts.c needs one per CommitTs buffer */
- numLocks += CommitTsShmemBuffers();
-
- /* subtrans.c needs one per SubTrans buffer */
- numLocks += NUM_SUBTRANS_BUFFERS;
-
- /* multixact.c needs two SLRU areas */
- numLocks += NUM_MXACTOFFSET_BUFFERS + NUM_MXACTMEMBER_BUFFERS;
-
- /* async.c needs one per Async buffer */
- numLocks += NUM_ASYNC_BUFFERS;
-
- /* predicate.c needs one per old serializable xid buffer */
- numLocks += NUM_OLDSERXID_BUFFERS;
-
/* slot.c needs one for each slot */
numLocks += max_replication_slots;
* Set up SLRU management of the pg_serial data.
*/
OldSerXidSlruCtl->PagePrecedes = OldSerXidPagePrecedesLogically;
- SimpleLruInit(OldSerXidSlruCtl, "OldSerXid SLRU Ctl",
+ SimpleLruInit(OldSerXidSlruCtl, "oldserxid",
NUM_OLDSERXID_BUFFERS, 0, OldSerXidLock, "pg_serial");
/* Override default assumption that writes should be fsync'd */
OldSerXidSlruCtl->do_fsync = false;
*/
#define SLRU_PAGES_PER_SEGMENT 32
+/* Maximum length of an SLRU name */
+#define SLRU_MAX_NAME_LENGTH 32
+
/*
* Page status codes. Note that these do not include the "dirty" bit.
* page_dirty can be TRUE only in the VALID or WRITE_IN_PROGRESS states;
bool *page_dirty;
int *page_number;
int *page_lru_count;
- LWLock **buffer_locks;
/*
* Optional array of WAL flush LSNs associated with entries in the SLRU
* the latest page.
*/
int latest_page_number;
+
+ /* LWLocks */
+ int lwlock_tranche_id;
+ LWLockTranche lwlock_tranche;
+ char lwlock_tranche_name[SLRU_MAX_NAME_LENGTH];
+ LWLockPadded *buffer_locks;
} SlruSharedData;
typedef SlruSharedData *SlruShared;