From 9f84280ae94b43b75dcf32aef433545335e7bb16 Mon Sep 17 00:00:00 2001
From: Robert Haas
Date: Thu, 21 Apr 2016 13:24:09 -0400
Subject: [PATCH] Fix assorted defects in 09adc9a8c09c9640de05c7023b27fb83c761e91c.

That commit increased all shared memory allocations to the next higher
multiple of PG_CACHE_LINE_SIZE, but it didn't ensure that allocation
started on a cache line boundary.  It also failed to remove a couple
other pieces of now-useless code.

BUFFERALIGN() is perhaps obsolete at this point, and likely should be
removed at some point, too, but that seems like it can be left to a
future cleanup.

Mistakes all pointed out by Andres Freund.  The patch is mine, with a
few extra assertions which I adopted from his version of this fix.
---
 src/backend/storage/buffer/buf_init.c | 15 ++++++---------
 src/backend/storage/ipc/shmem.c       | 15 +++++++++++----
 2 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index a5cffc7896..5804870ad4 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -76,11 +76,9 @@ InitBufferPool(void)
 
 	/* Align descriptors to a cacheline boundary. */
 	BufferDescriptors = (BufferDescPadded *)
-		CACHELINEALIGN(
-					   ShmemInitStruct("Buffer Descriptors",
-									   NBuffers * sizeof(BufferDescPadded)
-									   + PG_CACHE_LINE_SIZE,
-									   &foundDescs));
+		ShmemInitStruct("Buffer Descriptors",
+						NBuffers * sizeof(BufferDescPadded),
+						&foundDescs);
 
 	BufferBlocks = (char *)
 		ShmemInitStruct("Buffer Blocks",
@@ -88,10 +86,9 @@
 
 	/* Align lwlocks to cacheline boundary */
 	BufferIOLWLockArray = (LWLockMinimallyPadded *)
-		CACHELINEALIGN(ShmemInitStruct("Buffer IO Locks",
-									   NBuffers * (Size) sizeof(LWLockMinimallyPadded)
-									   + PG_CACHE_LINE_SIZE,
-									   &foundIOLocks));
+		ShmemInitStruct("Buffer IO Locks",
+						NBuffers * (Size) sizeof(LWLockMinimallyPadded),
+						&foundIOLocks);
 
 	BufferIOLWLockTranche.name = "buffer_io";
 	BufferIOLWLockTranche.array_base = BufferIOLWLockArray;
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 1ad68cda4e..1efe0201a7 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -112,6 +112,7 @@ void
 InitShmemAllocation(void)
 {
 	PGShmemHeader *shmhdr = ShmemSegHdr;
+	char	   *aligned;
 
 	Assert(shmhdr != NULL);
 
@@ -139,6 +140,11 @@ InitShmemAllocation(void)
 	shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
 	Assert(shmhdr->freeoffset <= shmhdr->totalsize);
 
+	/* Make sure the first allocation begins on a cache line boundary. */
+	aligned = (char *)
+		(CACHELINEALIGN((((char *) shmhdr) + shmhdr->freeoffset)));
+	shmhdr->freeoffset = aligned - (char *) shmhdr;
+
 	SpinLockInit(ShmemLock);
 
 	/* ShmemIndex can't be set up yet (need LWLocks first) */
@@ -189,10 +195,6 @@ ShmemAlloc(Size size)
 
 	newStart = ShmemSegHdr->freeoffset;
 
-	/* extra alignment for large requests, since they are probably buffers */
-	if (size >= BLCKSZ)
-		newStart = BUFFERALIGN(newStart);
-
 	newFree = newStart + size;
 	if (newFree <= ShmemSegHdr->totalsize)
 	{
@@ -209,6 +211,8 @@
 				(errcode(ERRCODE_OUT_OF_MEMORY),
 				 errmsg("out of shared memory")));
 
+	Assert(newSpace == (void *) CACHELINEALIGN(newSpace));
+
 	return newSpace;
 }
 
@@ -425,6 +429,9 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
 	LWLockRelease(ShmemIndexLock);
 
 	Assert(ShmemAddrIsValid(structPtr));
+
+	Assert(structPtr == (void *) CACHELINEALIGN(structPtr));
+
 	return structPtr;
 }
-- 
2.40.0
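
Aside, for readers of this patch: below is a minimal standalone sketch of the
alignment idea behind the InitShmemAllocation() hunk.  Rounding each request
size up to a multiple of PG_CACHE_LINE_SIZE (what
09adc9a8c09c9640de05c7023b27fb83c761e91c did) only keeps allocations aligned
relative to one another; the starting offset must itself be bumped to a cache
line boundary, which is what the new freeoffset adjustment does and what the
added Assert()s then verify.  The names here (CACHE_LINE_SIZE, ALIGN_UP,
segment) are made-up stand-ins for PG_CACHE_LINE_SIZE, CACHELINEALIGN(), and
the shared memory segment; this is an illustrative sketch, not PostgreSQL
source.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE_SIZE 128		/* assumed; PostgreSQL uses PG_CACHE_LINE_SIZE */

/* Round val up to the next multiple of align (align must be a power of two). */
#define ALIGN_UP(align, val) \
	(((uintptr_t) (val) + ((align) - 1)) & ~((uintptr_t) (align) - 1))

int
main(void)
{
	char		segment[4096];		/* stand-in for the shared memory segment */
	uintptr_t	base = (uintptr_t) segment;
	size_t		freeoffset = 37;	/* pretend header bookkeeping ended here */
	uintptr_t	aligned;

	/* Align the address (base + offset), not merely the offset by itself. */
	aligned = ALIGN_UP(CACHE_LINE_SIZE, base + freeoffset);
	freeoffset = aligned - base;
	printf("first allocation begins %zu bytes into the segment\n", freeoffset);

	/* The first allocation now starts on a cache line boundary... */
	assert((base + freeoffset) % CACHE_LINE_SIZE == 0);

	/*
	 * ...and later ones stay aligned as long as every request size is rounded
	 * up to a multiple of the line size, as ShmemAlloc() does with its sizes.
	 */
	freeoffset += ALIGN_UP(CACHE_LINE_SIZE, 1000);
	assert((base + freeoffset) % CACHE_LINE_SIZE == 0);

	return 0;
}

With both halves in place, sizes rounded up and the start aligned, every
pointer ShmemAlloc() and ShmemInitStruct() hand back satisfies the new
CACHELINEALIGN() assertions, so the per-call CACHELINEALIGN() and BUFFERALIGN()
fix-ups removed above are no longer needed.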