/// Memory block abstraction.
class MemoryBlock {
public:
- MemoryBlock() : Address(nullptr), Size(0) { }
- MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { }
+ MemoryBlock() : Address(nullptr), AllocatedSize(0) {}
+ MemoryBlock(void *addr, size_t allocatedSize)
+ : Address(addr), AllocatedSize(allocatedSize) {}
void *base() const { return Address; }
- size_t size() const { return Size; }
-
+ /// The size as it was allocated. This is always greater than or equal to
+ /// the size that was originally requested.
+ size_t allocatedSize() const { return AllocatedSize; }
+
private:
void *Address; ///< Address of first byte of memory area
- size_t Size; ///< Size, in bytes of the memory area
+ size_t AllocatedSize; ///< Size, in bytes, of the memory area
unsigned Flags = 0;
friend class Memory;
};
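A minimal usage sketch (illustrative only, not part of this patch) of the new
accessor, assuming the sys::Memory API shown in the hunks below; the 100-byte
request size is arbitrary:

  // (Requires llvm/Support/Memory.h.)
  std::error_code EC;
  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
      100, /*NearBlock=*/nullptr,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  if (!EC) {
    // allocatedSize() reports the page-rounded size, so it is at least the
    // 100 bytes that were requested (typically one full page).
    assert(MB.allocatedSize() >= 100);
    sys::Memory::releaseMappedMemory(MB);
  }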
Memory::releaseMappedMemory(M);
}
void *base() const { return M.base(); }
- size_t size() const { return M.size(); }
+ /// The size as it was allocated. This is always greater than or equal to
+ /// the size that was originally requested.
+ size_t allocatedSize() const { return M.allocatedSize(); }
MemoryBlock getMemoryBlock() const { return M; }
private:
MemoryBlock M;
MutableArrayRef<char> getWorkingMemory(ProtectionFlags Seg) override {
assert(SegBlocks.count(Seg) && "No allocation for segment");
return {static_cast<char *>(SegBlocks[Seg].base()),
- SegBlocks[Seg].size()};
+ SegBlocks[Seg].allocatedSize()};
}
JITTargetAddress getTargetMemory(ProtectionFlags Seg) override {
assert(SegBlocks.count(Seg) && "No allocation for segment");
if (auto EC = sys::Memory::protectMappedMemory(Block, Prot))
return errorCodeToError(EC);
if (Prot & sys::Memory::MF_EXEC)
- sys::Memory::InvalidateInstructionCache(Block.base(), Block.size());
+ sys::Memory::InvalidateInstructionCache(Block.base(),
+ Block.allocatedSize());
}
return Error::success();
}
// Look in the list of free memory regions and use a block there if one
// is available.
for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
- if (FreeMB.Free.size() >= RequiredSize) {
+ if (FreeMB.Free.allocatedSize() >= RequiredSize) {
Addr = (uintptr_t)FreeMB.Free.base();
- uintptr_t EndOfBlock = Addr + FreeMB.Free.size();
+ uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
// Align the address.
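+ // (This rounds Addr up to the next multiple of Alignment, which must be a
+ // power of two; e.g. with Alignment == 16, 0x1003 becomes 0x1010.)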
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
// Remember that we allocated this memory
MemGroup.AllocatedMem.push_back(MB);
Addr = (uintptr_t)MB.base();
- uintptr_t EndOfBlock = Addr + MB.size();
+ uintptr_t EndOfBlock = Addr + MB.allocatedSize();
// Align the address.
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);
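+ // For example, with PageSize 4096 a 12544-byte block starting at 0x10100
+ // trims to the page-aligned block [0x11000, 0x13000): StartOverlap = 3840,
+ // TrimmedSize = 12544 - 3840 = 8704, rounded down to 8192.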
size_t StartOverlap =
(PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
- size_t TrimmedSize = M.size();
+ size_t TrimmedSize = M.allocatedSize();
TrimmedSize -= StartOverlap;
TrimmedSize -= TrimmedSize % PageSize;
TrimmedSize);
assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
- assert((Trimmed.size() % PageSize) == 0);
- assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size());
+ assert((Trimmed.allocatedSize() % PageSize) == 0);
+ assert(M.base() <= Trimmed.base() &&
+ Trimmed.allocatedSize() <= M.allocatedSize());
return Trimmed;
}
}
// Remove all blocks which are now empty
- MemGroup.FreeMem.erase(
- remove_if(MemGroup.FreeMem,
- [](FreeMemBlock &FreeMB) { return FreeMB.Free.size() == 0; }),
- MemGroup.FreeMem.end());
+ MemGroup.FreeMem.erase(remove_if(MemGroup.FreeMem,
+ [](FreeMemBlock &FreeMB) {
+ return FreeMB.Free.allocatedSize() == 0;
+ }),
+ MemGroup.FreeMem.end());
return std::error_code();
}
void SectionMemoryManager::invalidateInstructionCache() {
for (sys::MemoryBlock &Block : CodeMem.PendingMem)
- sys::Memory::InvalidateInstructionCache(Block.base(), Block.size());
+ sys::Memory::InvalidateInstructionCache(Block.base(),
+ Block.allocatedSize());
}
SectionMemoryManager::~SectionMemoryManager() {
allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
size_t NumBytes, const sys::MemoryBlock *const NearBlock,
unsigned Flags, std::error_code &EC) override {
- // allocateMappedMemory calls mmap(2). We round up a request size
- // to page size to get extra space for free.
- static const size_t PageSize = sys::Process::getPageSizeEstimate();
- size_t ReqBytes = (NumBytes + PageSize - 1) & ~(PageSize - 1);
- return sys::Memory::allocateMappedMemory(ReqBytes, NearBlock, Flags, EC);
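+ // sys::Memory::allocateMappedMemory now rounds the request up to the page
+ // size itself, so NumBytes is passed through unchanged.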
+ return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
}
std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
// output file on commit(). This is used only when we cannot use OnDiskBuffer.
class InMemoryBuffer : public FileOutputBuffer {
public:
- InMemoryBuffer(StringRef Path, MemoryBlock Buf, unsigned Mode)
- : FileOutputBuffer(Path), Buffer(Buf), Mode(Mode) {}
+ InMemoryBuffer(StringRef Path, MemoryBlock Buf, std::size_t BufSize,
+ unsigned Mode)
+ : FileOutputBuffer(Path), Buffer(Buf), BufferSize(BufSize),
+ Mode(Mode) {}
uint8_t *getBufferStart() const override { return (uint8_t *)Buffer.base(); }
uint8_t *getBufferEnd() const override {
- return (uint8_t *)Buffer.base() + Buffer.size();
+ return (uint8_t *)Buffer.base() + BufferSize;
}
- size_t getBufferSize() const override { return Buffer.size(); }
+ size_t getBufferSize() const override { return BufferSize; }
Error commit() override {
if (FinalPath == "-") {
- llvm::outs() << StringRef((const char *)Buffer.base(), Buffer.size());
+ llvm::outs() << StringRef((const char *)Buffer.base(), BufferSize);
llvm::outs().flush();
return Error::success();
}
openFileForWrite(FinalPath, FD, CD_CreateAlways, OF_None, Mode))
return errorCodeToError(EC);
raw_fd_ostream OS(FD, /*shouldClose=*/true, /*unbuffered=*/true);
- OS << StringRef((const char *)Buffer.base(), Buffer.size());
+ OS << StringRef((const char *)Buffer.base(), BufferSize);
return Error::success();
}
private:
+ // Buffer may hold a larger (page-rounded) memory block than BufferSize,
+ // which is the number of bytes that was originally requested.
OwningMemoryBlock Buffer;
+ size_t BufferSize;
unsigned Mode;
};
} // namespace
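For reference, a minimal caller sketch (illustrative only) of the public
FileOutputBuffer API: the reported buffer size stays exactly what was
requested, even when the backing mapping is page-rounded. The path "out.bin"
and the 100-byte size are placeholders:

  // (Requires llvm/Support/FileOutputBuffer.h and llvm/Support/Error.h.)
  auto BufOrErr = FileOutputBuffer::create("out.bin", /*Size=*/100);
  if (BufOrErr) {
    std::unique_ptr<FileOutputBuffer> &Buf = *BufOrErr;
    assert(Buf->getBufferSize() == 100); // BufferSize, not the mapping size.
    Buf->getBufferStart()[0] = 0x7f;     // Write into the buffer as usual.
    if (Error E = Buf->commit())
      consumeError(std::move(E));
  } else
    consumeError(BufOrErr.takeError());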
Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
if (EC)
return errorCodeToError(EC);
- return llvm::make_unique<InMemoryBuffer>(Path, MB, Mode);
+ return llvm::make_unique<InMemoryBuffer>(Path, MB, Size, Mode);
}
static Expected<std::unique_ptr<FileOutputBuffer>>
raw_ostream &operator<<(raw_ostream &OS, const MemoryBlock &MB) {
return OS << "[ " << MB.base() << " .. "
- << (void *)((char *)MB.base() + MB.size()) << " ] (" << MB.size()
- << " bytes)";
+ << (void *)((char *)MB.base() + MB.allocatedSize()) << " ] ("
+ << MB.allocatedSize() << " bytes)";
}
} // end namespace sys
// Use any near hint and the page size to set a page-aligned starting address
uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
- NearBlock->size() : 0;
+ NearBlock->allocatedSize() : 0;
static const size_t PageSize = Process::getPageSizeEstimate();
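+ // Round the request up to whole pages; the MemoryBlock returned below
+ // records this rounded size as its AllocatedSize.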
+ const size_t NumPages = (NumBytes + PageSize - 1) / PageSize;
+
if (Start && Start % PageSize)
Start += PageSize - Start % PageSize;
// FIXME: Handle huge page requests (MF_HUGE_HINT).
- void *Addr = ::mmap(reinterpret_cast<void *>(Start), NumBytes, Protect,
+ void *Addr = ::mmap(reinterpret_cast<void *>(Start), PageSize * NumPages, Protect,
MMFlags, fd, 0);
if (Addr == MAP_FAILED) {
if (NearBlock) { //Try again without a near hint
MemoryBlock Result;
Result.Address = Addr;
- Result.Size = NumBytes;
+ Result.AllocatedSize = PageSize * NumPages;
Result.Flags = PFlags;
// Rely on protectMappedMemory to invalidate instruction cache.
std::error_code
Memory::releaseMappedMemory(MemoryBlock &M) {
- if (M.Address == nullptr || M.Size == 0)
+ if (M.Address == nullptr || M.AllocatedSize == 0)
return std::error_code();
- if (0 != ::munmap(M.Address, M.Size))
+ if (0 != ::munmap(M.Address, M.AllocatedSize))
return std::error_code(errno, std::generic_category());
M.Address = nullptr;
- M.Size = 0;
+ M.AllocatedSize = 0;
return std::error_code();
}
std::error_code
Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) {
static const size_t PageSize = Process::getPageSizeEstimate();
- if (M.Address == nullptr || M.Size == 0)
+ if (M.Address == nullptr || M.AllocatedSize == 0)
return std::error_code();
if (!Flags)
int Protect = getPosixProtectionFlags(Flags);
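+ // Widen the range to whole pages: Start is the start of the page containing
+ // M.Address, End is the page boundary after the last byte. E.g. with
+ // PageSize 4096, the block [0x10100, 0x10180) becomes [0x10000, 0x11000).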
uintptr_t Start = alignAddr((uint8_t *)M.Address - PageSize + 1, PageSize);
- uintptr_t End = alignAddr((uint8_t *)M.Address + M.Size, PageSize);
+ uintptr_t End = alignAddr((uint8_t *)M.Address + M.AllocatedSize, PageSize);
bool InvalidateCache = (Flags & MF_EXEC);
if (Result != 0)
return std::error_code(errno, std::generic_category());
- Memory::InvalidateInstructionCache(M.Address, M.Size);
+ Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
InvalidateCache = false;
}
#endif
return std::error_code(errno, std::generic_category());
if (InvalidateCache)
- Memory::InvalidateInstructionCache(M.Address, M.Size);
+ Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
return std::error_code();
}
size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity;
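+ // Windows reserves memory in units of the allocation granularity (typically
+ // 64 KiB), so the resulting block records the rounded-up allocation size
+ // rather than NumBytes.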
uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) +
- NearBlock->size()
+ NearBlock->allocatedSize()
: 0;
// If the requested address is not aligned to the allocation granularity,
MemoryBlock Result;
Result.Address = PA;
- Result.Size = NumBytes;
+ Result.AllocatedSize = AllocSize;
Result.Flags = (Flags & ~MF_HUGE_HINT) | (HugePages ? MF_HUGE_HINT : 0);
if (Flags & MF_EXEC)
}
std::error_code Memory::releaseMappedMemory(MemoryBlock &M) {
- if (M.Address == 0 || M.Size == 0)
+ if (M.Address == 0 || M.AllocatedSize == 0)
return std::error_code();
if (!VirtualFree(M.Address, 0, MEM_RELEASE))
return mapWindowsError(::GetLastError());
M.Address = 0;
- M.Size = 0;
+ M.AllocatedSize = 0;
return std::error_code();
}
std::error_code Memory::protectMappedMemory(const MemoryBlock &M,
unsigned Flags) {
- if (M.Address == 0 || M.Size == 0)
+ if (M.Address == 0 || M.AllocatedSize == 0)
return std::error_code();
DWORD Protect = getWindowsProtectionFlags(Flags);
DWORD OldFlags;
- if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags))
+ if (!VirtualProtect(M.Address, M.AllocatedSize, Protect, &OldFlags))
return mapWindowsError(::GetLastError());
if (Flags & MF_EXEC)
- Memory::InvalidateInstructionCache(M.Address, M.Size);
+ Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize);
return std::error_code();
}
// reason (e.g. zero byte COFF sections). Don't include those sections in
// the allocation map.
if (LoadAddr != 0)
- AlreadyAllocated[LoadAddr] = (*Tmp)->MB.size();
+ AlreadyAllocated[LoadAddr] = (*Tmp)->MB.allocatedSize();
Worklist.erase(Tmp);
}
}
uint64_t NextSectionAddr = TargetAddrStart;
for (const auto &Alloc : AlreadyAllocated)
- if (NextSectionAddr + CurEntry->MB.size() + TargetSectionSep <= Alloc.first)
+ if (NextSectionAddr + CurEntry->MB.allocatedSize() + TargetSectionSep <=
+ Alloc.first)
break;
else
NextSectionAddr = Alloc.first + Alloc.second + TargetSectionSep;
Dyld.mapSectionAddress(CurEntry->MB.base(), NextSectionAddr);
- AlreadyAllocated[NextSectionAddr] = CurEntry->MB.size();
+ AlreadyAllocated[NextSectionAddr] = CurEntry->MB.allocatedSize();
}
// Add dummy symbols to the memory manager.
return true;
if (M1.base() > M2.base())
- return (unsigned char *)M2.base() + M2.size() > M1.base();
+ return (unsigned char *)M2.base() + M2.allocatedSize() > M1.base();
- return (unsigned char *)M1.base() + M1.size() > M2.base();
+ return (unsigned char *)M1.base() + M1.allocatedSize() > M2.base();
}
unsigned Flags;
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(sizeof(int), M1.size());
+ EXPECT_LE(sizeof(int), M1.allocatedSize());
EXPECT_FALSE(Memory::releaseMappedMemory(M1));
}
// returned, if large pages aren't available.
EXPECT_NE((void *)nullptr, M1.base());
- EXPECT_LE(sizeof(int), M1.size());
+ EXPECT_LE(sizeof(int), M1.allocatedSize());
EXPECT_FALSE(Memory::releaseMappedMemory(M1));
}
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(16U, M1.size());
+ EXPECT_LE(16U, M1.allocatedSize());
EXPECT_NE((void*)nullptr, M2.base());
- EXPECT_LE(64U, M2.size());
+ EXPECT_LE(64U, M2.allocatedSize());
EXPECT_NE((void*)nullptr, M3.base());
- EXPECT_LE(32U, M3.size());
+ EXPECT_LE(32U, M3.allocatedSize());
EXPECT_FALSE(doesOverlap(M1, M2));
EXPECT_FALSE(doesOverlap(M2, M3));
MemoryBlock M4 = Memory::allocateMappedMemory(16, nullptr, Flags, EC);
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M4.base());
- EXPECT_LE(16U, M4.size());
+ EXPECT_LE(16U, M4.allocatedSize());
EXPECT_FALSE(Memory::releaseMappedMemory(M4));
EXPECT_FALSE(Memory::releaseMappedMemory(M2));
}
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(sizeof(int), M1.size());
+ EXPECT_LE(sizeof(int), M1.allocatedSize());
int *a = (int*)M1.base();
*a = 1;
EXPECT_FALSE(doesOverlap(M1, M3));
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(1U * sizeof(int), M1.size());
+ EXPECT_LE(1U * sizeof(int), M1.allocatedSize());
EXPECT_NE((void*)nullptr, M2.base());
- EXPECT_LE(8U * sizeof(int), M2.size());
+ EXPECT_LE(8U * sizeof(int), M2.allocatedSize());
EXPECT_NE((void*)nullptr, M3.base());
- EXPECT_LE(4U * sizeof(int), M3.size());
+ EXPECT_LE(4U * sizeof(int), M3.allocatedSize());
int *x = (int*)M1.base();
*x = 1;
Flags, EC);
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M4.base());
- EXPECT_LE(64U * sizeof(int), M4.size());
+ EXPECT_LE(64U * sizeof(int), M4.allocatedSize());
x = (int*)M4.base();
*x = 4;
EXPECT_EQ(4, *x);
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(2U * sizeof(int), M1.size());
+ EXPECT_LE(2U * sizeof(int), M1.allocatedSize());
EXPECT_NE((void*)nullptr, M2.base());
- EXPECT_LE(8U * sizeof(int), M2.size());
+ EXPECT_LE(8U * sizeof(int), M2.allocatedSize());
EXPECT_NE((void*)nullptr, M3.base());
- EXPECT_LE(4U * sizeof(int), M3.size());
+ EXPECT_LE(4U * sizeof(int), M3.allocatedSize());
EXPECT_FALSE(Memory::protectMappedMemory(M1, getTestableEquivalent(Flags)));
EXPECT_FALSE(Memory::protectMappedMemory(M2, getTestableEquivalent(Flags)));
MemoryBlock M4 = Memory::allocateMappedMemory(16, nullptr, Flags, EC);
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M4.base());
- EXPECT_LE(16U, M4.size());
+ EXPECT_LE(16U, M4.allocatedSize());
EXPECT_EQ(std::error_code(),
Memory::protectMappedMemory(M4, getTestableEquivalent(Flags)));
x = (int*)M4.base();
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(16U, M1.size());
+ EXPECT_LE(16U, M1.allocatedSize());
EXPECT_NE((void*)nullptr, M2.base());
- EXPECT_LE(64U, M2.size());
+ EXPECT_LE(64U, M2.allocatedSize());
EXPECT_NE((void*)nullptr, M3.base());
- EXPECT_LE(32U, M3.size());
+ EXPECT_LE(32U, M3.allocatedSize());
EXPECT_FALSE(doesOverlap(M1, M2));
EXPECT_FALSE(doesOverlap(M2, M3));
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(16U, M1.size());
+ EXPECT_LE(16U, M1.allocatedSize());
EXPECT_NE((void*)nullptr, M2.base());
- EXPECT_LE(64U, M2.size());
+ EXPECT_LE(64U, M2.allocatedSize());
EXPECT_NE((void*)nullptr, M3.base());
- EXPECT_LE(32U, M3.size());
+ EXPECT_LE(32U, M3.allocatedSize());
EXPECT_FALSE(Memory::releaseMappedMemory(M1));
EXPECT_FALSE(Memory::releaseMappedMemory(M3));
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(16U, M1.size());
+ EXPECT_LE(16U, M1.allocatedSize());
EXPECT_NE((void*)nullptr, M2.base());
- EXPECT_LE(64U, M2.size());
+ EXPECT_LE(64U, M2.allocatedSize());
EXPECT_NE((void*)nullptr, M3.base());
- EXPECT_LE(32U, M3.size());
+ EXPECT_LE(32U, M3.allocatedSize());
EXPECT_FALSE(doesOverlap(M1, M2));
EXPECT_FALSE(doesOverlap(M2, M3));
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(16U, M1.size());
+ EXPECT_LE(16U, M1.allocatedSize());
EXPECT_NE((void*)nullptr, M2.base());
- EXPECT_LE(64U, M2.size());
+ EXPECT_LE(64U, M2.allocatedSize());
EXPECT_NE((void*)nullptr, M3.base());
- EXPECT_LE(32U, M3.size());
+ EXPECT_LE(32U, M3.allocatedSize());
EXPECT_FALSE(doesOverlap(M1, M2));
EXPECT_FALSE(doesOverlap(M2, M3));
EXPECT_EQ(std::error_code(), EC);
EXPECT_NE((void*)nullptr, M1.base());
- EXPECT_LE(sizeof(int), M1.size());
+ EXPECT_LE(sizeof(int), M1.allocatedSize());
EXPECT_FALSE(Memory::releaseMappedMemory(M1));
}