From: Eli Friedman
Date: Thu, 24 Jan 2019 21:31:13 +0000 (+0000)
Subject: [Analysis] Fix isSafeToLoadUnconditionally handling of volatile.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=ce8f6d38a5ffea045954e3922ad539ef69f2bd2c;p=llvm

[Analysis] Fix isSafeToLoadUnconditionally handling of volatile.

A volatile operation cannot be used to prove an address points to normal
memory.  (LangRef was recently updated to state it explicitly.)

Differential Revision: https://reviews.llvm.org/D57040

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@352109 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Analysis/Loads.cpp b/lib/Analysis/Loads.cpp
index ba4f759a17b..7da9bd718a5 100644
--- a/lib/Analysis/Loads.cpp
+++ b/lib/Analysis/Loads.cpp
@@ -280,9 +280,17 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
     Value *AccessedPtr;
     unsigned AccessedAlign;
     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
+      // Ignore volatile loads. The execution of a volatile load cannot
+      // be used to prove an address is backed by regular memory; it can,
+      // for example, point to an MMIO register.
+      if (LI->isVolatile())
+        continue;
       AccessedPtr = LI->getPointerOperand();
       AccessedAlign = LI->getAlignment();
     } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
+      // Ignore volatile stores (see comment for loads).
+      if (SI->isVolatile())
+        continue;
       AccessedPtr = SI->getPointerOperand();
       AccessedAlign = SI->getAlignment();
     } else
diff --git a/test/Transforms/SROA/phi-and-select.ll b/test/Transforms/SROA/phi-and-select.ll
index e7ba2e89d79..d0904cecd9f 100644
--- a/test/Transforms/SROA/phi-and-select.ll
+++ b/test/Transforms/SROA/phi-and-select.ll
@@ -632,3 +632,15 @@ exit:
   %result = load i32, i32* %phi, align 4
   ret i32 %result
 }
+
+; Don't speculate a load based on an earlier volatile operation.
+define i8 @volatile_select(i8* %p, i1 %b) {
+; CHECK-LABEL: @volatile_select(
+; CHECK: select i1 %b, i8* %p, i8* %p2
+  %p2 = alloca i8
+  store i8 0, i8* %p2
+  store volatile i8 0, i8* %p
+  %px = select i1 %b, i8* %p, i8* %p2
+  %v2 = load i8, i8* %px
+  ret i8 %v2
+}
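
Why the new test matters: before this change, the volatile store was taken as
proof that %p is dereferenceable, so SROA would speculate the load out of the
select, loading both operands unconditionally and selecting between the loaded
values. A rough sketch of that (incorrect) pre-fix result for @volatile_select,
showing just the immediate effect of speculating the select, before the alloca
itself is promoted; the value names %v.p and %v.p2 are hypothetical:

  ; Sketch of hypothetical pre-fix SROA output (names illustrative):
  %p2 = alloca i8
  store i8 0, i8* %p2
  store volatile i8 0, i8* %p
  %v.p = load i8, i8* %p        ; speculated: runs even when %b is false
  %v.p2 = load i8, i8* %p2
  %v2 = select i1 %b, i8 %v.p, i8 %v.p2
  ret i8 %v2

If %p points at an MMIO register, that speculated plain load of %p executes
even on the path where the original program only touched %p through the
volatile store. With the fix, isSafeToLoadUnconditionally no longer treats the
volatile store as evidence of normal memory, so the select is left in place,
as the CHECK line verifies.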