namespace llvm {
+class MemorySSA::ClobberWalkerBase {
+ ClobberWalker Walker;
+ MemorySSA *MSSA;
+
+public:
+ ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
+ : Walker(*M, *A, *D), MSSA(M) {}
+
+ MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
+ const MemoryLocation &);
+ // The second argument (bool) defines whether the clobber search should skip
+ // the original queried access. If true, a follow-up query is issued that
+ // searches for a clobbering access past "self". Note that the Optimized
+ // access is not updated if a new clobber is found by this SkipSelf search.
+ // If this additional query becomes heavily used we may decide to cache the
+ // result. Walker instantiations decide how to set the SkipSelf bool; see the
+ // sketch after this class for one possible instantiation.
+ MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, bool);
+ void verify(const MemorySSA *MSSA) { Walker.verify(MSSA); }
+};
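// Illustrative sketch (not part of this diff): one way a walker instantiation
// could set the SkipSelf bool is a thin wrapper that mirrors CachingWalker but
// delegates with SkipSelf = true. The SkipSelfWalker name, its declaration
// inside MemorySSA, and its wiring are assumed here for illustration only; any
// remaining overrides would mirror CachingWalker.

class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    // Ask the shared walker to search for a clobber past the access itself.
    return Walker->getClobberingMemoryAccessBase(MA, /*SkipSelf=*/true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    // The location-based overload never skips self (see the comment above).
    return Walker->getClobberingMemoryAccessBase(MA, Loc);
  }
};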
+
/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
- ClobberWalker Walker;
-
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
+ ClobberWalkerBase *Walker;
public:
- CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
+ CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
+ : MemorySSAWalker(M), Walker(W) {}
~CachingWalker() override = default;
using MemorySSAWalker::getClobberingMemoryAccess;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
- MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
- const MemoryLocation &) override;
- void invalidateInfo(MemoryAccess *) override;
+ MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
+ MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
+ const MemoryLocation &Loc) override;
+
+ void invalidateInfo(MemoryAccess *MA) override {
+ if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
+ MUD->resetOptimized();
+ }
void verify(const MemorySSA *MSSA) override {
MemorySSAWalker::verify(MSSA);
- Walker.verify(MSSA);
+ Walker->verify(MSSA);
}
};
if (Walker)
return Walker.get();
- Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
+ if (!WalkerBase)
+ WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
+
+ Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
return Walker.get();
}
MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
-MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
- DominatorTree *D)
- : MemorySSAWalker(M), Walker(*M, *A, *D) {}
-
-void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
- if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
- MUD->resetOptimized();
-}
-
-/// Walk the use-def chains starting at \p MA and find
+/// Walk the use-def chains starting at \p StartingAccess and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
- MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
- return Walker.findClobber(StartingAccess, Q);
-}
-
-MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
+MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
if (isa<MemoryPhi>(StartingAccess))
return StartingAccess;
// Unlike the other function, do not walk to the def of a def, because we are
// handed something we already believe is the clobbering access.
+ // We never set SkipSelf to true in Q in this method.
MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
? StartingUseOrDef->getDefiningAccess()
: StartingUseOrDef;
- MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
+ MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q);
LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
}
MemoryAccess *
-MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
+ bool SkipSelf) {
auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
// If this is a MemoryPhi, we can't do anything.
if (!StartingAccess)
return MA;
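+ // IsOptimized records the case where StartingAccess already has a cached
+ // clobber but, because SkipSelf was requested for a MemoryDef, the walk must
+ // still continue past that cached result below.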
+ bool IsOptimized = false;
+
// If this is an already optimized use or def, return the optimized result.
// Note: Currently, we store the optimized def result in a separate field,
// since we can't use the defining access.
- if (StartingAccess->isOptimized())
- return StartingAccess->getOptimized();
+ if (StartingAccess->isOptimized()) {
+ if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
+ return StartingAccess->getOptimized();
+ IsOptimized = true;
+ }
const Instruction *I = StartingAccess->getMemoryInst();
// We can't sanely do anything with a fence, since they conservatively clobber
return LiveOnEntry;
}
- // Start with the thing we already think clobbers this location
- MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
+ MemoryAccess *OptimizedAccess;
+ if (!IsOptimized) {
+ // Start with the thing we already think clobbers this location
+ MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
+
+ // At this point, DefiningAccess may be the live on entry def.
+ // If it is, we will not get a better result.
+ if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
+ StartingAccess->setOptimized(DefiningAccess);
+ StartingAccess->setOptimizedAccessType(None);
+ return DefiningAccess;
+ }
- // At this point, DefiningAccess may be the live on entry def.
- // If it is, we will not get a better result.
- if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
- StartingAccess->setOptimized(DefiningAccess);
- StartingAccess->setOptimizedAccessType(None);
- return DefiningAccess;
- }
+ OptimizedAccess = Walker.findClobber(DefiningAccess, Q);
+ StartingAccess->setOptimized(OptimizedAccess);
+ if (MSSA->isLiveOnEntryDef(OptimizedAccess))
+ StartingAccess->setOptimizedAccessType(None);
+ else if (Q.AR == MustAlias)
+ StartingAccess->setOptimizedAccessType(MustAlias);
+ } else
+ OptimizedAccess = StartingAccess->getOptimized();
- MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
- LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
- LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
- LLVM_DEBUG(dbgs() << *Result << "\n");
-
- StartingAccess->setOptimized(Result);
- if (MSSA->isLiveOnEntryDef(Result))
- StartingAccess->setOptimizedAccessType(None);
- else if (Q.AR == MustAlias)
- StartingAccess->setOptimizedAccessType(MustAlias);
+ LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
+ LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
+ LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
+
+ MemoryAccess *Result;
+ if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
+ isa<MemoryDef>(StartingAccess)) {
+ assert(isa<MemoryDef>(Q.OriginalAccess));
+ Q.SkipSelfAccess = true;
+ Result = Walker.findClobber(OptimizedAccess, Q);
+ } else
+ Result = OptimizedAccess;
+
+ LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
+ LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
return Result;
}
+MemoryAccess *
+MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
+ return Walker->getClobberingMemoryAccessBase(MA, false);
+}
+
+MemoryAccess *
+MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
+ const MemoryLocation &Loc) {
+ return Walker->getClobberingMemoryAccessBase(MA, Loc);
+}
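// Illustrative usage sketch (assumes an existing MemorySSA analysis `MSSA` and
// a MemoryAccess `MA`): clients still go through MemorySSA::getWalker(), which
// now lazily constructs the shared ClobberWalkerBase and wraps it in the
// CachingWalker shown above.

  MemorySSAWalker *W = MSSA.getWalker();
  MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);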
+
MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))