/// If the client speculates (and then issues run-time checks) for the values
/// of symbolic strides, \p Strides provides the mapping (see
/// replaceSymbolicStrideSCEV). If there is no cached result available, run
/// the analysis.
- const LoopAccessInfo &getInfo(Loop *L, const ValueToValueMap &Strides);
+ const LoopAccessInfo &
+ getInfo(Loop *L, const ValueToValueMap &Strides = ValueToValueMap());
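
// (Illustration, not part of the patch: with the default argument, clients
// that do not speculate on symbolic strides can drop the empty-map
// boilerplate, while speculating clients still pass their mapping.
// `SymbolicStrides` below is a hypothetical, already-populated map.)
//
//   const LoopAccessInfo &LAI = LAA.getInfo(L);  // no stride speculation
//   const LoopAccessInfo &SpecLAI = LAA.getInfo(L, SymbolicStrides);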
void releaseMemory() override {
  // Invalidate the cache when the pass is freed.
  LoopAccessInfoMap.clear();
}

void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {
LoopAccessAnalysis &LAA = *const_cast<LoopAccessAnalysis *>(this);
- ValueToValueMap NoSymbolicStrides;
-
for (Loop *TopLevelLoop : *LI)
for (Loop *L : depth_first(TopLevelLoop)) {
OS.indent(2) << L->getHeader()->getName() << ":\n";
- auto &LAI = LAA.getInfo(L, NoSymbolicStrides);
+ auto &LAI = LAA.getInfo(L);
LAI.print(OS, 4);
}
}
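
// (Aside, not in the patch: with the legacy pass manager this printer runs
// when the analysis results are requested for inspection, e.g. via something
// like `opt -loop-accesses -analyze`; the exact invocation is inferred from
// the pass's registered name and should be treated as an assumption.)
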
return fail("multiple exit blocks");
// LAA will check that we only have a single exiting block.
- LAI = &LAA->getInfo(L, ValueToValueMap());
+ LAI = &LAA->getInfo(L);
// Currently, we only distribute to isolate the part of the loop with
// dependence cycles to enable partial vectorization.
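
// (Illustration, echoing the canonical example from LoopDistribute: S1 forms
// a dependence cycle (A[i + 1] reads A[i]), so the loop cannot be vectorized
// as a whole; splitting S2 into its own loop makes that half vectorizable
// while the S1 cycle stays in a separate, sequential loop.)
//
//   for (unsigned i = 0; i < n; ++i) {
//     A[i + 1] = A[i] * B[i]; // S1: recurrence, participates in the cycle
//     C[i] = D[i] * E[i];     // S2: independent, vectorizable after the split
//   }
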
// Now walk the identified inner loops.
bool Changed = false;
for (Loop *L : Worklist) {
- const LoopAccessInfo &LAI = LAA->getInfo(L, ValueToValueMap());
+ const LoopAccessInfo &LAI = LAA->getInfo(L);
// The actual work is performed by LoadEliminationForLoop.
LoadEliminationForLoop LEL(L, LI, LAI, DT);
Changed |= LEL.processLoop();

// Now walk the identified inner loops.
bool Changed = false;
for (Loop *L : Worklist) {
- const LoopAccessInfo &LAI = LAA->getInfo(L, ValueToValueMap());
+ const LoopAccessInfo &LAI = LAA->getInfo(L);
if (LAI.getNumRuntimePointerChecks() ||
!LAI.PSE.getUnionPredicate().isAlwaysTrue()) {
LoopVersioning LVer(LAI, L, LI, DT, SE);
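      // (Sketch, not part of the excerpt: the pass then performs the actual
      // transformation roughly as below; the exact member functions are
      // recalled from the surrounding code and should be treated as an
      // assumption.)
      LVer.versionLoop();             // emit the runtime-checked version
      LVer.annotateLoopWithNoAlias(); // tag the checked version as noalias
      Changed = true;
    }
  }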