LoopAccessInfo(Loop *L, ScalarEvolution *SE, const DataLayout &DL,
const TargetLibraryInfo *TLI, AliasAnalysis *AA,
DominatorTree *DT, LoopInfo *LI,
- const ValueToValueMap &Strides);
+ bool SpeculateSymbolicStrides);
/// Return true we can analyze the memory accesses in the loop and there are
/// no memory dependence cycles.
return DepChecker.getInstructionsForAccess(Ptr, isWrite);
}
+ /// \brief If an access has a symbolic stride, this maps the pointer value to
+ /// the stride symbol.
+ const ValueToValueMap &getSymbolicStrides() const { return SymbolicStrides; }
+
+ /// \brief Pointer has a symbolic stride.
+ bool hasStride(Value *V) const { return StrideSet.count(V); }
+
/// \brief Print the information about the memory accesses in the loop.
void print(raw_ostream &OS, unsigned Depth = 0) const;
/// \brief Used to ensure that if the analysis was run with speculating the
/// value of symbolic strides, the client queries it with the same assumption.
/// Only used in DEBUG build but we don't want NDEBUG-dependent ABI.
- unsigned NumSymbolicStrides;
+ bool SpeculateSymbolicStrides;
/// \brief Checks existence of store to invariant address inside loop.
/// If the loop has any store to invariant address, then it returns true,
PredicatedScalarEvolution PSE;
private:
- /// \brief Analyze the loop. Substitute symbolic strides using Strides.
- void analyzeLoop(const ValueToValueMap &SymbolicStrides);
+ /// \brief Analyze the loop.
+ void analyzeLoop();
/// \brief Check if the structure of the loop allows it to be analyzed by this
/// pass.
void emitAnalysis(LoopAccessReport &Message);
+ /// \brief Collect memory accesses with loop invariant strides.
+ ///
+ /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
+ /// invariant.
+ void collectStridedAccess(Value *LoadOrStoreInst);
+
/// We need to check that all of the pointers in this list are disjoint
/// at runtime.
RuntimePointerChecking PtrRtChecking;
/// \brief The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
Optional<LoopAccessReport> Report;
+
+ /// \brief If an access has a symbolic stride, this maps the pointer value to
+ /// the stride symbol.
+ ValueToValueMap SymbolicStrides;
+
+ /// \brief Set of symbolic strides values.
+ SmallPtrSet<Value *, 8> StrideSet;
};
Value *stripIntegerCast(Value *V);
/// \brief Query the result of the loop access information for the loop \p L.
///
- /// If the client speculates (and then issues run-time checks) for the values
- /// of symbolic strides, \p Strides provides the mapping (see
- /// replaceSymbolicStrideSCEV). If there is no cached result available run
- /// the analysis.
- const LoopAccessInfo &
- getInfo(Loop *L, const ValueToValueMap &Strides = ValueToValueMap());
+ /// \p SpeculateSymbolicStrides enables symbolic value speculation. The
+ /// corresponding run-time checks are collected in LAI::PSE.
+ ///
+ /// If there is no cached result available run the analysis.
+ const LoopAccessInfo &getInfo(Loop *L, bool SpeculateSymbolicStrides = false);
void releaseMemory() override {
// Invalidate the cache when the pass is freed.
return true;
}
-void LoopAccessInfo::analyzeLoop(const ValueToValueMap &SymbolicStrides) {
-
+void LoopAccessInfo::analyzeLoop() {
typedef SmallPtrSet<Value*, 16> ValueSet;
// Holds the Load and Store instructions.
NumLoads++;
Loads.push_back(Ld);
DepChecker.addAccess(Ld);
+ if (SpeculateSymbolicStrides)
+ collectStridedAccess(Ld);
continue;
}
NumStores++;
Stores.push_back(St);
DepChecker.addAccess(St);
+ if (SpeculateSymbolicStrides)
+ collectStridedAccess(St);
}
} // Next instr.
} // Next block.
return addRuntimeChecks(Loc, PtrRtChecking.getChecks());
}
+/// \brief Record the symbolic stride (if any) behind \p MemAccess.
+///
+/// Only LoadInst/StoreInst are considered; any other value is ignored. When
+/// getStrideFromPointer reports a symbolic stride for the pointer operand,
+/// the mapping pointer -> stride is remembered in SymbolicStrides and the
+/// stride value in StrideSet, so hasStride()/getSymbolicStrides() can answer
+/// later queries.
+void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
+ Value *Ptr = nullptr;
+ if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
+ Ptr = LI->getPointerOperand();
+ else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
+ Ptr = SI->getPointerOperand();
+ else
+ return;
+
+ Value *Stride = getStrideFromPointer(Ptr, PSE.getSE(), TheLoop);
+ // Nothing to speculate on if the pointer has no symbolic stride.
+ if (!Stride)
+ return;
+
+ DEBUG(dbgs() << "LAA: Found a strided access that we can version");
+ DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
+ SymbolicStrides[Ptr] = Stride;
+ StrideSet.insert(Stride);
+}
+
LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
const DataLayout &DL,
const TargetLibraryInfo *TLI, AliasAnalysis *AA,
DominatorTree *DT, LoopInfo *LI,
- const ValueToValueMap &Strides)
- : PSE(*SE, *L), PtrRtChecking(SE), DepChecker(PSE, L), TheLoop(L), DL(DL),
- TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
+ bool SpeculateSymbolicStrides)
+ : SpeculateSymbolicStrides(SpeculateSymbolicStrides), PSE(*SE, *L),
+ PtrRtChecking(SE), DepChecker(PSE, L), TheLoop(L), DL(DL), TLI(TLI),
+ AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
MaxSafeDepDistBytes(-1U), CanVecMem(false),
StoreToLoopInvariantAddress(false) {
+ // When SpeculateSymbolicStrides is set, analyzeLoop() populates
+ // SymbolicStrides/StrideSet via collectStridedAccess(); callers no longer
+ // pass in a precomputed stride map.
if (canAnalyzeLoop())
- analyzeLoop(Strides);
+ analyzeLoop();
}
void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
}
const LoopAccessInfo &
-LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
+LoopAccessAnalysis::getInfo(Loop *L, bool SpeculateSymbolicStrides) {
auto &LAI = LoopAccessInfoMap[L];
+ // A cached result is only reusable if it was computed with the same
+ // speculation setting; querying with a different setting is a client error.
#ifndef NDEBUG
- assert((!LAI || LAI->NumSymbolicStrides == Strides.size()) &&
+ assert((!LAI || LAI->SpeculateSymbolicStrides == SpeculateSymbolicStrides) &&
"Symbolic strides changed for loop");
#endif
if (!LAI) {
const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
- LAI =
- llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI, Strides);
-#ifndef NDEBUG
- LAI->NumSymbolicStrides = Strides.size();
-#endif
+ LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, LI,
+ SpeculateSymbolicStrides);
}
return *LAI.get();
}
bool legalLoopStructure();
bool legalLoopInstructions();
bool legalLoopMemoryAccesses();
- void collectStridedAccess(Value *LoadOrStoreInst);
bool isLoopAlreadyVisited();
void setNoAliasToLoop(Loop *);
bool instructionSafeForVersioning(Instruction *);
};
}
-/// \brief Collects stride access from a given value.
-void LoopVersioningLICM::collectStridedAccess(Value *MemAccess) {
- Value *Ptr = nullptr;
- if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
- Ptr = LI->getPointerOperand();
- else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
- Ptr = SI->getPointerOperand();
- else
- return;
-
- Value *Stride = getStrideFromPointer(Ptr, SE, CurLoop);
- if (!Stride)
- return;
-
- DEBUG(dbgs() << "Found a strided access that we can version");
- DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
- Strides[Ptr] = Stride;
-}
-
/// \brief Check loop structure and confirms it's good for LoopVersioningLICM.
bool LoopVersioningLICM::legalLoopStructure() {
// Loop must have a preheader, if not return false.
return false;
}
LoadAndStoreCounter++;
- collectStridedAccess(Ld);
Value *Ptr = Ld->getPointerOperand();
// Check loop invariant.
if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
return false;
}
LoadAndStoreCounter++;
- collectStridedAccess(St);
Value *Ptr = St->getPointerOperand();
// Check loop invariant.
if (SE->isLoopInvariant(SE->getSCEV(Ptr), CurLoop))
return false;
}
// Get LoopAccessInfo from current loop.
- LAI = &LAA->getInfo(CurLoop, Strides);
+ LAI = &LAA->getInfo(CurLoop, true);
// Check LoopAccessInfo for need of runtime check.
if (LAI->getRuntimePointerChecking()->getChecks().empty()) {
DEBUG(dbgs() << " LAA: Runtime check not found !!\n");
unsigned getMaxSafeDepDistBytes() { return LAI->getMaxSafeDepDistBytes(); }
- bool hasStride(Value *V) { return StrideSet.count(V); }
+ bool hasStride(Value *V) { return LAI->hasStride(V); }
/// Returns true if the target machine supports masked store operation
/// for the given \p DataType and kind of access to \p Ptr.
/// and we know that we can read from them without segfault.
bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);
- /// \brief Collect memory access with loop invariant strides.
- ///
- /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
- /// invariant.
- void collectStridedAccess(Value *LoadOrStoreInst);
-
/// Updates the vectorization state by adding \p Phi to the inductions list.
/// This can set \p Phi as the main induction of the loop if \p Phi is a
/// better choice for the main induction than the existing one.
/// \brief If an access has a symbolic strides, this maps the pointer value to
/// the stride symbol.
- const ValueToValueMap *getSymbolicStrides() { return &SymbolicStrides; }
+ const ValueToValueMap *getSymbolicStrides() {
+ // FIXME: Currently, the set of symbolic strides is sometimes queried before
+ // it's collected. This happens from canVectorizeWithIfConvert, when the
+ // pointer is checked to reference consecutive elements suitable for a
+ // masked access.
+ return LAI ? &LAI->getSymbolicStrides() : nullptr;
+ }
unsigned NumPredStores;
/// Used to emit an analysis of any legality issues.
LoopVectorizeHints *Hints;
- ValueToValueMap SymbolicStrides;
- SmallPtrSet<Value *, 8> StrideSet;
-
/// While vectorizing these instructions we have to generate a
/// call to the appropriate masked intrinsic
SmallPtrSet<const Instruction *, 8> MaskedOp;
<< "store instruction cannot be vectorized");
return false;
}
- if (EnableMemAccessVersioning)
- collectStridedAccess(ST);
-
- } else if (LoadInst *LI = dyn_cast<LoadInst>(it)) {
- if (EnableMemAccessVersioning)
- collectStridedAccess(LI);
// FP instructions can allow unsafe algebra, thus vectorizable by
// non-IEEE-754 compliant SIMD units.
return true;
}
-void LoopVectorizationLegality::collectStridedAccess(Value *MemAccess) {
- Value *Ptr = nullptr;
- if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
- Ptr = LI->getPointerOperand();
- else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
- Ptr = SI->getPointerOperand();
- else
- return;
-
- Value *Stride = getStrideFromPointer(Ptr, PSE.getSE(), TheLoop);
- if (!Stride)
- return;
-
- DEBUG(dbgs() << "LV: Found a strided access that we can version");
- DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
- SymbolicStrides[Ptr] = Stride;
- StrideSet.insert(Stride);
-}
-
void LoopVectorizationLegality::collectLoopUniforms() {
// We now know that the loop is vectorizable!
// Collect variables that will remain uniform after vectorization.
}
bool LoopVectorizationLegality::canVectorizeMemory() {
- LAI = &LAA->getInfo(TheLoop, *getSymbolicStrides());
+ LAI = &LAA->getInfo(TheLoop, EnableMemAccessVersioning);
auto &OptionalReport = LAI->getReport();
if (OptionalReport)
emitAnalysis(VectorizationReport(*OptionalReport));