DWARFDie getDIEForOffset(uint32_t Offset) {
extractDIEsIfNeeded(false);
assert(!DieArray.empty());
- auto it = std::lower_bound(
- DieArray.begin(), DieArray.end(), Offset,
- [](const DWARFDebugInfoEntry &LHS, uint32_t Offset) {
+ auto it = llvm::lower_bound(
+ DieArray, Offset, [](const DWARFDebugInfoEntry &LHS, uint32_t Offset) {
return LHS.getOffset() < Offset;
});
if (it != DieArray.end() && it->getOffset() == Offset)
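
Every hunk in this change applies the same mechanical rewrite: llvm::lower_bound from llvm/ADT/STLExtras.h takes the container itself, so the explicit begin()/end() pair disappears at each call site. A minimal standalone sketch of the idea (the sketch namespace below uses plain std::begin/std::end rather than LLVM's ADL helpers, and only approximates the real wrappers):

    // Sketch only: approximates the llvm::lower_bound range wrappers with
    // std::begin/std::end; the real helpers live in llvm/ADT/STLExtras.h.
    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <iterator>
    #include <vector>

    namespace sketch {
    template <typename Range, typename T>
    auto lower_bound(Range &&R, const T &Value) {
      return std::lower_bound(std::begin(R), std::end(R), Value);
    }
    template <typename Range, typename T, typename Compare>
    auto lower_bound(Range &&R, const T &Value, Compare C) {
      return std::lower_bound(std::begin(R), std::end(R), Value, C);
    }
    } // namespace sketch

    int main() {
      std::vector<uint32_t> Offsets = {0x0b, 0x1e, 0x2a, 0x47}; // sorted offsets
      // Old spelling: pass the iterator pair explicitly.
      auto A = std::lower_bound(Offsets.begin(), Offsets.end(), 0x2au);
      // New spelling: pass the range; the same element is found.
      auto B = sketch::lower_bound(Offsets, 0x2au);
      assert(A == B && *B == 0x2a);
    }

Both forms return an iterator into the same container, which is why the surrounding comparisons against DieArray.end() and the later arithmetic against .begin() in other hunks need no further changes.
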
if (funcName.empty())
return false;
- std::vector<VecDesc>::const_iterator I = std::lower_bound(
- VectorDescs.begin(), VectorDescs.end(), funcName,
- compareWithScalarFnName);
+ std::vector<VecDesc>::const_iterator I =
+ llvm::lower_bound(VectorDescs, funcName, compareWithScalarFnName);
return I != VectorDescs.end() && StringRef(I->ScalarFnName) == funcName;
}
F = sanitizeFunctionName(F);
if (F.empty())
return F;
- std::vector<VecDesc>::const_iterator I = std::lower_bound(
- VectorDescs.begin(), VectorDescs.end(), F, compareWithScalarFnName);
+ std::vector<VecDesc>::const_iterator I =
+ llvm::lower_bound(VectorDescs, F, compareWithScalarFnName);
while (I != VectorDescs.end() && StringRef(I->ScalarFnName) == F) {
if (I->VectorizationFactor == VF)
return I->VectorFnName;
if (F.empty())
return F;
- std::vector<VecDesc>::const_iterator I = std::lower_bound(
- ScalarDescs.begin(), ScalarDescs.end(), F, compareWithVectorFnName);
+ std::vector<VecDesc>::const_iterator I =
+ llvm::lower_bound(ScalarDescs, F, compareWithVectorFnName);
if (I == ScalarDescs.end() || StringRef(I->VectorFnName) != F)
return StringRef();
VF = I->VectorizationFactor;
NewOp = RealVal;
} else {
// Otherwise, look up the placeholder in ResolveConstants.
- ResolveConstantsTy::iterator It = std::lower_bound(
- ResolveConstants.begin(), ResolveConstants.end(),
+ ResolveConstantsTy::iterator It = llvm::lower_bound(
+ ResolveConstants,
std::pair<Constant *, unsigned>(cast<Constant>(*I), 0));
assert(It != ResolveConstants.end() && It->first == *I);
NewOp = operator[](It->second);
uint64_t Offset = getOffsetFromIndices(U, *DL);
ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
- unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
- Offsets.begin();
+ unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
auto &DstRegs = allocateVRegs(U);
for (unsigned i = 0; i < DstRegs.size(); ++i)
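
This hunk also shows a common follow-up idiom: subtracting the container's begin() from the returned iterator to obtain an index into a parallel array. A small sketch of that step, assuming (as at this call site, where the offset is computed from the aggregate's indices) that the searched offset is actually present; std::vector stands in for ArrayRef and the Offsets/Pieces values are made up:

    // Sketch: turning the lower_bound iterator into an index, as the hunk
    // above does with Offsets and the per-piece virtual registers.
    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    int main() {
      // Byte offsets of the pieces of an aggregate value, kept sorted, plus a
      // parallel table describing each piece.
      std::vector<uint64_t> Offsets = {0, 8, 16, 32};
      std::vector<std::string> Pieces = {"i64", "i64", "i128", "f64"};

      uint64_t Offset = 16;
      // The iterator minus begin() is the position of the matching entry,
      // which then indexes the parallel Pieces table.
      unsigned Idx = std::lower_bound(Offsets.begin(), Offsets.end(), Offset) -
                     Offsets.begin();
      assert(Idx == 2 && Pieces[Idx] == "i128");
    }
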
ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
// Constrain to VirtReg's live range.
- unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
- Uses.front().getRegSlot()) - RMS.begin();
+ unsigned ri =
+ llvm::lower_bound(RMS, Uses.front().getRegSlot()) - RMS.begin();
unsigned re = RMS.size();
for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
// Look for Uses[i] <= RMS <= Uses[i+1].
DWARFDebugLoc::LocationList const *
DWARFDebugLoc::getLocationListAtOffset(uint64_t Offset) const {
- auto It = std::lower_bound(
- Locations.begin(), Locations.end(), Offset,
+ auto It = llvm::lower_bound(
+ Locations, Offset,
[](const LocationList &L, uint64_t Offset) { return L.Offset < Offset; });
if (It != Locations.end() && It->Offset == Offset)
return &(*It);
DWARFDebugLoclists::LocationList const *
DWARFDebugLoclists::getLocationListAtOffset(uint64_t Offset) const {
- auto It = std::lower_bound(
- Locations.begin(), Locations.end(), Offset,
+ auto It = llvm::lower_bound(
+ Locations, Offset,
[](const LocationList &L, uint64_t Offset) { return L.Offset < Offset; });
if (It != Locations.end() && It->Offset == Offset)
return &(*It);
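
Both location-list hunks use the three-argument form, where the comparator's parameter types differ: the element type on the left, the raw key on the right. That asymmetric shape is what lets lower_bound search a vector of structs by a single field. A sketch with a stand-in LocationList holding only the field that matters here (the real type is DWARFDebugLoc::LocationList), and a hypothetical getListAtOffset helper:

    // Sketch: binary-searching records by one field, mirroring the
    // getLocationListAtOffset hunks. LocationList is a stand-in struct.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct LocationList {
      uint64_t Offset; // section offset the list starts at; vector sorted by it
    };

    const LocationList *getListAtOffset(const std::vector<LocationList> &Locations,
                                        uint64_t Offset) {
      // Element-vs-key comparator: LocationList on the left, plain offset on
      // the right, exactly the shape std::lower_bound (and llvm::lower_bound)
      // accepts.
      auto It = std::lower_bound(
          Locations.begin(), Locations.end(), Offset,
          [](const LocationList &L, uint64_t Off) { return L.Offset < Off; });
      if (It != Locations.end() && It->Offset == Offset)
        return &*It;
      return nullptr; // no list starts exactly at Offset
    }

    int main() {
      std::vector<LocationList> Locations = {{0x00}, {0x10}, {0x2c}};
      std::cout << (getListAtOffset(Locations, 0x10) != nullptr) << '\n'; // 1
      std::cout << (getListAtOffset(Locations, 0x11) != nullptr) << '\n'; // 0
    }
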
// Find the first range whose High field is >= R.High,
// then check if the Low field is <= R.Low. If so, we
// have a Range that covers R.
- auto I = std::lower_bound(
- Ranges.begin(), Ranges.end(), R,
- [](const IntRange &A, const IntRange &B) { return A.High < B.High; });
+ auto I = llvm::lower_bound(
+ Ranges, R, [](IntRange A, IntRange B) { return A.High < B.High; });
return I != Ranges.end() && I->Low <= R.Low;
}
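
The comment above describes the check this hunk performs: with Ranges sorted by High, lower_bound on R.High finds the first range ending at or after R, and its Low field then decides whether that range covers R. A worked sketch of exactly that check (IntRange is reduced to the two fields used, and rangesCover is a hypothetical wrapper name):

    // Sketch: "is R covered by a range?" via lower_bound on the High field,
    // following the comment in the hunk above. Ranges is sorted by High.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct IntRange {
      int Low, High; // inclusive bounds
    };

    static bool rangesCover(const std::vector<IntRange> &Ranges, IntRange R) {
      // First range whose High is >= R.High; if its Low also reaches down to
      // R.Low, that range covers R.
      auto I = std::lower_bound(
          Ranges.begin(), Ranges.end(), R,
          [](IntRange A, IntRange B) { return A.High < B.High; });
      return I != Ranges.end() && I->Low <= R.Low;
    }

    int main() {
      std::vector<IntRange> Ranges = {{0, 3}, {2, 8}, {10, 20}}; // by High
      assert(rangesCover(Ranges, {4, 7}));   // contained in {2, 8}
      assert(!rangesCover(Ranges, {9, 12})); // {10, 20} starts too late
    }
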
unsigned LoadIdx = LBI.getInstructionIndex(LI);
// Find the nearest store that has a lower index than this load.
- StoresByIndexTy::iterator I =
- std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
- std::make_pair(LoadIdx,
- static_cast<StoreInst *>(nullptr)),
- less_first());
+ StoresByIndexTy::iterator I = llvm::lower_bound(
+ StoresByIndex,
+ std::make_pair(LoadIdx, static_cast<StoreInst *>(nullptr)),
+ less_first());
if (I == StoresByIndex.begin()) {
if (StoresByIndex.empty())
// If there are no stores, the load takes the undef value.
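
This hunk probes a vector of (instruction index, StoreInst *) pairs with less_first(), so only the index participates in the ordering and the nullptr in the probe key is just a placeholder. A hedged sketch of the lookup with plain STL types (StoreInst is reduced to a small struct, and LessFirst approximates llvm::less_first from STLExtras.h):

    // Sketch: find the store nearest before a load, the way the hunk above
    // probes StoresByIndex. Only .first (the instruction index) is compared.
    #include <algorithm>
    #include <cassert>
    #include <iterator>
    #include <utility>
    #include <vector>

    struct StoreInst { int StoredValue; };

    struct LessFirst {
      template <typename P> bool operator()(const P &A, const P &B) const {
        return A.first < B.first;
      }
    };

    int main() {
      StoreInst S0{7}, S1{42};
      // Index/store pairs, sorted by instruction index.
      std::vector<std::pair<unsigned, StoreInst *>> StoresByIndex = {
          {2, &S0}, {9, &S1}};

      unsigned LoadIdx = 6;
      // Probe with (LoadIdx, nullptr): the pointer half never participates.
      auto I = std::lower_bound(
          StoresByIndex.begin(), StoresByIndex.end(),
          std::make_pair(LoadIdx, static_cast<StoreInst *>(nullptr)),
          LessFirst());

      // I is the first store at or after the load; the store feeding the load
      // is the previous entry (the surrounding code handles I == begin()).
      assert(I != StoresByIndex.begin());
      assert(std::prev(I)->second->StoredValue == 7);
    }
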
// them from the Preds list.
for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
// Do a log(n) search of the Preds list for the entry we want.
- SmallVectorImpl<BasicBlock *>::iterator EntIt = std::lower_bound(
- Preds.begin(), Preds.end(), SomePHI->getIncomingBlock(i),
- CompareBBNumbers);
+ SmallVectorImpl<BasicBlock *>::iterator EntIt = llvm::lower_bound(
+ Preds, SomePHI->getIncomingBlock(i), CompareBBNumbers);
assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
"PHI node has entry for a block which is not a predecessor!");