void eliminateMostlyEmptyBlock(BasicBlock *BB);
bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
bool isPreheader);
- bool optimizeBlock(BasicBlock &BB, bool &ModifiedDT);
- bool optimizeInst(Instruction *I, bool &ModifiedDT);
+ bool optimizeBlock(BasicBlock &BB, DominatorTree &DT, bool &ModifiedDT);
+ bool optimizeInst(Instruction *I, DominatorTree &DT, bool &ModifiedDT);
bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Type *AccessTy, unsigned AddrSpace);
bool optimizeInlineAsmInst(CallInst *CS);
const SmallVectorImpl<Instruction *> &Exts,
SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
unsigned CreatedInstsCost = 0);
- bool mergeSExts(Function &F);
+ bool mergeSExts(Function &F, DominatorTree &DT);
bool splitLargeGEPOffsets();
bool performAddressTypePromotion(
Instruction *&Inst,
bool MadeChange = true;
while (MadeChange) {
MadeChange = false;
+ DominatorTree DT(F);
for (Function::iterator I = F.begin(); I != F.end(); ) {
BasicBlock *BB = &*I++;
bool ModifiedDTOnIteration = false;
- MadeChange |= optimizeBlock(*BB, ModifiedDTOnIteration);
+ MadeChange |= optimizeBlock(*BB, DT, ModifiedDTOnIteration);
// Restart BB iteration if the dominator tree of the Function was changed
if (ModifiedDTOnIteration)
break;
}
if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
- MadeChange |= mergeSExts(F);
+ MadeChange |= mergeSExts(F, DT);
if (!LargeOffsetGEPMap.empty())
MadeChange |= splitLargeGEPOffsets();
}
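// Editorial sketch, not part of the patch: the driver pattern above in
// standalone form. It assumes only DominatorTree(Function &) and a
// hypothetical per-block helper; the point is that the dominator tree is
// built once per sweep over the function and passed by reference, rather
// than each helper rebuilding its own copy.
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
using namespace llvm;

static bool processBlock(BasicBlock &BB, DominatorTree &DT) {
  // Hypothetical helper body: a real pass would query DT and transform BB.
  (void)BB;
  (void)DT;
  return false;
}

static bool runSweeps(Function &F) {
  bool EverChanged = false;
  bool MadeChange = true;
  while (MadeChange) {
    MadeChange = false;
    DominatorTree DT(F);                  // one construction per sweep
    for (BasicBlock &BB : F)
      MadeChange |= processBlock(BB, DT); // every helper shares this DT
    EverChanged |= MadeChange;
  }
  return EverChanged;
}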
static bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, CmpInst *Cmp,
- Intrinsic::ID IID) {
+ Intrinsic::ID IID, DominatorTree &DT) {
// We allow matching the canonical IR (add X, C) back to (usubo X, -C).
Value *Arg0 = BO->getOperand(0);
Value *Arg1 = BO->getOperand(1);
} else {
// The math and compare may be independent instructions. Check dominance to
// determine the insertion point for the intrinsic.
- DominatorTree DT(*BO->getFunction());
bool MathDominates = DT.dominates(BO, Cmp);
if (!MathDominates && !DT.dominates(Cmp, BO))
return false;
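// Editorial sketch, not part of the patch: the dominance check above as a
// small standalone helper (name and shape are illustrative; relies on
// llvm/IR/Dominators.h and llvm/IR/Instruction.h). With the shared
// DominatorTree, whichever of the two independent instructions dominates the
// other becomes the insertion point for the intrinsic; if neither dominates,
// the transform gives up, matching the `return false` above.
static Instruction *pickIntrinsicInsertPt(Instruction *Math, Instruction *Cmp,
                                          DominatorTree &DT) {
  if (DT.dominates(Math, Cmp))
    return Math;    // emit the intrinsic at the math instruction
  if (DT.dominates(Cmp, Math))
    return Cmp;     // emit it at the compare instead
  return nullptr;   // neither dominates the other: bail out
}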
/// Try to combine the compare into a call to the llvm.uadd.with.overflow
/// intrinsic. Return true if any changes were made.
static bool combineToUAddWithOverflow(CmpInst *Cmp, const TargetLowering &TLI,
- const DataLayout &DL, bool &ModifiedDT) {
+ const DataLayout &DL, DominatorTree &DT,
+ bool &ModifiedDT) {
Value *A, *B;
BinaryOperator *Add;
if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add))))
if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
return false;
- if (!replaceMathCmpWithIntrinsic(Add, Cmp, Intrinsic::uadd_with_overflow))
+ if (!replaceMathCmpWithIntrinsic(Add, Cmp, Intrinsic::uadd_with_overflow, DT))
return false;
// Reset callers - do not crash by iterating over a dead instruction.
}
static bool combineToUSubWithOverflow(CmpInst *Cmp, const TargetLowering &TLI,
- const DataLayout &DL, bool &ModifiedDT) {
+ const DataLayout &DL, DominatorTree &DT,
+ bool &ModifiedDT) {
// Convert (A u> B) to (A u< B) to simplify pattern matching.
Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
ICmpInst::Predicate Pred = Cmp->getPredicate();
TLI.getValueType(DL, Sub->getType())))
return false;
- if (!replaceMathCmpWithIntrinsic(Sub, Cmp, Intrinsic::usub_with_overflow))
+ if (!replaceMathCmpWithIntrinsic(Sub, Cmp, Intrinsic::usub_with_overflow, DT))
return false;
// Reset callers - do not crash by iterating over a dead instruction.
}
static bool optimizeCmp(CmpInst *Cmp, const TargetLowering &TLI,
- const DataLayout &DL, bool &ModifiedDT) {
+ const DataLayout &DL, DominatorTree &DT,
+ bool &ModifiedDT) {
if (sinkCmpExpression(Cmp, TLI))
return true;
- if (combineToUAddWithOverflow(Cmp, TLI, DL, ModifiedDT))
+ if (combineToUAddWithOverflow(Cmp, TLI, DL, DT, ModifiedDT))
return true;
- if (combineToUSubWithOverflow(Cmp, TLI, DL, ModifiedDT))
+ if (combineToUSubWithOverflow(Cmp, TLI, DL, DT, ModifiedDT))
return true;
return false;
}
/// Merge redundant sexts when one dominates the other.
-bool CodeGenPrepare::mergeSExts(Function &F) {
- DominatorTree DT(F);
+bool CodeGenPrepare::mergeSExts(Function &F, DominatorTree &DT) {
bool Changed = false;
for (auto &Entry : ValToSExtendedUses) {
SExts &Insts = Entry.second;
return true;
}
-bool CodeGenPrepare::optimizeInst(Instruction *I, bool &ModifiedDT) {
+bool CodeGenPrepare::optimizeInst(Instruction *I, DominatorTree &DT,
+ bool &ModifiedDT) {
// Bail out if we inserted the instruction to prevent optimizations from
// stepping on each other's toes.
if (InsertedInsts.count(I))
}
if (auto *Cmp = dyn_cast<CmpInst>(I))
- if (TLI && optimizeCmp(Cmp, *TLI, *DL, ModifiedDT))
+ if (TLI && optimizeCmp(Cmp, *TLI, *DL, DT, ModifiedDT))
return true;
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
GEPI->replaceAllUsesWith(NC);
GEPI->eraseFromParent();
++NumGEPsElim;
- optimizeInst(NC, ModifiedDT);
+ optimizeInst(NC, DT, ModifiedDT);
return true;
}
if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
-bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, bool &ModifiedDT) {
+bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, DominatorTree &DT,
+ bool &ModifiedDT) {
SunkAddrs.clear();
bool MadeChange = false;
CurInstIterator = BB.begin();
while (CurInstIterator != BB.end()) {
- MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
+ MadeChange |= optimizeInst(&*CurInstIterator++, DT, ModifiedDT);
if (ModifiedDT)
return true;
}