if (!match(ZeroCmpOp, m_Sub(m_Value(Base), m_Value(Offset))))
return nullptr;
- // ZeroCmpOp < Base && ZeroCmpOp != 0 --> Base > Offset iff Offset != 0
- // ZeroCmpOp >= Base || ZeroCmpOp == 0 --> Base <= Offset iff Offset != 0
- if (match(UnsignedICmp,
- m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Specific(Base)))) {
- if (UnsignedICmp->getOperand(0) != ZeroCmpOp)
- UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
-
- if (UnsignedPred == ICmpInst::ICMP_ULT && IsAnd &&
- EqPred == ICmpInst::ICMP_NE && IsKnownNonZero(Offset))
- return Builder.CreateICmpUGT(Base, Offset);
- if (UnsignedPred == ICmpInst::ICMP_UGE && !IsAnd &&
- EqPred == ICmpInst::ICMP_EQ && IsKnownNonZero(Offset))
- return Builder.CreateICmpULE(Base, Offset);
- }
-
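+ // A lone "(Base - Offset) pred Base" compare is now folded by
+ // foldICmpBinOp() when Offset is known non-zero; only the
+ // "Base pred Offset" form still needs handling here.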
if (!match(UnsignedICmp,
m_c_ICmp(UnsignedPred, m_Specific(Base), m_Specific(Offset))) ||
!ICmpInst::isUnsigned(UnsignedPred))
/// TODO: A large part of this logic is duplicated in InstSimplify's
/// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
/// duplication.
-Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
+Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I, const SimplifyQuery &SQ) {
+ const SimplifyQuery Q = SQ.getWithInstruction(&I);
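+ // Anchor the query at this compare so the isKnownNonZero() calls below can
+ // use assumes and dominating conditions that are valid at this use site.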
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
// Special logic for binary operators.
// C u</u>= (C - D) --> C u</u>= D
if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
return new ICmpInst(Pred, C, D);
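+ // Why the strictness flips below: A - B wraps iff A u< B, and a wrapped
+ // result exceeds A by exactly 2^BitWidth - B > 0, so it compares u> A;
+ // an unwrapped result is u<= A, and strictly u< A precisely when B != 0.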
+ // (A - B) u>=/u< A --> B u>/u<= A iff B != 0
+ if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
+ isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
+ // C u<=/u> (C - D) --> C u</u>= D iff D != 0
+ if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
+ isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
+ return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
// icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
bool Changed = false;
+ const SimplifyQuery Q = SQ.getWithInstruction(&I);
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
unsigned Op0Cplxity = getComplexity(Op0);
unsigned Op1Cplxity = getComplexity(Op1);
Changed = true;
}
- if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1,
- SQ.getWithInstruction(&I)))
+ if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
return replaceInstUsesWith(I, V);
// Comparing -val or val with non-zero is the same as just comparing val
if (Instruction *Res = foldICmpWithDominatingICmp(I))
return Res;
- if (Instruction *Res = foldICmpBinOp(I))
+ if (Instruction *Res = foldICmpBinOp(I, Q))
return Res;
if (Instruction *Res = foldICmpUsingKnownBits(I))
Instruction *foldICmpWithConstant(ICmpInst &Cmp);
Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
- Instruction *foldICmpBinOp(ICmpInst &Cmp);
+ Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
Instruction *foldICmpEquality(ICmpInst &Cmp);
Instruction *foldIRemByPowerOfTwoToBitTest(ICmpInst &I);
Instruction *foldICmpWithZero(ICmpInst &Cmp);
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[ADJUSTED]], 0
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[ADJUSTED]], 0
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint i64* [[OFFSETPTR:%.*]] to i64
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]])
-; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]]
; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]])
; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[ADJUSTED]], 0
; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]])
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[RES]]
;
%cmp = icmp slt i8 %offset, 0
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[RES:%.*]] = icmp uge i8 [[ADJUSTED]], [[BASE]]
+; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[RES]]
;
%cmp = icmp slt i8 %offset, 0
; CHECK-NEXT: [[BASE:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[RES:%.*]] = icmp ugt i8 [[BASE]], [[ADJUSTED]]
+; CHECK-NEXT: [[RES:%.*]] = icmp uge i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[RES]]
;
%cmp = icmp slt i8 %offset, 0
; CHECK-NEXT: [[BASE:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]])
-; CHECK-NEXT: [[RES:%.*]] = icmp ule i8 [[BASE]], [[ADJUSTED]]
+; CHECK-NEXT: [[RES:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]]
; CHECK-NEXT: ret i1 [[RES]]
;
%cmp = icmp slt i8 %offset, 0
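; A sketch of the net effect of the new foldICmpBinOp() pattern (assuming
; %offset is known non-zero; not a generated test):
;   %adjusted = sub i8 %base, %offset
;   %res = icmp ult i8 %adjusted, %base
; folds to
;   %res = icmp ule i8 %offset, %base
; (modulo InstCombine's usual operand-order canonicalization).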