  KnownBits Known = computeKnownBits(
      V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
- // TODO: Use ForSigned to determine preferred range.
- ConstantRange::PreferredRangeType RangeType = ConstantRange::Smallest;
+ ConstantRange::PreferredRangeType RangeType =
+ ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
return CR1.intersectWith(CR2, RangeType);
}
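
(Aside, not part of the patch: why ForSigned should also drive the preferred range type. A minimal sketch against the real ConstantRange/KnownBits API; the i8 example value is hypothetical.)

  #include "llvm/IR/ConstantRange.h"
  #include "llvm/Support/KnownBits.h"
  using namespace llvm;

  void preferredRangeSketch() {
    KnownBits Known(8);
    Known.One.setBit(7); // sign bit known set, low bits unknown
    // Same set of bit patterns, two descriptions:
    // unsigned view [128, 255] vs signed view [-128, -1].
    ConstantRange CRU = ConstantRange::fromKnownBits(Known, /*IsSigned=*/false);
    ConstantRange CRS = ConstantRange::fromKnownBits(Known, /*IsSigned=*/true);
    // When an intersection admits several valid enclosing ranges, the
    // preferred range type selects the one that is non-wrapping in the
    // caller's domain; a signed overflow check therefore wants
    // ConstantRange::Signed rather than ConstantRange::Smallest.
    ConstantRange R = CRU.intersectWith(CRS, ConstantRange::Signed);
    (void)R;
  }
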
  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
      ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
return OverflowResult::NeverOverflows;
- KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
- KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
- ConstantRange LHSRange =
- ConstantRange::fromKnownBits(LHSKnown, /*signed*/ true);
- ConstantRange RHSRange =
- ConstantRange::fromKnownBits(RHSKnown, /*signed*/ true);
+ ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
+ LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
+ ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
+ RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
OverflowResult OR =
mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
if (OR != OverflowResult::MayOverflow)
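
(Aside, not part of the patch: the concrete arithmetic this enables for the srem tests below, sketched against the real ConstantRange API.)

  #include "llvm/IR/ConstantRange.h"
  using namespace llvm;

  bool sremPlus28NeverOverflows() {
    // srem i8 %a, 100 always lies in [-99, 99] (upper bound is exclusive).
    ConstantRange LHSRange(APInt(8, -99, /*isSigned=*/true),
                           APInt(8, 100, /*isSigned=*/true));
    ConstantRange RHSRange(APInt(8, 28)); // the constant operand
    // [-99, 99] + 28 == [-71, 127], which still fits in i8.
    return LHSRange.signedAddMayOverflow(RHSRange) ==
           ConstantRange::OverflowResult::NeverOverflows;
  }
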
; CHECK-LABEL: @and1_lshr1_is_cmp_eq_0_multiuse(
; CHECK-NEXT: [[SH:%.*]] = lshr i8 1, %x
; CHECK-NEXT: [[AND:%.*]] = and i8 [[SH]], 1
-; CHECK-NEXT: [[ADD:%.*]] = add nuw i8 [[SH]], [[AND]]
+; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i8 [[SH]], [[AND]]
; CHECK-NEXT: ret i8 [[ADD]]
;
%sh = lshr i8 1, %x
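; %sh = lshr i8 1, %x and %and are each in [0, 1], so the sum is in [0, 2]:
; the nsw flag can now be inferred on top of the existing nuw.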
; CHECK-LABEL: @sum_ugt_op_uses(
; CHECK-NEXT: [[X:%.*]] = sdiv i8 42, [[P1:%.*]]
; CHECK-NEXT: [[Y:%.*]] = sdiv i8 42, [[P2:%.*]]
-; CHECK-NEXT: [[A:%.*]] = add i8 [[X]], [[Y]]
+; CHECK-NEXT: [[A:%.*]] = add nsw i8 [[X]], [[Y]]
; CHECK-NEXT: store i8 [[A]], i8* [[P3:%.*]], align 1
; CHECK-NEXT: [[C:%.*]] = icmp ugt i8 [[X]], [[A]]
; CHECK-NEXT: ret i1 [[C]]
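; Each sdiv of 42 produces a quotient in [-42, 42], so the sum stays within
; [-84, 84] and cannot signed-wrap an i8, hence the inferred nsw.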
; CHECK-LABEL: @PR14613_smin(
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i8 [[X:%.*]], 40
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i8 [[X]], i8 40
-; CHECK-NEXT: [[U7:%.*]] = add i8 [[TMP2]], 15
+; CHECK-NEXT: [[U7:%.*]] = add nsw i8 [[TMP2]], 15
; CHECK-NEXT: ret i8 [[U7]]
;
%u4 = sext i8 %x to i32
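; The smin clamps the value to at most 40, so adding 15 peaks at 55 and
; cannot signed-wrap an i8, hence the inferred nsw.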
define { i32, i1 } @fold_mixed_signs(i32 %x) {
; CHECK-LABEL: @fold_mixed_signs(
-; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[X:%.*]], i32 6)
+; CHECK-NEXT: [[B:%.*]] = add nsw i32 [[X:%.*]], 6
+; CHECK-NEXT: [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 undef, i1 false }, i32 [[B]], 0
; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
;
%a = add nsw i32 %x, 13
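; The nsw constant adds fold to a single +6, and that add provably cannot
; signed-wrap, so the intrinsic's result is rebuilt with a known-false
; overflow flag instead of a call to @llvm.sadd.with.overflow.i32.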
define i8 @test_scalar_sadd_srem_no_ov(i8 %a) {
; CHECK-LABEL: @test_scalar_sadd_srem_no_ov(
; CHECK-NEXT: [[B:%.*]] = srem i8 [[A:%.*]], 100
-; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[B]], i8 28)
+; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[B]], 28
; CHECK-NEXT: ret i8 [[R]]
;
%b = srem i8 %a, 100
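; srem i8 %a, 100 lies in [-99, 99]; adding 28 peaks at 127, so saturation
; is impossible and @llvm.sadd.sat.i8 relaxes to a plain add nsw.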
; CHECK-LABEL: @test_scalar_sadd_srem_and_no_ov(
; CHECK-NEXT: [[AA:%.*]] = srem i8 [[A:%.*]], 100
; CHECK-NEXT: [[BB:%.*]] = and i8 [[B:%.*]], 15
-; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.sadd.sat.i8(i8 [[AA]], i8 [[BB]])
+; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[AA]], [[BB]]
; CHECK-NEXT: ret i8 [[R]]
;
%aa = srem i8 %a, 100
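; %aa lies in [-99, 99] and %bb in [0, 15], so the sum stays within
; [-99, 114] and, again, can never saturate.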
; CHECK-LABEL: @test64(
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[X:%.*]], 255
; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 255
-; CHECK-NEXT: [[RES:%.*]] = add i32 [[TMP2]], 1
+; CHECK-NEXT: [[RES:%.*]] = add nsw i32 [[TMP2]], 1
; CHECK-NEXT: ret i32 [[RES]]
;
%1 = xor i32 %x, -1
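; The select is smin(%x, 255), so the add of 1 peaks at 256 and cannot
; signed-wrap an i32, hence the inferred nsw.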
; CHECK-LABEL: @test68(
; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i32> [[X:%.*]], <i32 255, i32 255>
; CHECK-NEXT: [[TMP2:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> [[X]], <2 x i32> <i32 255, i32 255>
-; CHECK-NEXT: [[RES:%.*]] = add <2 x i32> [[TMP2]], <i32 1, i32 1>
+; CHECK-NEXT: [[RES:%.*]] = add nsw <2 x i32> [[TMP2]], <i32 1, i32 1>
; CHECK-NEXT: ret <2 x i32> [[RES]]
;
%1 = xor <2 x i32> %x, <i32 -1, i32 -1>
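; Same reasoning as test64, applied per lane of the <2 x i32> vector.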