From: Nikita Popov
Date: Sat, 15 Jun 2019 09:15:52 +0000 (+0000)
Subject: [SCEV] Use unsigned/signed intersection type in SCEV
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=b435405a043fbd11282be0bec93a42b1f1c54bf0;p=llvm

[SCEV] Use unsigned/signed intersection type in SCEV

Based on D59959, this switches SCEV to use unsigned/signed range
intersection based on the sign hint. This will prefer non-wrapping
ranges in the relevant domain.

I've left the one intersection in getRangeForAffineAR() to use the
smallest intersection heuristic, as there doesn't seem to be any
obvious preference there.

Differential Revision: https://reviews.llvm.org/D60035

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363490 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 88686427127..f37581fbded 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -5535,6 +5535,9 @@ ScalarEvolution::getRangeRef(const SCEV *S,
   DenseMap<const SCEV *, ConstantRange> &Cache =
       SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                        : SignedRanges;
+  ConstantRange::PreferredRangeType RangeType =
+      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
+          ? ConstantRange::Unsigned : ConstantRange::Signed;
 
   // See if we've computed this range already.
   DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
@@ -5565,53 +5568,60 @@ ScalarEvolution::getRangeRef(const SCEV *S,
     ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
     for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
       X = X.add(getRangeRef(Add->getOperand(i), SignHint));
-    return setRange(Add, SignHint, ConservativeResult.intersectWith(X));
+    return setRange(Add, SignHint,
+                    ConservativeResult.intersectWith(X, RangeType));
   }
 
   if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
     ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
     for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
       X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
-    return setRange(Mul, SignHint, ConservativeResult.intersectWith(X));
+    return setRange(Mul, SignHint,
+                    ConservativeResult.intersectWith(X, RangeType));
   }
 
   if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
     ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
     for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
       X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
-    return setRange(SMax, SignHint, ConservativeResult.intersectWith(X));
+    return setRange(SMax, SignHint,
+                    ConservativeResult.intersectWith(X, RangeType));
   }
 
   if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
     ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
     for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
       X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
-    return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
+    return setRange(UMax, SignHint,
+                    ConservativeResult.intersectWith(X, RangeType));
  }
 
   if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
     ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
     ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
     return setRange(UDiv, SignHint,
-                    ConservativeResult.intersectWith(X.udiv(Y)));
+                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
   }
 
   if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
     ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
     return setRange(ZExt, SignHint,
-                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
+                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
+                                                     RangeType));
   }
 
   if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
     ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
     return setRange(SExt, SignHint,
-                    ConservativeResult.intersectWith(X.signExtend(BitWidth)));
+                    ConservativeResult.intersectWith(X.signExtend(BitWidth),
+                                                     RangeType));
   }
 
   if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
     ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
     return setRange(Trunc, SignHint,
-                    ConservativeResult.intersectWith(X.truncate(BitWidth)));
+                    ConservativeResult.intersectWith(X.truncate(BitWidth),
+                                                     RangeType));
   }
 
   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
@@ -5621,7 +5631,7 @@ ScalarEvolution::getRangeRef(const SCEV *S,
     if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
       if (!C->getValue()->isZero())
         ConservativeResult = ConservativeResult.intersectWith(
-            ConstantRange(C->getAPInt(), APInt(BitWidth, 0)));
+            ConstantRange(C->getAPInt(), APInt(BitWidth, 0)), RangeType);
 
     // If there's no signed wrap, and all the operands have the same sign or
     // zero, the value won't ever change sign.
@@ -5635,11 +5645,11 @@ ScalarEvolution::getRangeRef(const SCEV *S,
       if (AllNonNeg)
         ConservativeResult = ConservativeResult.intersectWith(
           ConstantRange(APInt(BitWidth, 0),
-                        APInt::getSignedMinValue(BitWidth)));
+                        APInt::getSignedMinValue(BitWidth)), RangeType);
       else if (AllNonPos)
         ConservativeResult = ConservativeResult.intersectWith(
           ConstantRange(APInt::getSignedMinValue(BitWidth),
-                        APInt(BitWidth, 1)));
+                        APInt(BitWidth, 1)), RangeType);
     }
 
     // TODO: non-affine addrec
@@ -5652,14 +5662,14 @@ ScalarEvolution::getRangeRef(const SCEV *S,
           BitWidth);
       if (!RangeFromAffine.isFullSet())
         ConservativeResult =
-            ConservativeResult.intersectWith(RangeFromAffine);
+            ConservativeResult.intersectWith(RangeFromAffine, RangeType);
 
       auto RangeFromFactoring = getRangeViaFactoring(
           AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
           BitWidth);
       if (!RangeFromFactoring.isFullSet())
         ConservativeResult =
-            ConservativeResult.intersectWith(RangeFromFactoring);
+            ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
     }
   }
 
@@ -5670,7 +5680,8 @@ ScalarEvolution::getRangeRef(const SCEV *S,
     // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
     if (MDRange.hasValue())
-      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue());
+      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
+                                                            RangeType);
 
     // Split here to avoid paying the compile-time cost of calling both
     // computeKnownBits and ComputeNumSignBits.  This restriction can be lifted
@@ -5681,8 +5692,8 @@ ScalarEvolution::getRangeRef(const SCEV *S,
       KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
       if (Known.One != ~Known.Zero + 1)
         ConservativeResult =
-            ConservativeResult.intersectWith(ConstantRange(Known.One,
-                                                           ~Known.Zero + 1));
+            ConservativeResult.intersectWith(
+                ConstantRange(Known.One, ~Known.Zero + 1), RangeType);
     } else {
       assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
              "generalize as needed!");
@@ -5690,7 +5701,8 @@ ScalarEvolution::getRangeRef(const SCEV *S,
       if (NS > 1)
         ConservativeResult = ConservativeResult.intersectWith(
             ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
-                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
+                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
+            RangeType);
     }
 
     // A range of Phi is a subset of union of all ranges of its input.
@@ -5705,7 +5717,8 @@ ScalarEvolution::getRangeRef(const SCEV *S,
           if (RangeFromOps.isFullSet())
             break;
         }
-        ConservativeResult = ConservativeResult.intersectWith(RangeFromOps);
+        ConservativeResult =
+            ConservativeResult.intersectWith(RangeFromOps, RangeType);
         bool Erased = PendingPhiRanges.erase(Phi);
         assert(Erased && "Failed to erase Phi properly?");
         (void) Erased;
@@ -5812,7 +5825,7 @@ ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                      MaxBECountValue, BitWidth, /* Signed = */ false);
 
   // Finally, intersect signed and unsigned ranges.
-  return SR.intersectWith(UR);
+  return SR.intersectWith(UR, ConstantRange::Smallest);
 }
 
 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
diff --git a/test/Analysis/ScalarEvolution/extract-highbits-sameconstmask.ll b/test/Analysis/ScalarEvolution/extract-highbits-sameconstmask.ll
index 6b0f8588065..a4df455e2ab 100644
--- a/test/Analysis/ScalarEvolution/extract-highbits-sameconstmask.ll
+++ b/test/Analysis/ScalarEvolution/extract-highbits-sameconstmask.ll
@@ -8,7 +8,7 @@ define i32 @div(i32 %val) nounwind {
 ; CHECK-NEXT: %tmp1 = udiv i32 %val, 16
 ; CHECK-NEXT: --> (%val /u 16) U: [0,268435456) S: [0,268435456)
 ; CHECK-NEXT: %tmp2 = mul i32 %tmp1, 16
-; CHECK-NEXT: --> (16 * (%val /u 16)) U: [0,-15) S: [0,-15)
+; CHECK-NEXT: --> (16 * (%val /u 16)) U: [0,-15) S: [-2147483648,2147483633)
 ; CHECK-NEXT: Determining loop execution counts for: @div
 ;
   %tmp1 = udiv i32 %val, 16
@@ -38,7 +38,7 @@ define i32 @mask_b(i32 %val) nounwind {
 ; CHECK-LABEL: 'mask_b'
 ; CHECK-NEXT: Classifying expressions for: @mask_b
 ; CHECK-NEXT: %masked = and i32 %val, -16
-; CHECK-NEXT: --> (16 * (%val /u 16)) U: [0,-15) S: [0,-15)
+; CHECK-NEXT: --> (16 * (%val /u 16)) U: [0,-15) S: [-2147483648,2147483633)
 ; CHECK-NEXT: Determining loop execution counts for: @mask_b
 ;
   %masked = and i32 %val, -16
@@ -51,7 +51,7 @@ define i32 @mask_d(i32 %val) nounwind {
 ; CHECK-NEXT: %lowbitscleared = lshr i32 %val, 4
 ; CHECK-NEXT: --> (%val /u 16) U: [0,268435456) S: [0,268435456)
 ; CHECK-NEXT: %masked = shl i32 %lowbitscleared, 4
-; CHECK-NEXT: --> (16 * (%val /u 16)) U: [0,-15) S: [0,-15)
+; CHECK-NEXT: --> (16 * (%val /u 16)) U: [0,-15) S: [-2147483648,2147483633)
 ; CHECK-NEXT: Determining loop execution counts for: @mask_d
 ;
   %lowbitscleared = lshr i32 %val, 4
diff --git a/test/Analysis/ScalarEvolution/increasing-or-decreasing-iv.ll b/test/Analysis/ScalarEvolution/increasing-or-decreasing-iv.ll
index ae5e3609326..249698d36ed 100644
--- a/test/Analysis/ScalarEvolution/increasing-or-decreasing-iv.ll
+++ b/test/Analysis/ScalarEvolution/increasing-or-decreasing-iv.ll
@@ -58,15 +58,15 @@ loop:
 ; CHECK: %iv.m1 = sub i32 %iv, 1
 ; CHECK-NEXT: --> {(-1 + %start),+,%step}<%loop> U: [-1,120) S: [-1,120)
 ; CHECK: %iv.m2 = sub i32 %iv, 2
-; CHECK-NEXT: --> {(-2 + %start),+,%step}<%loop> U: [-2,119) S: [-2,119)
+; CHECK-NEXT: --> {(-2 + %start),+,%step}<%loop> U: [0,-1) S: [-2,119)
 ; CHECK: %iv.m3 = sub i32 %iv, 3
 ; CHECK-NEXT: --> {(-3 + %start),+,%step}<%loop> U: [-3,118) S: [-3,118)
 ; CHECK: %iv.m4 = sub i32 %iv, 4
-; CHECK-NEXT: --> {(-4 + %start),+,%step}<%loop> U: [-4,117) S: [-4,117)
+; CHECK-NEXT: --> {(-4 + %start),+,%step}<%loop> U: [0,-3) S: [-4,117)
 ; CHECK: %iv.m5 = sub i32 %iv, 5
 ; CHECK-NEXT: --> {(-5 + %start),+,%step}<%loop> U: [-5,116) S: [-5,116)
 ; CHECK: %iv.m6 = sub i32 %iv, 6
-; CHECK-NEXT: --> {(-6 + %start),+,%step}<%loop> U: [-6,115) S: [-6,115)
+; CHECK-NEXT: --> {(-6 + %start),+,%step}<%loop> U: [0,-1) S: [-6,115)
 ; CHECK: %iv.m7 = sub i32 %iv, 7
 ; CHECK-NEXT: --> {(-7 + %start),+,%step}<%loop> U: [-7,114) S: [-7,114)
@@ -206,7 +206,7 @@ loop:
   %iv.next = add i32 %iv, %step.plus.one
   %iv.sext = sext i32 %iv to i64
 ; CHECK: %iv.sext = sext i32 %iv to i64
-; CHECK-NEXT: --> {(sext i32 %start to i64),+,(1 + (sext i32 %step to i64))}<%loop> U: [0,128) S: [0,128)
+; CHECK-NEXT: --> {(sext i32 %start to i64),+,(1 + (sext i32 %step to i64))}<%loop> U: [0,128) S: [0,128)
   %loop.iv.inc = add i16 %loop.iv, 1
   %be.cond = icmp ne i16 %loop.iv.inc, 128
   br i1 %be.cond, label %loop, label %leave
diff --git a/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll b/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
index f28e66716e1..318078ebf6a 100644
--- a/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
+++ b/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
@@ -59,7 +59,7 @@ define void @infer.sext.1(i32 %start, i1* %c) {
   %idx = phi i32 [ %start.real, %entry ], [ %idx.inc, %loop ]
   %idx.sext = sext i32 %idx to i64
 ; CHECK: %idx.sext = sext i32 %idx to i64
-; CHECK-NEXT: --> {(2 + (sext i32 (4 * %start) to i64)),+,2}<%loop>
+; CHECK-NEXT: --> {(2 + (sext i32 (4 * %start) to i64)),+,2}<%loop>
   %idx.inc = add nsw i32 %idx, 2
   %condition = load i1, i1* %c
   br i1 %condition, label %exit, label %loop
diff --git a/test/Analysis/ScalarEvolution/lshr-shl-differentconstmask.ll b/test/Analysis/ScalarEvolution/lshr-shl-differentconstmask.ll
index 89c31d12a53..c7fda08fc71 100644
--- a/test/Analysis/ScalarEvolution/lshr-shl-differentconstmask.ll
+++ b/test/Analysis/ScalarEvolution/lshr-shl-differentconstmask.ll
@@ -121,7 +121,7 @@ define i32 @masky_biggerShr(i32 %val) {
 ; CHECK-NEXT: %tmp1 = shl i32 %val, 2
 ; CHECK-NEXT: --> (4 * %val) U: [0,-3) S: [-2147483648,2147483645)
 ; CHECK-NEXT: %tmp2 = and i32 %tmp1, -64
-; CHECK-NEXT: --> (64 * (zext i26 (trunc i32 (%val /u 16) to i26) to i32)) U: [0,-63) S: [0,-63)
+; CHECK-NEXT: --> (64 * (zext i26 (trunc i32 (%val /u 16) to i26) to i32)) U: [0,-63) S: [-2147483648,2147483585)
 ; CHECK-NEXT: Determining loop execution counts for: @masky_biggerShr
 ;
   %tmp1 = shl i32 %val, 2
diff --git a/test/Analysis/ScalarEvolution/sext-mul.ll b/test/Analysis/ScalarEvolution/sext-mul.ll
index 8fe22db9f1c..42810be6ed7 100644
--- a/test/Analysis/ScalarEvolution/sext-mul.ll
+++ b/test/Analysis/ScalarEvolution/sext-mul.ll
@@ -7,7 +7,7 @@
 ; CHECK: %tmp11 = getelementptr inbounds i32, i32* %arg, i64 %tmp10
 ; CHECK-NEXT: --> {{.*}} Exits: ((4 * (sext i32 (-2 + (2 * %arg2)) to i64)) + %arg)
 ; CHECK: %tmp14 = or i64 %tmp10, 1
-; CHECK-NEXT: --> {{.*}} Exits: (1 + (sext i32 (-2 + (2 * %arg2)) to i64))
+; CHECK-NEXT: --> {{.*}} Exits: (1 + (sext i32 (-2 + (2 * %arg2)) to i64))
 ; CHECK: %tmp15 = getelementptr inbounds i32, i32* %arg, i64 %tmp14
 ; CHECK-NEXT: --> {{.*}} Exits: (4 + (4 * (sext i32 (-2 + (2 * %arg2)) to i64)) + %arg)
 ; CHECK:Loop %bb7: backedge-taken count is (-1 + (zext i32 %arg2 to i64))
@@ -50,7 +50,7 @@ bb7:                                              ; preds = %bb7, %bb3
 ; CHECK: %t10 = ashr exact i128 %t9, 1
 ; CHECK-NEXT: --> {{.*}} Exits: (sext i127 (-633825300114114700748351602688 + (633825300114114700748351602688 * (zext i32 %arg5 to i127))) to i128)
 ; CHECK: %t14 = or i128 %t10, 1
-; CHECK-NEXT: --> {{.*}} Exits: (1 + (sext i127 (-633825300114114700748351602688 + (633825300114114700748351602688 * (zext i32 %arg5 to i127))) to i128))
+; CHECK-NEXT: --> {{.*}} Exits: (1 + (sext i127 (-633825300114114700748351602688 + (633825300114114700748351602688 * (zext i32 %arg5 to i127))) to i128))
 ; CHECK: Loop %bb7: backedge-taken count is (-1 + (zext i32 %arg5 to i128))
 ; CHECK-NEXT: Loop %bb7: max backedge-taken count is -1
 ; CHECK-NEXT: Loop %bb7: Predicated backedge-taken count is (-1 + (zext i32 %arg5 to i128))
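
Illustration (not part of the commit): ConstantRange::intersectWith() can only return a single contiguous range, so when the exact intersection is a union of two pieces it has to pick a conservative superset, and the PreferredRangeType argument controls which one. The standalone sketch below demonstrates this; it assumes an LLVM tree that already contains D59959, and the i8 ranges A and B are invented for the example.

// intersect_demo.cpp -- standalone sketch, not part of this commit.
// Assumes LLVM headers/libraries with D59959 (PreferredRangeType) applied.
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // A = [246,10) wraps the unsigned domain (as signed it is [-10,10)).
  // B = [5,251) wraps the signed domain (as unsigned it is plain [5,251)).
  // Their exact intersection is the disjoint union [246,251) u [5,10),
  // so intersectWith() must return a conservative single-range superset;
  // with these inputs the only candidates are A and B themselves.
  ConstantRange A(APInt(8, 246), APInt(8, 10));
  ConstantRange B(APInt(8, 5), APInt(8, 251));

  // Smallest: the candidate with the fewest elements wins, so A (20 vs. 246).
  outs() << "Smallest: " << A.intersectWith(B, ConstantRange::Smallest) << "\n";
  // Unsigned: prefer the candidate that does not wrap unsigned, so B.
  outs() << "Unsigned: " << A.intersectWith(B, ConstantRange::Unsigned) << "\n";
  // Signed: prefer the candidate that does not wrap signed, so A.
  outs() << "Signed:   " << A.intersectWith(B, ConstantRange::Signed) << "\n";
  return 0;
}

This matches the intent described in the commit message: getRangeRef() now asks for the non-wrapping candidate in the domain named by the sign hint (Unsigned for HINT_RANGE_UNSIGNED, Signed for HINT_RANGE_SIGNED), while getRangeForAffineAR() keeps ConstantRange::Smallest because it intersects a signed estimate with an unsigned one and neither domain is preferred.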