return false;
}
+// Finds an integer D for an expression (C + x + y + ...) such that the top
+// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
+// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
+// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
+// the (C + x + y + ...) expression is \p WholeAddExpr.
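+//
+// For example, for (5 + 4 * x + 8 * y) the minimum number of trailing zeros
+// of (4 * x + 8 * y) is 2, so D = 5 mod 2^2 = 1 and the expression can be
+// rewritten as (1 + (4 + 4 * x + 8 * y)): the residual is a multiple of 4
+// and D < 4, so the top-level addition cannot wrap.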
+static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
+ const SCEVConstant *ConstantTerm,
+ const SCEVAddExpr *WholeAddExpr) {
+ const APInt C = ConstantTerm->getAPInt();
+ const unsigned BitWidth = C.getBitWidth();
+ // Find number of trailing zeros of (x + y + ...) w/o the C first:
+ uint32_t TZ = BitWidth;
+ for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
+ TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
+ if (TZ) {
+ // Set D to be as many least significant bits of C as possible while still
+ // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
+ return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
+ }
+ return APInt(BitWidth, 0);
+}
+
+// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
+// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
+// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
+// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
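+//
+// For example, for {5,+,4} the step has 2 trailing zeros, so D = 5 mod 2^2 = 1
+// and {5,+,4} can be rewritten as (1 + {4,+,4}): every value of {4,+,4} is a
+// multiple of 4, so adding D = 1 to it cannot wrap.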
+static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
+ const APInt &ConstantStart,
+ const SCEV *Step) {
+ const unsigned BitWidth = ConstantStart.getBitWidth();
+ const uint32_t TZ = SE.GetMinTrailingZeros(Step);
+ if (TZ)
+ return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
+ : ConstantStart;
+ return APInt(BitWidth, 0);
+}
+
const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
}
}
+ // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
+ // if D + (C - D + Step * n) could be proven to not unsigned wrap
+ // where D maximizes the number of trailing zeros of (C - D + Step * n)
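+ // E.g. zext({7,+,4}) --> (zext(3) + zext({4,+,4}))<nuw><nsw>: {4,+,4} only
+ // takes values that are multiples of 4, and adding 3 < 4 to a multiple of 4
+ // cannot wrap.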
+ if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
+ const APInt &C = SC->getAPInt();
+ const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
+ if (D != 0) {
+ const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
+ const SCEV *SResidual =
+ getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
+ const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
+ return getAddExpr(SZExtD, SZExtR,
+ (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
+ Depth + 1);
+ }
+ }
+
if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
return getAddRecExpr(
return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
}
- // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))<nuw>
+ // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))<nuw><nsw>
// if D + (C - D + x + y + ...) could be proven to not unsigned wrap
// where D maximizes the number of trailing zeros of (C - D + x + y + ...)
//
- // Useful while proving that address arithmetic expressions are equal or
- // differ by a small constant amount, see LoadStoreVectorizer pass.
+ // Often address arithmetic contains expressions like
+ // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
+ // This transformation is useful when proving that such expressions are
+ // equal or differ by a small constant amount; see the LoadStoreVectorizer pass.
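+ // E.g. zext(5 + (4 * X)) --> (zext(1) + zext(4 + (4 * X)))<nuw><nsw> and
+ // zext(7 + (4 * X)) --> (zext(3) + zext(4 + (4 * X)))<nuw><nsw>, which makes
+ // the two expressions differ by the constant 2.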
if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
- // Often address arithmetics contain expressions like
- // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
- // ConstantRange is unable to prove that it's possible to transform
- // (5 + (4 * X)) to (1 + (4 + (4 * X))) w/o underflowing:
- //
- // | Expression | ConstantRange | KnownBits |
- // |---------------|------------------------|-----------------------|
- // | i8 4 * X | [L: 0, U: 253) | XXXX XX00 |
- // | | => Min: 0, Max: 252 | => Min: 0, Max: 252 |
- // | | | |
- // | i8 4 * X + 5 | [L: 5, U: 2) (wrapped) | YYYY YY01 |
- // | (101) | => Min: 0, Max: 255 | => Min: 1, Max: 253 |
- //
- // As KnownBits are not available for SCEV expressions, use number of
- // trailing zeroes instead:
- APInt C = SC->getAPInt();
- uint32_t TZ = C.getBitWidth();
- for (unsigned I = 1, E = SA->getNumOperands(); I < E && TZ; ++I)
- TZ = std::min(TZ, GetMinTrailingZeros(SA->getOperand(I)));
- if (TZ) {
- APInt D = TZ < C.getBitWidth() ? C.trunc(TZ).zext(C.getBitWidth()) : C;
- if (D != 0) {
- const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
- const SCEV *SResidual =
- getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
- const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
- return getAddExpr(SZExtD, SZExtR, SCEV::FlagNUW, Depth + 1);
- }
+ const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
+ if (D != 0) {
+ const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
+ const SCEV *SResidual =
+ getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
+ const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
+ return getAddExpr(SZExtD, SZExtR,
+ (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
+ Depth + 1);
}
}
}
return getTruncateOrSignExtend(X, Ty);
}
- // sext(C1 + (C2 * x)) --> C1 + sext(C2 * x) if C1 < C2
if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
- if (SA->getNumOperands() == 2) {
- auto *SC1 = dyn_cast<SCEVConstant>(SA->getOperand(0));
- auto *SMul = dyn_cast<SCEVMulExpr>(SA->getOperand(1));
- if (SMul && SC1) {
- if (auto *SC2 = dyn_cast<SCEVConstant>(SMul->getOperand(0))) {
- const APInt &C1 = SC1->getAPInt();
- const APInt &C2 = SC2->getAPInt();
- if (C1.isStrictlyPositive() && C2.isStrictlyPositive() &&
- C2.ugt(C1) && C2.isPowerOf2())
- return getAddExpr(getSignExtendExpr(SC1, Ty, Depth + 1),
- getSignExtendExpr(SMul, Ty, Depth + 1),
- SCEV::FlagAnyWrap, Depth + 1);
- }
- }
- }
-
// sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
if (SA->hasNoSignedWrap()) {
// If the addition does not sign overflow then we can, by definition,
Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
}
+
+ // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))<nuw><nsw>
+ // if D + (C - D + x + y + ...) could be proven to not signed wrap
+ // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
+ //
+ // For instance, this will bring two seemingly different expressions:
+ // 1 + sext(5 + 20 * %x + 24 * %y) and
+ // sext(6 + 20 * %x + 24 * %y)
+ // to the same form:
+ // 2 + sext(4 + 20 * %x + 24 * %y)
+ if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
+ const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
+ if (D != 0) {
+ const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
+ const SCEV *SResidual =
+ getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
+ const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
+ return getAddExpr(SSExtD, SSExtR,
+ (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
+ Depth + 1);
+ }
+ }
}
// If the input value is a chrec scev, and we can prove that the value
// did not overflow the old, smaller, value, we can sign extend all of the
}
}
- // If Start and Step are constants, check if we can apply this
- // transformation:
- // sext{C1,+,C2} --> C1 + sext{0,+,C2} if C1 < C2
- auto *SC1 = dyn_cast<SCEVConstant>(Start);
- auto *SC2 = dyn_cast<SCEVConstant>(Step);
- if (SC1 && SC2) {
- const APInt &C1 = SC1->getAPInt();
- const APInt &C2 = SC2->getAPInt();
- if (C1.isStrictlyPositive() && C2.isStrictlyPositive() && C2.ugt(C1) &&
- C2.isPowerOf2()) {
- Start = getSignExtendExpr(Start, Ty, Depth + 1);
- const SCEV *NewAR = getAddRecExpr(getZero(AR->getType()), Step, L,
- AR->getNoWrapFlags());
- return getAddExpr(Start, getSignExtendExpr(NewAR, Ty, Depth + 1),
- SCEV::FlagAnyWrap, Depth + 1);
+ // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
+ // if D + (C - D + Step * n) could be proven to not signed wrap
+ // where D maximizes the number of trailing zeros of (C - D + Step * n)
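+ // E.g. sext({7,+,4}) --> (sext(3) + sext({4,+,4}))<nuw><nsw> by the same
+ // trailing-zeros argument as in the zext case above.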
+ if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
+ const APInt &C = SC->getAPInt();
+ const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
+ if (D != 0) {
+ const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
+ const SCEV *SResidual =
+ getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
+ const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
+ return getAddExpr(SSExtD, SSExtR,
+ (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
+ Depth + 1);
}
}
ret i32 %conv
}
+; Similar to foo_2double but with a non-power-of-2 factor and potential
+; wrapping (either both indices wrap or neither does)
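+; The indices are (6 + 6*u) and (7 + 6*u); the new sext(C + x + ...) rewrite
+; turns sext(7 + 6*u) into (1 + sext(6 + 6*u))<nuw><nsw>, so SCEV can prove
+; that the two accesses are adjacent.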
+; CHECK-LABEL: foo_2double_non_power_of_2
+; CHECK: load <2 x double>
+; CHECK: load <2 x double>
+; Function Attrs: nounwind ssp uwtable
+define void @foo_2double_non_power_of_2(i32 %u) #0 {
+entry:
+ %u.addr = alloca i32, align 4
+ store i32 %u, i32* %u.addr, align 4
+ %mul = mul i32 %u, 6
+ %add6 = add i32 %mul, 6
+ %idxprom = sext i32 %add6 to i64
+ %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
+ %0 = load double, double* %arrayidx, align 8
+ %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
+ %1 = load double, double* %arrayidx4, align 8
+ %add5 = fadd double %0, %1
+ store double %add5, double* %arrayidx, align 8
+ %add7 = add i32 %mul, 7
+ %idxprom12 = sext i32 %add7 to i64
+ %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
+ %2 = load double, double* %arrayidx13, align 8
+ %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
+ %3 = load double, double* %arrayidx17, align 8
+ %add18 = fadd double %2, %3
+ store double %add18, double* %arrayidx13, align 8
+ ret void
+}
+
+; Similar to foo_2double_non_power_of_2 but with zext's instead of sext's
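+; Here zext(7 + 6*u) similarly becomes (1 + zext(6 + 6*u))<nuw><nsw>.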
+; CHECK-LABEL: foo_2double_non_power_of_2_zext
+; CHECK: load <2 x double>
+; CHECK: load <2 x double>
+; Function Attrs: nounwind ssp uwtable
+define void @foo_2double_non_power_of_2_zext(i32 %u) #0 {
+entry:
+ %u.addr = alloca i32, align 4
+ store i32 %u, i32* %u.addr, align 4
+ %mul = mul i32 %u, 6
+ %add6 = add i32 %mul, 6
+ %idxprom = zext i32 %add6 to i64
+ %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
+ %0 = load double, double* %arrayidx, align 8
+ %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
+ %1 = load double, double* %arrayidx4, align 8
+ %add5 = fadd double %0, %1
+ store double %add5, double* %arrayidx, align 8
+ %add7 = add i32 %mul, 7
+ %idxprom12 = zext i32 %add7 to i64
+ %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
+ %2 = load double, double* %arrayidx13, align 8
+ %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
+ %3 = load double, double* %arrayidx17, align 8
+ %add18 = fadd double %2, %3
+ store double %add18, double* %arrayidx13, align 8
+ ret void
+}
+
+; Similar to foo_2double_non_power_of_2, but now we are dealing with an AddRec SCEV.
+; Alternatively, this is like foo_loop, but with a non-power-of-2 factor and
+; potential wrapping (either both indices wrap or neither does)
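+; The indices are {5,+,12} and {6,+,12}; with the new AddRec rewrite,
+; sext({5,+,12}) becomes (1 + sext({4,+,12})) and sext({6,+,12}) becomes
+; (2 + sext({4,+,12})), so the accesses are provably one element apart.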
+; CHECK-LABEL: foo_loop_non_power_of_2
+; CHECK: <2 x double>
+; Function Attrs: nounwind ssp uwtable
+define i32 @foo_loop_non_power_of_2(double* %A, i32 %n) #0 {
+entry:
+ %A.addr = alloca double*, align 8
+ %n.addr = alloca i32, align 4
+ %sum = alloca double, align 8
+ %i = alloca i32, align 4
+ store double* %A, double** %A.addr, align 8
+ store i32 %n, i32* %n.addr, align 4
+ store double 0.000000e+00, double* %sum, align 8
+ store i32 0, i32* %i, align 4
+ %cmp1 = icmp slt i32 0, %n
+ br i1 %cmp1, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph: ; preds = %entry
+ br label %for.body
+
+for.body: ; preds = %for.body.lr.ph, %for.body
+ %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ]
+ %mul = mul i32 %0, 12
+ %add.5 = add i32 %mul, 5
+ %idxprom = sext i32 %add.5 to i64
+ %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
+ %2 = load double, double* %arrayidx, align 8
+ %mul1 = fmul double 7.000000e+00, %2
+ %add.6 = add i32 %mul, 6
+ %idxprom3 = sext i32 %add.6 to i64
+ %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
+ %3 = load double, double* %arrayidx4, align 8
+ %mul5 = fmul double 7.000000e+00, %3
+ %add6 = fadd double %mul1, %mul5
+ %add7 = fadd double %1, %add6
+ store double %add7, double* %sum, align 8
+ %inc = add i32 %0, 1
+ store i32 %inc, i32* %i, align 4
+ %cmp = icmp slt i32 %inc, %n
+ br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge: ; preds = %for.body
+ %split = phi double [ %add7, %for.body ]
+ br label %for.end
+
+for.end: ; preds = %for.cond.for.end_crit_edge, %entry
+ %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ]
+ %conv = fptosi double %.lcssa to i32
+ ret i32 %conv
+}
+
+; This is generated by `clang -std=c11 -Wpedantic -Wall -O3 main.c -S -o - -emit-llvm`
+; with !{!"clang version 7.0.0 (trunk 337339) (llvm/trunk 337344)"}, with the !tbaa
+; metadata nodes stripped off to fit the rest of the test file, where `cat main.c` is:
+;
+; double bar(double *a, unsigned n) {
+; double x = 0.0;
+; double y = 0.0;
+; for (unsigned i = 0; i < n; i += 2) {
+; x += a[i];
+; y += a[i + 1];
+; }
+; return x * y;
+; }
+;
+; The resulting IR is similar to @foo_loop, but with zext's instead of sext's.
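+;
+; The even index is the AddRec {0,+,2} and the odd one is {1,+,2};
+; zext({1,+,2}) now becomes (1 + zext({0,+,2}))<nuw><nsw>, which makes the
+; two loads provably consecutive.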
+;
+; Make sure we are able to vectorize this from now on:
+;
+; CHECK-LABEL: @bar
+; CHECK: load <2 x double>
+define double @bar(double* nocapture readonly %a, i32 %n) local_unnamed_addr #0 {
+entry:
+ %cmp15 = icmp eq i32 %n, 0
+ br i1 %cmp15, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %entry
+ %x.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add, %for.body ]
+ %y.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add4, %for.body ]
+ %mul = fmul double %x.0.lcssa, %y.0.lcssa
+ ret double %mul
+
+for.body: ; preds = %entry, %for.body
+ %i.018 = phi i32 [ %add5, %for.body ], [ 0, %entry ]
+ %y.017 = phi double [ %add4, %for.body ], [ 0.000000e+00, %entry ]
+ %x.016 = phi double [ %add, %for.body ], [ 0.000000e+00, %entry ]
+ %idxprom = zext i32 %i.018 to i64
+ %arrayidx = getelementptr inbounds double, double* %a, i64 %idxprom
+ %0 = load double, double* %arrayidx, align 8
+ %add = fadd double %x.016, %0
+ %add1 = or i32 %i.018, 1
+ %idxprom2 = zext i32 %add1 to i64
+ %arrayidx3 = getelementptr inbounds double, double* %a, i64 %idxprom2
+ %1 = load double, double* %arrayidx3, align 8
+ %add4 = fadd double %y.017, %1
+ %add5 = add i32 %i.018, 2
+ %cmp = icmp ult i32 %add5, %n
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+}
+
attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
!llvm.ident = !{!0}