When folding a shift into a test-under-mask comparison, make sure that
there is no loss of precision when creating the shifted comparison
value. This usually never happens, except for certain always-true
comparisons in unoptimized code.
Fixes PR35529.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@319818 91177308-0d34-0410-b5e6-96231b3b80d8
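
The added checks below make the fold conditional on the shifted comparison
value round-tripping exactly. A minimal standalone sketch of that guard,
assuming 64-bit unsigned values and in-range shift amounts (the helper names
are illustrative, not the actual SystemZISelLowering.cpp code):

  #include <cstdint>

  // For a compare fed by an SHL, the mask and comparison value are shifted
  // right before forming the test-under-mask condition. The fold is only
  // safe if shifting CmpVal right and then back left loses no bits.
  static bool shlCmpValSurvives(uint64_t CmpVal, uint64_t ShiftVal) {
    return ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal;
  }

  // For a compare fed by an SRL, the values are shifted left instead, so
  // the high bits of CmpVal must survive the round trip.
  static bool srlCmpValSurvives(uint64_t CmpVal, uint64_t ShiftVal) {
    return ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal;
  }
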
NewC.Op0.getOpcode() == ISD::SHL &&
isSimpleShift(NewC.Op0, ShiftVal) &&
(MaskVal >> ShiftVal != 0) &&
+ ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
(NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
MaskVal >> ShiftVal,
CmpVal >> ShiftVal,
NewC.Op0.getOpcode() == ISD::SRL &&
isSimpleShift(NewC.Op0, ShiftVal) &&
(MaskVal << ShiftVal != 0) &&
+ ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
(NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
MaskVal << ShiftVal,
CmpVal << ShiftVal,
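
For instance, with the SRL case and the values from the new f19 test below
(a logical shift right by 63 compared against 3), (3 << 63) >> 63 evaluates
to 1 rather than 3, so the round-trip check fails and the shift is not
folded into a test-under-mask instruction.
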
exit:
ret void
}
+
+; Check that we don't fold a shift if the comparison value
+; would need to be shifted out of range
+define void @f19(i64 %a) {
+; CHECK-LABEL: f19:
+; CHECK-NOT: tmhh
+; CHECK: srlg [[REG:%r[0-5]]], %r2, 63
+; CHECK: cgibl [[REG]], 3, 0(%r14)
+; CHECK: br %r14
+entry:
+ %shr = lshr i64 %a, 63
+ %cmp = icmp ult i64 %shr, 3
+ br i1 %cmp, label %exit, label %store
+
+store:
+ store i32 1, i32 *@g
+ br label %exit
+
+exit:
+ ret void
+}
+