return Mask.isSubsetOf(Known.Zero);
}
+/// Helper function that checks whether a node is a constant or a build
+/// vector whose demanded elements all hold the same constant, i.e. a
+/// splat at least within the demanded elts.
+static ConstantSDNode *isConstOrDemandedConstSplat(SDValue N,
+ const APInt &DemandedElts) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
+ return CN;
+ if (N.getOpcode() != ISD::BUILD_VECTOR)
+ return nullptr;
+ EVT VT = N.getValueType();
+ ConstantSDNode *Cst = nullptr;
+ unsigned NumElts = VT.getVectorNumElements();
+ assert(DemandedElts.getBitWidth() == NumElts && "Unexpected vector size");
+ for (unsigned i = 0; i != NumElts; ++i) {
+ if (!DemandedElts[i])
+ continue;
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(i));
+ if (!C || (Cst && Cst->getAPIntValue() != C->getAPIntValue()) ||
+ C->getValueType(0) != VT.getScalarType())
+ return nullptr;
+ Cst = C;
+ }
+ return Cst;
+}
+
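// Illustrative sketch, not part of the patch: a self-contained model of the
// "splat within the demanded elements" check performed above, using plain
// std::optional<int64_t> lanes in place of SDValue/ConstantSDNode operands.
// The function name and types below are hypothetical.
#include <cstdint>
#include <optional>
#include <vector>

std::optional<int64_t>
constSplatForDemanded(const std::vector<std::optional<int64_t>> &Elts,
                      const std::vector<bool> &Demanded) {
  std::optional<int64_t> Splat;
  for (size_t i = 0; i != Elts.size(); ++i) {
    if (!Demanded[i])
      continue; // undemanded lanes may hold anything
    if (!Elts[i] || (Splat && *Splat != *Elts[i]))
      return std::nullopt; // non-constant lane or mismatching constants
    Splat = Elts[i];
  }
  return Splat;
}
// E.g. constSplatForDemanded({32, 63}, {true, false}) yields 32: the vector
// <i64 32, i64 63> behaves as a splat of 32 when only lane 0 is demanded,
// which is the shift-amount pattern exercised by the tests below.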
/// If a SHL/SRA/SRL node has a constant or splat constant shift amount that
/// is less than the element bit-width of the shift node, return it.
static const APInt *getValidShiftAmountConstant(SDValue V) {
case ISD::SRA:
Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
// SRA X, C -> adds C sign bits.
- if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
+ if (ConstantSDNode *C =
+ isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
APInt ShiftVal = C->getAPIntValue();
ShiftVal += Tmp;
Tmp = ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
}
return Tmp;
case ISD::SHL:
- if (ConstantSDNode *C = isConstOrConstSplat(Op.getOperand(1))) {
+ if (ConstantSDNode *C =
+ isConstOrDemandedConstSplat(Op.getOperand(1), DemandedElts)) {
// shl destroys sign bits.
- Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
+ Tmp = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
if (C->getAPIntValue().uge(VTBits) || // Bad shift.
C->getAPIntValue().uge(Tmp)) break; // Shifted all sign bits out.
return Tmp - C->getZExtValue();
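// Illustrative sketch, not part of the patch: the sign-bit arithmetic the
// SRA/SHL cases above perform, reduced to plain integers. Function names are
// hypothetical; VTBits is the element bit-width, SignBits the known count.
#include <algorithm>
#include <cassert>

// ashr X, C: each arithmetic shift step copies the sign bit, so the result
// has min(SignBits + C, VTBits) known sign bits.
unsigned signBitsAfterAShr(unsigned SignBits, unsigned ShAmt, unsigned VTBits) {
  return std::min(SignBits + ShAmt, VTBits);
}

// shl X, C: each shift step discards one known sign bit; once the shift
// reaches the known sign-bit count (or the element width), only the trivial
// single sign bit remains.
unsigned signBitsAfterShl(unsigned SignBits, unsigned ShAmt, unsigned VTBits) {
  if (ShAmt >= VTBits || ShAmt >= SignBits)
    return 1;
  return SignBits - ShAmt;
}

int main() {
  // signbits_ashr_extract_sitofp_1: lane 0 is ashr'd by 32, leaving >= 33
  // sign bits, so the extracted i64 fits a sign-extended i32 (hence the
  // vcvtsi2ssl in the updated checks below).
  assert(signBitsAfterAShr(1, 32, 64) == 33);
  // signbits_ashr_shl_extract_sitofp: ashr by 61 then shl by 20 on lane 0
  // still leaves 62 - 20 = 42 sign bits, so the same i32 conversion applies.
  assert(signBitsAfterShl(signBitsAfterAShr(1, 61, 64), 20, 64) == 42);
}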
define float @signbits_ashr_extract_sitofp_1(<2 x i64> %a0) nounwind {
; X32-LABEL: signbits_ashr_extract_sitofp_1:
; X32: # BB#0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-8, %esp
-; X32-NEXT: subl $16, %esp
+; X32-NEXT: pushl %eax
; X32-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
; X32-NEXT: vpsrlq $63, %xmm1, %xmm2
; X32-NEXT: vpsrlq $32, %xmm1, %xmm1
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; X32-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X32-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; X32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT: fildll {{[0-9]+}}(%esp)
-; X32-NEXT: fstps {{[0-9]+}}(%esp)
-; X32-NEXT: flds {{[0-9]+}}(%esp)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
+; X32-NEXT: vmovd %xmm0, %eax
+; X32-NEXT: vcvtsi2ssl %eax, %xmm3, %xmm0
+; X32-NEXT: vmovss %xmm0, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_extract_sitofp_1:
; X64-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X64-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; X64-NEXT: vmovq %xmm0, %rax
-; X64-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0
+; X64-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0
; X64-NEXT: retq
%1 = ashr <2 x i64> %a0, <i64 32, i64 63>
%2 = extractelement <2 x i64> %1, i32 0
define float @signbits_ashr_shl_extract_sitofp(<2 x i64> %a0) nounwind {
; X32-LABEL: signbits_ashr_shl_extract_sitofp:
; X32: # BB#0:
-; X32-NEXT: pushl %ebp
-; X32-NEXT: movl %esp, %ebp
-; X32-NEXT: andl $-8, %esp
-; X32-NEXT: subl $16, %esp
+; X32-NEXT: pushl %eax
; X32-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
; X32-NEXT: vpsrlq $60, %xmm1, %xmm2
; X32-NEXT: vpsrlq $61, %xmm1, %xmm1
; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; X32-NEXT: vpxor %xmm1, %xmm0, %xmm0
; X32-NEXT: vpsubq %xmm1, %xmm0, %xmm0
-; X32-NEXT: vpsllq $16, %xmm0, %xmm1
; X32-NEXT: vpsllq $20, %xmm0, %xmm0
-; X32-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; X32-NEXT: vmovq %xmm0, {{[0-9]+}}(%esp)
-; X32-NEXT: fildll {{[0-9]+}}(%esp)
-; X32-NEXT: fstps {{[0-9]+}}(%esp)
-; X32-NEXT: flds {{[0-9]+}}(%esp)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
+; X32-NEXT: vmovd %xmm0, %eax
+; X32-NEXT: vcvtsi2ssl %eax, %xmm3, %xmm0
+; X32-NEXT: vmovss %xmm0, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
; X32-NEXT: retl
;
; X64-LABEL: signbits_ashr_shl_extract_sitofp:
; X64-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; X64-NEXT: vpsllq $20, %xmm0, %xmm0
; X64-NEXT: vmovq %xmm0, %rax
-; X64-NEXT: vcvtsi2ssq %rax, %xmm2, %xmm0
+; X64-NEXT: vcvtsi2ssl %eax, %xmm2, %xmm0
; X64-NEXT: retq
%1 = ashr <2 x i64> %a0, <i64 61, i64 60>
%2 = shl <2 x i64> %1, <i64 20, i64 16>