From 8751486522ab78e5e550b96d8259f050d90ba5cf Mon Sep 17 00:00:00 2001 From: Sanjay Patel Date: Fri, 21 Oct 2016 20:16:27 +0000 Subject: [PATCH] [DAG] enhance computeKnownBits to handle SHL with vector splat constant Also, use APInt to avoid crashing on types larger than vNi64. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@284874 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 19 +++++++++---------- test/CodeGen/X86/combine-shl.ll | 8 ++------ test/CodeGen/X86/negate.ll | 5 +---- 3 files changed, 12 insertions(+), 20 deletions(-) diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 56945b4bf98..974322aabc1 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -2144,23 +2144,21 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1); break; case ISD::SHL: - // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0 - if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { - unsigned ShAmt = SA->getZExtValue(); - + if (ConstantSDNode *SA = isConstOrConstSplat(Op.getOperand(1))) { // If the shift count is an invalid immediate, don't do anything. - if (ShAmt >= BitWidth) + APInt ShAmt = SA->getAPIntValue(); + if (ShAmt.uge(BitWidth)) break; - computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1); - KnownZero <<= ShAmt; - KnownOne <<= ShAmt; + computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1); + KnownZero = KnownZero << ShAmt; + KnownOne = KnownOne << ShAmt; // low bits known zero. - KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt); + KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt.getZExtValue()); } break; case ISD::SRL: - // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0 + // FIXME: Reuse isConstOrConstSplat + APInt from above. 
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { unsigned ShAmt = SA->getZExtValue(); @@ -2177,6 +2175,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero, } break; case ISD::SRA: + // FIXME: Reuse isConstOrConstSplat + APInt from above. if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { unsigned ShAmt = SA->getZExtValue(); diff --git a/test/CodeGen/X86/combine-shl.ll b/test/CodeGen/X86/combine-shl.ll index 3f3871ec5c3..64ed1cea8d4 100644 --- a/test/CodeGen/X86/combine-shl.ll +++ b/test/CodeGen/X86/combine-shl.ll @@ -61,16 +61,12 @@ define <4 x i32> @combine_vec_shl_by_zero(<4 x i32> %x) { define <4 x i32> @combine_vec_shl_known_zero0(<4 x i32> %x) { ; SSE-LABEL: combine_vec_shl_known_zero0: ; SSE: # BB#0: -; SSE-NEXT: pxor %xmm1, %xmm1 -; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7] -; SSE-NEXT: pslld $16, %xmm0 +; SSE-NEXT: xorps %xmm0, %xmm0 ; SSE-NEXT: retq ; ; AVX-LABEL: combine_vec_shl_known_zero0: ; AVX: # BB#0: -; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7] -; AVX-NEXT: vpslld $16, %xmm0, %xmm0 +; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; AVX-NEXT: retq %1 = and <4 x i32> %x, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760> %2 = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16> diff --git a/test/CodeGen/X86/negate.ll b/test/CodeGen/X86/negate.ll index e42794d184e..6f07378e0e4 100644 --- a/test/CodeGen/X86/negate.ll +++ b/test/CodeGen/X86/negate.ll @@ -35,10 +35,7 @@ define i8 @negate_zero_or_minsigned_nsw(i8 %x) { define <4 x i32> @negate_zero_or_minsigned_nsw_vec(<4 x i32> %x) { ; CHECK-LABEL: negate_zero_or_minsigned_nsw_vec: ; CHECK: # BB#0: -; CHECK-NEXT: pslld $31, %xmm0 -; CHECK-NEXT: pxor %xmm1, %xmm1 -; CHECK-NEXT: psubd %xmm0, %xmm1 -; CHECK-NEXT: movdqa %xmm1, %xmm0 +; CHECK-NEXT: xorps %xmm0, %xmm0 ; CHECK-NEXT: retq ; %signbit = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31> -- 2.40.0