From ead22e207a86cad13c01716c9027e65fd5ec8fbf Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 31 Dec 2018 19:09:30 +0000
Subject: [PATCH] [SelectionDAG] Add SIGN_EXTEND_VECTOR_INREG support to
 computeKnownBits.

Differential Revision: https://reviews.llvm.org/D56168

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@350179 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 10 +++++++++-
 test/CodeGen/X86/combine-shl.ll           | 14 ++++----------
 2 files changed, 13 insertions(+), 11 deletions(-)

diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 2f26a150b50..519f4d82f6a 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2827,7 +2827,15 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
     Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
     break;
   }
-  // TODO ISD::SIGN_EXTEND_VECTOR_INREG
+  case ISD::SIGN_EXTEND_VECTOR_INREG: {
+    EVT InVT = Op.getOperand(0).getValueType();
+    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
+    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
+    // If the sign bit is known to be zero or one, then sext will extend
+    // it to the top bits, else it will just zext.
+    Known = Known.sext(BitWidth);
+    break;
+  }
   case ISD::SIGN_EXTEND: {
     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
     // If the sign bit is known to be zero or one, then sext will extend
diff --git a/test/CodeGen/X86/combine-shl.ll b/test/CodeGen/X86/combine-shl.ll
index d4ecec0376f..64f9f10c4c6 100644
--- a/test/CodeGen/X86/combine-shl.ll
+++ b/test/CodeGen/X86/combine-shl.ll
@@ -283,17 +283,11 @@ define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
 ; SSE41-LABEL: combine_vec_shl_ext_shl1:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT:    pmovsxwd %xmm1, %xmm1
 ; SSE41-NEXT:    pmovsxwd %xmm0, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    pslld $30, %xmm2
-; SSE41-NEXT:    pslld $31, %xmm0
-; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    pslld $28, %xmm2
-; SSE41-NEXT:    pslld $29, %xmm1
-; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    pslld $30, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ext_shl1:
-- 
2.50.1
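
A note for readers (not part of the patch itself): the new case is correct because KnownBits::sext replicates a known sign bit into every widened bit, so it suffices to compute known bits for the demanded low input elements and then sign-extend the result. Below is a minimal standalone sketch of that propagation; the function name and the 8-bit lane values are invented for illustration:

  // Illustration only; the bit patterns below are made up for the example.
  #include "llvm/Support/KnownBits.h"
  using namespace llvm;

  KnownBits sextKnownBitsExample() {
    // An 8-bit lane known to match 0b000001?? (sign bit known zero).
    KnownBits In(8);
    In.Zero = APInt(8, 0xF8); // bits 3-7 known to be 0
    In.One  = APInt(8, 0x04); // bit 2 known to be 1
    // sext(32) sign-extends both masks: since the sign bit is known zero,
    // bits 8-31 of the result become known zero. A known-one sign bit would
    // make them known one; an unknown sign bit leaves them unknown.
    return In.sext(32);
  }

This is exactly why the combine-shl.ll test improves: once the high lanes of the sign-extended vector are known zero, the shifts feeding them fold away and the backend can materialize those lanes with a plain pxor.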