[X86] Add X86ISD::VSRAI to computeKnownBitsForTargetNode.
author Craig Topper <craig.topper@intel.com>
Mon, 31 Dec 2018 19:09:27 +0000 (19:09 +0000)
committer Craig Topper <craig.topper@intel.com>
Mon, 31 Dec 2018 19:09:27 +0000 (19:09 +0000)
Differential Revision: https://reviews.llvm.org/D56169

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@350178 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/X86/X86ISelLowering.cpp
test/CodeGen/X86/combine-shl.ll
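
For readers skimming the diffs below: the change teaches X86's computeKnownBitsForTargetNode to handle X86ISD::VSRAI (vector arithmetic shift right by immediate) alongside the existing VSHLI/VSRLI cases, by arithmetically shifting the known-zero and known-one masks, so whatever is known about the sign bit is replicated into the vacated high bits. A minimal standalone sketch of that propagation on a single 32-bit lane (not part of the patch; plain integer masks and hypothetical names Known32/ashr32/ashrKnown rather than LLVM's KnownBits/APInt API):

#include <cstdint>
#include <cstdio>

// Known bits for one 32-bit lane: each bit of the value may be known-zero,
// known-one, or unknown (neither mask bit set). Hypothetical names, not
// LLVM's KnownBits/APInt API.
struct Known32 {
  uint32_t Zero; // bit set => that bit of the value is known to be 0
  uint32_t One;  // bit set => that bit of the value is known to be 1
};

// Portable arithmetic shift right of a 32-bit mask (assumes 0 < Amt < 32).
static uint32_t ashr32(uint32_t V, unsigned Amt) {
  uint32_t R = V >> Amt;
  if (V & 0x80000000u)          // sign bit set: replicate it into the top
    R |= ~0u << (32 - Amt);
  return R;
}

// Propagate known bits through an arithmetic shift right by Amt, the same
// way the patch does with ashrInPlace on Known.Zero/Known.One: whatever is
// known about the sign bit is what gets copied into the vacated high bits.
static Known32 ashrKnown(Known32 K, unsigned Amt) {
  K.Zero = ashr32(K.Zero, Amt);
  K.One = ashr32(K.One, Amt);
  return K;
}

int main() {
  // Example lane: the sign bit and bit 0 are known zero, the rest is unknown.
  Known32 K = {/*Zero=*/0x80000001u, /*One=*/0x00000000u};
  Known32 R = ashrKnown(K, 16);
  // The sign bit was known zero, so the top 17 bits of the shifted value are
  // known zero too; nothing new becomes known one.
  std::printf("Zero=%08x One=%08x\n", R.Zero, R.One); // Zero=ffff8000 One=00000000
  return 0;
}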

lib/Target/X86/X86ISelLowering.cpp
index ddefe05667f0f2b3d0744392650dc3ea361cada0..2ea698d619143fd4c278907e9d8d9c7cc07518ad 100644 (file)
@@ -30001,6 +30001,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
     break;
   }
+  case X86ISD::VSRAI:
   case X86ISD::VSHLI:
   case X86ISD::VSRLI: {
     if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
@@ -30016,11 +30017,14 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
         Known.One <<= ShAmt;
         // Low bits are known zero.
         Known.Zero.setLowBits(ShAmt);
-      } else {
+      } else if (Opc == X86ISD::VSRLI) {
         Known.Zero.lshrInPlace(ShAmt);
         Known.One.lshrInPlace(ShAmt);
         // High bits are known zero.
         Known.Zero.setHighBits(ShAmt);
+      } else {
+        Known.Zero.ashrInPlace(ShAmt);
+        Known.One.ashrInPlace(ShAmt);
       }
     }
     break;
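
A rough trace of why this shows up in the combine-shl.ll update below (hypothetical values, just the known-bits reasoning, not the actual DAG combine code): in combine_vec_shl_ext_shl1 each i16 lane is shifted left first (the pmullw), sign-extended via punpck + psrad $16, then shifted left again by a per-lane amount. Once psrad (X86ISD::VSRAI) participates in known-bits analysis, a lane whose final left shift keeps only an already-known-zero bit is provably zero, which is why the updated SSE2 output zeroes those lanes with xorpd instead of emitting psrad $16 and pslld $31 for them.

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical trace of one 32-bit lane. After punpcklwd the i16 value sits
  // in the high half of the lane, and the pmullw already shifted it left once,
  // so bit 16 of the lane is known zero; everything else is treated as unknown.
  uint32_t KnownZero = 1u << 16;

  // psrad $16 is X86ISD::VSRAI: with this patch the known-zero mask is shifted
  // arithmetically as well. The mask's top bit is clear (the lane's sign bit is
  // unknown), so the shift fills with zeros and bit 0 becomes known zero.
  KnownZero >>= 16; // 0x00000001

  // A final shl by 31 keeps only bit 0 of its input (moved to bit 31). Bits
  // vacated by the left shift are known zero, and bit 31 comes from a
  // known-zero bit, so every bit of the lane is known zero.
  uint32_t AfterShl = (KnownZero << 31) | 0x7FFFFFFFu;
  std::printf("known-zero mask after shl 31: %08x\n", AfterShl); // ffffffff => lane folds to 0
  return 0;
}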
test/CodeGen/X86/combine-shl.ll
index d7cd5451bef6dda9079f341b5f02a60faf382ebe..d4ecec0376f715665874a9680404c2a038609754 100644 (file)
@@ -269,17 +269,15 @@ define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
 ; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT:    psrad $16, %xmm1
 ; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT:    psrad $16, %xmm0
-; SSE2-NEXT:    movdqa %xmm0, %xmm2
-; SSE2-NEXT:    pslld $31, %xmm2
-; SSE2-NEXT:    pslld $30, %xmm0
-; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; SSE2-NEXT:    psrad $16, %xmm1
 ; SSE2-NEXT:    movdqa %xmm1, %xmm2
 ; SSE2-NEXT:    pslld $29, %xmm2
 ; SSE2-NEXT:    pslld $28, %xmm1
 ; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
+; SSE2-NEXT:    pslld $30, %xmm0
+; SSE2-NEXT:    xorpd %xmm2, %xmm2
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
 ; SSE2-NEXT:    retq
 ;
 ; SSE41-LABEL: combine_vec_shl_ext_shl1: