From: Simon Pilgrim
Date: Mon, 16 Jan 2017 14:49:26 +0000 (+0000)
Subject: [SelectionDAG] Add knownbits support for BITREVERSE
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=1f7c0c9364eb80b1d3779360f50b5b45212056e5;p=llvm

[SelectionDAG] Add knownbits support for BITREVERSE

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@292130 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 6b2123f1940..24aed22e228 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2673,6 +2673,13 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     }
     break;
   }
+  case ISD::BITREVERSE: {
+    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
+                     Depth + 1);
+    KnownZero = KnownZero2.reverseBits();
+    KnownOne = KnownOne2.reverseBits();
+    break;
+  }
   case ISD::BSWAP: {
     computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
                      Depth + 1);
diff --git a/test/CodeGen/X86/known-bits-vector.ll b/test/CodeGen/X86/known-bits-vector.ll
index 1bbaff6b94b..8e720063dd6 100644
--- a/test/CodeGen/X86/known-bits-vector.ll
+++ b/test/CodeGen/X86/known-bits-vector.ll
@@ -535,34 +535,12 @@ define <4 x float> @knownbits_mask_umax_shuffle_uitofp(<4 x i32> %a0) {
 define <4 x i32> @knownbits_mask_bitreverse_ashr(<4 x i32> %a0) {
 ; X32-LABEL: knownbits_mask_bitreverse_ashr:
 ; X32:       # BB#0:
-; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-NEXT:    vpand %xmm1, %xmm0, %xmm2
-; X32-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
-; X32-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; X32-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; X32-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; X32-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
-; X32-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; X32-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; X32-NEXT:    vpsrad $31, %xmm0, %xmm0
+; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: knownbits_mask_bitreverse_ashr:
 ; X64:       # BB#0:
-; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X64-NEXT:    vpand %xmm1, %xmm0, %xmm2
-; X64-NEXT:    vmovdqa {{.*#+}} xmm3 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
-; X64-NEXT:    vpshufb %xmm2, %xmm3, %xmm2
-; X64-NEXT:    vpsrlw $4, %xmm0, %xmm0
-; X64-NEXT:    vpand %xmm1, %xmm0, %xmm0
-; X64-NEXT:    vmovdqa {{.*#+}} xmm1 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
-; X64-NEXT:    vpshufb %xmm0, %xmm1, %xmm0
-; X64-NEXT:    vpor %xmm0, %xmm2, %xmm0
-; X64-NEXT:    vpsrad $31, %xmm0, %xmm0
+; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT:    retq
   %1 = and <4 x i32> %a0,
   %2 = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %1)