From 2563d410c9892dd3fd68140587e013a1061575d2 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 31 Jan 2017 14:59:44 +0000
Subject: [PATCH] [X86][SSE] Add support for combining PINSRB into a target
 shuffle.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@293637 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp             | 11 +++--
 .../X86/clear_upper_vector_element_bits.ll     | 43 ++-----------------
 test/CodeGen/X86/insertelement-zero.ll         | 12 +++---
 3 files changed, 17 insertions(+), 49 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 8bbe21c9b3a..d928d5dfb28 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -5770,13 +5770,14 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
     Ops.push_back(IsAndN ? N1 : N0);
     return true;
   }
+  case X86ISD::PINSRB:
   case X86ISD::PINSRW: {
     SDValue InVec = N.getOperand(0);
     SDValue InScl = N.getOperand(1);
     uint64_t InIdx = N.getConstantOperandVal(2);
     assert(InIdx < NumElts && "Illegal insertion index");

-    // Attempt to recognise a PINSRW(VEC, 0, Idx) shuffle pattern.
+    // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
     if (X86::isZeroNode(InScl)) {
       Ops.push_back(InVec);
       for (unsigned i = 0; i != NumElts; ++i)
@@ -5784,10 +5785,12 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
       return true;
     }

-    // Attempt to recognise a PINSRW(ASSERTZEXT(PEXTRW)) shuffle pattern.
-    // TODO: Expand this to support PINSRB/INSERT_VECTOR_ELT/etc.
+    // Attempt to recognise a PINSR*(ASSERTZEXT(PEXTR*)) shuffle pattern.
+    // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
+    unsigned ExOp =
+        (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
     if (InScl.getOpcode() != ISD::AssertZext ||
-        InScl.getOperand(0).getOpcode() != X86ISD::PEXTRW)
+        InScl.getOperand(0).getOpcode() != ExOp)
       return false;

     SDValue ExVec = InScl.getOperand(0).getOperand(0);
diff --git a/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/test/CodeGen/X86/clear_upper_vector_element_bits.ll
index bedc68e0000..6d50048ed7c 100644
--- a/test/CodeGen/X86/clear_upper_vector_element_bits.ll
+++ b/test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -184,37 +184,10 @@ define <16 x i8> @_clearupper16xi8a(<16 x i8>) nounwind {
 ; AVX-LABEL: _clearupper16xi8a:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vpextrb $0, %xmm0, %eax
+; AVX-NEXT:    vpextrb $1, %xmm0, %ecx
 ; AVX-NEXT:    vmovd %eax, %xmm1
-; AVX-NEXT:    vpextrb $1, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $2, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $3, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $4, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $5, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $6, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $7, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $8, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $9, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $10, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $11, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $12, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $13, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $13, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $14, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX-NEXT:    vpextrb $15, %xmm0, %eax
-; AVX-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
+; AVX-NEXT:    vpinsrb $1, %ecx, %xmm1, %xmm1
+; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
 ; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x0 = extractelement <16 x i8> %0, i32 0
@@ -342,15 +315,7 @@ define <8 x i16> @_clearupper8xi16b(<8 x i16>) nounwind {
 ;
 ; AVX-LABEL: _clearupper8xi16b:
 ; AVX:       # BB#0:
-; AVX-NEXT:    xorl %eax, %eax
-; AVX-NEXT:    vpinsrb $1, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $3, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $5, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $7, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $9, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $11, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $13, %eax, %xmm0, %xmm0
-; AVX-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm0
+; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %x8 = bitcast <8 x i16> %0 to <16 x i8>
   %r0 = insertelement <16 x i8> %x8, i8 zeroinitializer, i32 1
diff --git a/test/CodeGen/X86/insertelement-zero.ll b/test/CodeGen/X86/insertelement-zero.ll
index 054375a1250..64c586fcb68 100644
--- a/test/CodeGen/X86/insertelement-zero.ll
+++ b/test/CodeGen/X86/insertelement-zero.ll
@@ -492,8 +492,8 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
 ; SSE41-NEXT:    xorl %eax, %eax
 ; SSE41-NEXT:    pinsrb $0, %eax, %xmm0
 ; SSE41-NEXT:    pinsrb $15, %eax, %xmm0
-; SSE41-NEXT:    pinsrb $14, %eax, %xmm1
-; SSE41-NEXT:    pinsrb $15, %eax, %xmm1
+; SSE41-NEXT:    pxor %xmm2, %xmm2
+; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz:
@@ -504,8 +504,8 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
 ; AVX1-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm1
 ; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX1-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
@@ -517,8 +517,8 @@ define <32 x i8> @insert_v32i8_z123456789ABCDEzGHIJKLMNOPQRSTzz(<32 x i8> %a) {
 ; AVX2-NEXT:    vpinsrb $15, %eax, %xmm0, %xmm1
 ; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
-; AVX2-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm1
+; AVX2-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
 ; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
   %1 = insertelement <32 x i8> %a, i8 0, i32 0
-- 
2.50.1