From 6a67d10b3b5968fc2135a46df54ce501057f957d Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Wed, 26 Jun 2019 18:21:26 +0000
Subject: [PATCH] [X86][SSE] getFauxShuffleMask - handle OR(x,y) where x and y
 have no overlapping bits

Create a per-byte shuffle mask based on the computeKnownBits from each
operand - if, for each byte, at least one of the operands is known zero
then the bytes can be safely blended.

Fixes PR41545

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@364458 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp           | 34 +++++++++++
 test/CodeGen/X86/vector-shuffle-combining.ll | 64 +++-----------------
 2 files changed, 42 insertions(+), 56 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index e81339d8eb7..2475dc46e02 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -6676,6 +6676,40 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
     return true;
   }
   case ISD::OR: {
+    // Inspect each operand at the byte level. We can merge these into a
+    // blend shuffle mask if for each byte at least one is masked out (zero).
+    KnownBits Known0 = DAG.computeKnownBits(N.getOperand(0));
+    KnownBits Known1 = DAG.computeKnownBits(N.getOperand(1));
+    if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
+      bool IsByteMask = true;
+      unsigned NumSizeInBytes = NumSizeInBits / 8;
+      unsigned NumBytesPerElt = NumBitsPerElt / 8;
+      APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
+      APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
+      for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
+        unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
+        unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
+        if (LHS == 255 && RHS == 0)
+          SelectMask.setBit(i);
+        else if (LHS == 255 && RHS == 255)
+          ZeroMask.setBit(i);
+        else if (!(LHS == 0 && RHS == 255))
+          IsByteMask = false;
+      }
+      if (IsByteMask) {
+        for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
+          for (unsigned j = 0; j != NumBytesPerElt; ++j) {
+            unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
+            int Idx = (ZeroMask[j] ? SM_SentinelZero : (i + j + Ofs));
+            Mask.push_back(Idx);
+          }
+        }
+        Ops.push_back(N.getOperand(0));
+        Ops.push_back(N.getOperand(1));
+        return true;
+      }
+    }
+
     // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
     // is a valid shuffle index.
     SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
diff --git a/test/CodeGen/X86/vector-shuffle-combining.ll b/test/CodeGen/X86/vector-shuffle-combining.ll
index 1e74e18718c..c9302817d47 100644
--- a/test/CodeGen/X86/vector-shuffle-combining.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining.ll
@@ -2860,63 +2860,15 @@ define <8 x i16> @PR39549(<16 x i8> %x) {
 }
 
 define <4 x i32> @PR41545(<4 x i32> %a0, <16 x i8> %a1) {
-; SSE2-LABEL: PR41545:
-; SSE2:       # %bb.0:
-; SSE2-NEXT:    paddd %xmm1, %xmm0
-; SSE2-NEXT:    retq
-;
-; SSSE3-LABEL: PR41545:
-; SSSE3:       # %bb.0:
-; SSSE3-NEXT:    paddd %xmm1, %xmm0
-; SSSE3-NEXT:    retq
-;
-; SSE41-LABEL: PR41545:
-; SSE41:       # %bb.0:
-; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [0,0,255,0,0,0,255,0,0,0,255,0,0,0,255,0]
-; SSE41-NEXT:    pand %xmm1, %xmm2
-; SSE41-NEXT:    pxor %xmm3, %xmm3
-; SSE41-NEXT:    pblendw {{.*#+}} xmm3 = xmm1[0],xmm3[1],xmm1[2],xmm3[3],xmm1[4],xmm3[5],xmm1[6],xmm3[7]
-; SSE41-NEXT:    psrld $24, %xmm1
-; SSE41-NEXT:    pslld $24, %xmm1
-; SSE41-NEXT:    por %xmm1, %xmm3
-; SSE41-NEXT:    por %xmm2, %xmm3
-; SSE41-NEXT:    paddd %xmm3, %xmm0
-; SSE41-NEXT:    retq
-;
-; AVX1-LABEL: PR41545:
-; AVX1:       # %bb.0:
-; AVX1-NEXT:    vpsrld $24, %xmm1, %xmm2
-; AVX1-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm3
-; AVX1-NEXT:    vpslld $24, %xmm2, %xmm2
-; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; AVX1-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX1-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX1-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    retq
-;
-; AVX2-SLOW-LABEL: PR41545:
-; AVX2-SLOW:       # %bb.0:
-; AVX2-SLOW-NEXT:    vpsrld $24, %xmm1, %xmm2
-; AVX2-SLOW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm3
-; AVX2-SLOW-NEXT:    vpslld $24, %xmm2, %xmm2
-; AVX2-SLOW-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX2-SLOW-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; AVX2-SLOW-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX2-SLOW-NEXT:    vpor %xmm1, %xmm3, %xmm1
-; AVX2-SLOW-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-SLOW-NEXT:    retq
+; SSE-LABEL: PR41545:
+; SSE:       # %bb.0:
+; SSE-NEXT:    paddd %xmm1, %xmm0
+; SSE-NEXT:    retq
 ;
-; AVX2-FAST-LABEL: PR41545:
-; AVX2-FAST:       # %bb.0:
-; AVX2-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm2
-; AVX2-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm3
-; AVX2-FAST-NEXT:    vpxor %xmm4, %xmm4, %xmm4
-; AVX2-FAST-NEXT:    vpblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
-; AVX2-FAST-NEXT:    vpor %xmm2, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpor %xmm3, %xmm1, %xmm1
-; AVX2-FAST-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
-; AVX2-FAST-NEXT:    retq
+; AVX-LABEL: PR41545:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
   %1 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
   %2 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
   %3 = shufflevector <16 x i8> %a1, <16 x i8> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
-- 
2.40.0
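
Note, not part of the patch: a minimal standalone C++ sketch of the per-byte
classification that the new ISD::OR handling performs. Assumptions are
labeled: classifyOrAsBlend and BytePick are hypothetical names used only for
illustration, and the Known0.One/Known1.One "no known-one bits" guard that
the patch checks first is elided. For each byte of an element, OR(x,y) folds
to a blend if at least one operand's byte is known zero; if both are known
zero the result byte is zero (SM_SentinelZero in the patch).

// Standalone sketch, not LLVM code. Each entry of KnownZero0/KnownZero1 is
// true when that byte of the corresponding OR operand is known to be
// all-zero; this mirrors the patch's LHS == 255 / RHS == 255 tests on the
// extracted known-zero bytes.
#include <cstdio>
#include <optional>
#include <vector>

enum class BytePick { Lhs, Rhs, Zero };

std::optional<std::vector<BytePick>>
classifyOrAsBlend(const std::vector<bool> &KnownZero0,
                  const std::vector<bool> &KnownZero1) {
  std::vector<BytePick> Picks;
  for (size_t I = 0; I != KnownZero0.size(); ++I) {
    if (KnownZero0[I] && KnownZero1[I])
      Picks.push_back(BytePick::Zero); // both zero -> zero result byte
    else if (KnownZero0[I])
      Picks.push_back(BytePick::Rhs);  // LHS byte is zero -> take RHS byte
    else if (KnownZero1[I])
      Picks.push_back(BytePick::Lhs);  // RHS byte is zero -> take LHS byte
    else
      return std::nullopt;             // bytes may overlap -> not a blend
  }
  return Picks;
}

int main() {
  // 4-byte element: operand 0 supplies bytes 0 and 2, operand 1 supplies
  // byte 1, and byte 3 is known zero in both operands.
  auto Picks = classifyOrAsBlend({false, true, false, true},
                                 {true, false, true, true});
  if (Picks)
    for (BytePick P : *Picks)
      std::printf("%s ", P == BytePick::Lhs   ? "LHS"
                         : P == BytePick::Rhs ? "RHS"
                                              : "ZERO");
  std::printf("\n"); // prints: LHS RHS LHS ZERO
}

The patch only classifies one element's worth of bytes and then replicates
the pattern across the vector; that is sound because computeKnownBits on a
vector returns only the bits known in common across all demanded elements,
so the per-element byte pattern holds for every element.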