From: Simon Pilgrim
Date: Sat, 1 Oct 2016 16:04:28 +0000 (+0000)
Subject: [X86][SSE] Add support for combining target shuffles to binary BLEND
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=20e8247890f6a0ad2ec911531742551a6035388a;p=llvm

[X86][SSE] Add support for combining target shuffles to binary BLEND

We already had support for 1-input BLEND with zero - this adds support for
2-input BLEND as well.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@283040 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 6c8def1c397..cd1ff4de70f 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -25160,7 +25160,7 @@ static bool matchBinaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
     }
   }
 
-  // Attempt to blend with zero.
+  // Attempt to combine to X86ISD::BLENDI.
   if (NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
                            (Subtarget.hasAVX() && MaskVT.is256BitVector()))) {
     // Determine a type compatible with X86ISD::BLENDI.
@@ -25180,12 +25180,13 @@ static bool matchBinaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
       BlendVT = MVT::v8f32;
     }
 
+    unsigned BlendSize = BlendVT.getVectorNumElements();
+    unsigned MaskRatio = BlendSize / NumMaskElts;
+
+    // Can we blend with zero?
     if (isSequentialOrUndefOrZeroInRange(Mask, /*Pos*/ 0, /*Size*/ NumMaskElts,
                                          /*Low*/ 0) &&
         NumMaskElts <= BlendVT.getVectorNumElements()) {
-      unsigned BlendSize = BlendVT.getVectorNumElements();
-      unsigned MaskRatio = BlendSize / NumMaskElts;
-
       PermuteImm = 0;
       for (unsigned i = 0; i != BlendSize; ++i)
         if (Mask[i / MaskRatio] < 0)
@@ -25196,6 +25197,31 @@ static bool matchBinaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
       ShuffleVT = BlendVT;
       return true;
     }
+
+    // Attempt to match as a binary blend.
+    if (NumMaskElts <= BlendVT.getVectorNumElements()) {
+      bool MatchBlend = true;
+      for (int i = 0; i != NumMaskElts; ++i) {
+        int M = Mask[i];
+        if (M == SM_SentinelUndef)
+          continue;
+        else if (M == SM_SentinelZero)
+          MatchBlend = false;
+        else if ((M != i) && (M != (i + NumMaskElts)))
+          MatchBlend = false;
+      }
+
+      if (MatchBlend) {
+        PermuteImm = 0;
+        for (unsigned i = 0; i != BlendSize; ++i)
+          if ((int)NumMaskElts <= Mask[i / MaskRatio])
+            PermuteImm |= 1u << i;
+
+        Shuffle = X86ISD::BLENDI;
+        ShuffleVT = BlendVT;
+        return true;
+      }
+    }
   }
 
   // Attempt to combine to INSERTPS.
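
The matching logic added above is compact, so here is a standalone sketch of the same idea for reference. It uses plain std::vector<int> masks instead of LLVM's MVT/ArrayRef machinery; the helper name matchesBinaryBlend and the v4i64-to-v8i32 example mask are illustrative assumptions rather than anything taken from the patch, and only the sentinel values (-1 for undef, -2 for zero) follow LLVM's convention.

// Illustrative sketch only - re-expresses the matching logic above with
// plain std::vector<int> masks; all names here are hypothetical.
#include <cstdio>
#include <vector>

static const int SM_SentinelUndef = -1; // LLVM's "undef" mask sentinel
static const int SM_SentinelZero = -2;  // LLVM's "zero" mask sentinel

// Returns true and fills PermuteImm if every lane i selects either element i
// of input 0 or element i of input 1 (encoded as i + NumMaskElts).
static bool matchesBinaryBlend(const std::vector<int> &Mask,
                               unsigned NumMaskElts, unsigned BlendSize,
                               unsigned &PermuteImm) {
  unsigned MaskRatio = BlendSize / NumMaskElts;
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef)
      continue;                              // don't-care lane
    if (M == SM_SentinelZero)
      return false;                          // zero lanes need the zero-blend path
    if (M != (int)i && M != (int)(i + NumMaskElts))
      return false;                          // lane moves position: not a blend
  }
  PermuteImm = 0;
  for (unsigned i = 0; i != BlendSize; ++i)
    if ((int)NumMaskElts <= Mask[i / MaskRatio])
      PermuteImm |= 1u << i;                 // bit set => lane comes from input 1
  return true;
}

int main() {
  // Hypothetical <4 x i64> mask <0, 5, 2, 7> widened to a v8i32 blend
  // (MaskRatio = 2): lanes 1 and 3 come from the second input.
  std::vector<int> Mask = {0, 5, 2, 7};
  unsigned Imm;
  if (matchesBinaryBlend(Mask, 4, 8, Imm))
    std::printf("BLENDI imm = 0x%X\n", Imm);
  return 0;
}

Compiled with any C++11 compiler this prints BLENDI imm = 0xCC, i.e. the widened v8i32 lanes 2,3,6,7 are taken from the second input.
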
diff --git a/test/CodeGen/X86/vector-shuffle-256-v16.ll b/test/CodeGen/X86/vector-shuffle-256-v16.ll
index 15da1ec6814..b2a202dc522 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -851,13 +851,11 @@ define <16 x i16> @shuffle_v16i16_16_16_16_16_04_05_06_07_24_24_24_24_12_13_14_1
 ; AVX1-LABEL: shuffle_v16i16_16_16_16_16_04_05_06_07_24_24_24_24_12_13_14_15:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/vector-shuffle-256-v32.ll b/test/CodeGen/X86/vector-shuffle-256-v32.ll
index b0834dd2693..2b7e312d09c 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v32.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v32.ll
@@ -1157,15 +1157,13 @@ define <32 x i8> @shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_
 ; AVX1-LABEL: shuffle_v32i8_32_32_32_32_32_32_32_32_08_09_10_11_12_13_14_15_48_48_48_48_48_48_48_48_24_25_26_27_28_29_30_31:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
 ; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
 ; AVX1-NEXT:    vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX1-NEXT:    vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
+; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
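
For the updated AVX1 checks, it may help to recall how a word-granularity blend behaves: each bit of the 8-bit immediate picks the corresponding 16-bit lane from the second source when set, and from the first source otherwise. The sketch below is a minimal model of that behaviour, not the real instruction selection; the operand ordering and the 0xF0 immediate are assumptions chosen to mirror the xmm = a[0,1,2,3],b[4,5,6,7] annotations in the CHECK lines.

// Minimal model of a 128-bit, 16-bit-lane blend: result word i comes from
// the second source when bit i of the immediate is set, else from the first.
// Operand order and the immediate value are assumptions for illustration.
#include <array>
#include <cstdint>
#include <cstdio>

using V8x16 = std::array<uint16_t, 8>;

static V8x16 blendw(const V8x16 &A, const V8x16 &B, uint8_t Imm) {
  V8x16 R;
  for (int i = 0; i != 8; ++i)
    R[i] = (Imm & (1u << i)) ? B[i] : A[i]; // bit set => take lane from B
  return R;
}

int main() {
  // A broadcast low word (as a vpshuflw would produce) blended with the
  // high half of another vector: lanes 0-3 from A, lanes 4-7 from B.
  V8x16 A = {7, 7, 7, 7, 7, 7, 7, 7};
  V8x16 B = {0, 1, 2, 3, 4, 5, 6, 7};
  V8x16 R = blendw(A, B, 0xF0);
  for (uint16_t W : R)
    std::printf("%u ", unsigned(W)); // prints: 7 7 7 7 4 5 6 7
  std::printf("\n");
  return 0;
}

Run as-is it prints 7 7 7 7 4 5 6 7, i.e. the broadcast word stays in lanes 0-3 and the other vector supplies lanes 4-7, which is the shape of result the new CHECK lines describe.
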