From: Simon Pilgrim
Date: Tue, 14 Feb 2017 15:49:37 +0000 (+0000)
Subject: [X86][SSE] Add shuffle combine tests showing missed opportunities to use UNPCK
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=fbe71f840423ff0c7e65e6078d783910a76096af;p=llvm

[X86][SSE] Add shuffle combine tests showing missed opportunities to use UNPCK

Shuffle combining does not yet make use of UNDEF or ZERO inputs to combine to UNPCK shuffles.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@295059 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index c9845bf74cf..77864904261 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -662,6 +662,49 @@ define <32 x i8> @combine_pshufb_not_as_pshufw(<32 x i8> %a0) {
   ret <32 x i8> %res1
 }
 
+define <32 x i8> @combine_pshufb_as_unpacklo_undef(<32 x i8> %a0) {
+; X32-LABEL: combine_pshufb_as_unpacklo_undef:
+; X32: # BB#0:
+; X32-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_unpacklo_undef:
+; X64: # BB#0:
+; X64-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; X64-NEXT: retq
+  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> )
+  %2 = shufflevector <32 x i8> %1, <32 x i8> undef, <32 x i32> 
+  ret <32 x i8> %2
+}
+
+define <32 x i8> @combine_pshufb_as_unpacklo_zero(<32 x i8> %a0) {
+; X32-LABEL: combine_pshufb_as_unpacklo_zero:
+; X32: # BB#0:
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,ymm0[2,3],zero,zero,ymm0[4,5],zero,zero,ymm0[6,7],zero,zero,ymm0[16,17],zero,zero,ymm0[18,19],zero,zero,ymm0[20,21],zero,zero,ymm0[22,23],zero,zero
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_unpacklo_zero:
+; X64: # BB#0:
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,ymm0[2,3],zero,zero,ymm0[4,5],zero,zero,ymm0[6,7],zero,zero,ymm0[16,17],zero,zero,ymm0[18,19],zero,zero,ymm0[20,21],zero,zero,ymm0[22,23],zero,zero
+; X64-NEXT: retq
+  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> )
+  ret <32 x i8> %1
+}
+
+define <32 x i8> @combine_pshufb_as_unpackhi_zero(<32 x i8> %a0) {
+; X32-LABEL: combine_pshufb_as_unpackhi_zero:
+; X32: # BB#0:
+; X32-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31]
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_pshufb_as_unpackhi_zero:
+; X64: # BB#0:
+; X64-NEXT: vpshufb {{.*#+}} ymm0 = zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31]
+; X64-NEXT: retq
+  %1 = tail call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> )
+  ret <32 x i8> %1
+}
+
 define <32 x i8> @combine_psrlw_pshufb(<16 x i16> %a0) {
 ; X32-LABEL: combine_psrlw_pshufb:
 ; X32: # BB#0:
diff --git a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
index 7b73bda48ce..94d9f81214b 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -473,6 +473,65 @@ define <16 x i8> @combine_pshufb_as_unary_unpckhwd(<16 x i8> %a0) {
   ret <16 x i8> %1
 }
 
+define <8 x i16> @combine_pshufb_as_unpacklo_undef(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_unpacklo_undef:
+; SSE: # BB#0:
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufb_as_unpacklo_undef:
+; AVX: # BB#0:
+; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; AVX-NEXT: retq
+  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> )
+  %2 = bitcast <16 x i8> %1 to <8 x i16>
+  %3 = shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> 
+  ret <8 x i16> %3
+}
+
+define <16 x i8> @combine_pshufb_as_unpackhi_undef(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_unpackhi_undef:
+; SSE: # BB#0:
+; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufb_as_unpackhi_undef:
+; AVX: # BB#0:
+; AVX-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX-NEXT: retq
+  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> )
+  %2 = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> 
+  ret <16 x i8> %2
+}
+
+define <16 x i8> @combine_pshufb_as_unpacklo_zero(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_unpacklo_zero:
+; SSE: # BB#0:
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[4,5,6,7]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufb_as_unpacklo_zero:
+; AVX: # BB#0:
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0,1,2,3],zero,zero,zero,zero,xmm0[4,5,6,7]
+; AVX-NEXT: retq
+  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> )
+  ret <16 x i8> %1
+}
+
+define <16 x i8> @combine_pshufb_as_unpackhi_zero(<16 x i8> %a0) {
+; SSE-LABEL: combine_pshufb_as_unpackhi_zero:
+; SSE: # BB#0:
+; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_pshufb_as_unpackhi_zero:
+; AVX: # BB#0:
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
+; AVX-NEXT: retq
+  %1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> )
+  ret <16 x i8> %1
+}
+
 define <16 x i8> @combine_psrlw_pshufb(<8 x i16> %a0) {
 ; SSE-LABEL: combine_psrlw_pshufb:
 ; SSE: # BB#0:
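For reference, the combine these tests are looking for can be illustrated with SSE intrinsics. The sketch below is not part of the commit; the helper names and the mask constant are hypothetical, chosen to mirror the combine_pshufb_as_unpackhi_zero pattern above. It shows that a PSHUFB whose mask interleaves source bytes with zeroing entries (mask bytes with the high bit set) produces the same result as an unpack against a zero register, which is the form the shuffle combiner should ideally select.

    /* Minimal sketch (not from this commit); compile with -mssse3.
       Both hypothetical helpers produce identical results. */
    #include <tmmintrin.h>   /* SSSE3: _mm_shuffle_epi8 */

    /* PSHUFB form: mask bytes with the high bit set (-1 here) write zero,
       so the result is { a[8], 0, a[9], 0, ..., a[15], 0 }. */
    static __m128i unpackhi_zero_via_pshufb(__m128i a) {
      const __m128i mask = _mm_setr_epi8(8, -1, 9, -1, 10, -1, 11, -1,
                                         12, -1, 13, -1, 14, -1, 15, -1);
      return _mm_shuffle_epi8(a, mask);
    }

    /* Equivalent UNPCKHBW form: interleave the high 8 bytes of a with zeros. */
    static __m128i unpackhi_zero_via_unpck(__m128i a) {
      return _mm_unpackhi_epi8(a, _mm_setzero_si128());
    }

The *_undef tests follow the same idea, except that the target pattern is a unary unpack (both operands the same register) rather than an unpack against zero.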