From 491c5046445d88be0c9c614efa1eed34f1659c61 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim <llvm-dev@redking.me.uk> Date: Thu, 11 Apr 2019 14:09:35 +0000 Subject: [PATCH] [X86][AVX] Add X86ISD::VPERMILPV demandedelts tests git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@358168 91177308-0d34-0410-b5e6-96231b3b80d8 --- .../X86/vector-shuffle-combining-avx.ll | 96 +++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/test/CodeGen/X86/vector-shuffle-combining-avx.ll index 5651174c4dc..1afbc2adf75 100644 --- a/test/CodeGen/X86/vector-shuffle-combining-avx.ll +++ b/test/CodeGen/X86/vector-shuffle-combining-avx.ll @@ -187,6 +187,47 @@ define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) { %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>) ret <8 x float> %1 } +define <8 x float> @demandedelts_vpermilvar_8f32_movshdup(<8 x float> %a0, i32 %a1) { +; X86-LABEL: demandedelts_vpermilvar_8f32_movshdup: +; X86: # %bb.0: +; X86-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm1 +; X86-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3,4,5,6],ymm1[7] +; X86-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X86-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,6] +; X86-NEXT: retl +; +; X64-AVX1-LABEL: demandedelts_vpermilvar_8f32_movshdup: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: vmovd %edi, %xmm1 +; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0] +; X64-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1 +; X64-AVX1-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3,4,5,6],ymm1[7] +; X64-AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X64-AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,6] +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: demandedelts_vpermilvar_8f32_movshdup: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovd %edi, %xmm1 +; X64-AVX2-NEXT: vpbroadcastd %xmm1, %ymm1 +; X64-AVX2-NEXT: vpblendd {{.*#+}} ymm1 = mem[0,1,2,3,4,5,6],ymm1[7] +; X64-AVX2-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: 
vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,6] +; X64-AVX2-NEXT: retq +; +; X64-AVX512-LABEL: demandedelts_vpermilvar_8f32_movshdup: +; X64-AVX512: # %bb.0: +; X64-AVX512-NEXT: vmovd %edi, %xmm1 +; X64-AVX512-NEXT: vpbroadcastd %xmm1, %ymm1 +; X64-AVX512-NEXT: vpblendd {{.*#+}} ymm1 = mem[0,1,2,3,4,5,6],ymm1[7] +; X64-AVX512-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X64-AVX512-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,6] +; X64-AVX512-NEXT: retq + %1 = insertelement <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7>, i32 %a1, i32 7 + %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %1) + %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 6> + ret <8 x float> %3 +} define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) { ; CHECK-LABEL: combine_vpermilvar_8f32_movsldup: @@ -196,6 +237,61 @@ define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) { %1 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>) ret <8 x float> %1 } +define <8 x float> @demandedelts_vpermilvar_8f32_movsldup(<8 x float> %a0, i32 %a1) { +; X86-AVX1-LABEL: demandedelts_vpermilvar_8f32_movsldup: +; X86-AVX1: # %bb.0: +; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <u,0,2,2> +; X86-AVX1-NEXT: vpinsrd $0, {{[0-9]+}}(%esp), %xmm1, %xmm1 +; X86-AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7] +; X86-AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X86-AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7] +; X86-AVX1-NEXT: retl +; +; X86-AVX2-LABEL: demandedelts_vpermilvar_8f32_movsldup: +; X86-AVX2: # %bb.0: +; X86-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; X86-AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],mem[1,2,3,4,5,6,7] +; X86-AVX2-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X86-AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7] +; X86-AVX2-NEXT: retl +; +; X86-AVX512-LABEL: demandedelts_vpermilvar_8f32_movsldup: +; X86-AVX512: # %bb.0: +; X86-AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero +; 
X86-AVX512-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],mem[1,2,3,4,5,6,7] +; X86-AVX512-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X86-AVX512-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7] +; X86-AVX512-NEXT: retl +; +; X64-AVX1-LABEL: demandedelts_vpermilvar_8f32_movsldup: +; X64-AVX1: # %bb.0: +; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <u,0,2,2> +; X64-AVX1-NEXT: vpinsrd $0, %edi, %xmm1, %xmm1 +; X64-AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7] +; X64-AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X64-AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7] +; X64-AVX1-NEXT: retq +; +; X64-AVX2-LABEL: demandedelts_vpermilvar_8f32_movsldup: +; X64-AVX2: # %bb.0: +; X64-AVX2-NEXT: vmovd %edi, %xmm1 +; X64-AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2,3,4,5,6,7] +; X64-AVX2-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X64-AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7] +; X64-AVX2-NEXT: retq +; +; X64-AVX512-LABEL: demandedelts_vpermilvar_8f32_movsldup: +; X64-AVX512: # %bb.0: +; X64-AVX512-NEXT: vmovd %edi, %xmm1 +; X64-AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2,3,4,5,6,7] +; X64-AVX512-NEXT: vpermilps %ymm1, %ymm0, %ymm0 +; X64-AVX512-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7] +; X64-AVX512-NEXT: retq + %1 = insertelement <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>, i32 %a1, i32 0 + %2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %1) + %3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> + ret <8 x float> %3 +} define <2 x double> @combine_vpermilvar_2f64_identity(<2 x double> %a0) { ; CHECK-LABEL: combine_vpermilvar_2f64_identity: -- 2.50.1