From: Simon Pilgrim
Date: Fri, 20 Jul 2018 15:07:53 +0000 (+0000)
Subject: [X86][AVX] Add 256-bit vector horizontal op redundant shuffle tests
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=7b7da9f3c10315d4c9adef0607d2a147bc012a70;p=llvm

[X86][AVX] Add 256-bit vector horizontal op redundant shuffle tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@337558 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/haddsub-shuf.ll b/test/CodeGen/X86/haddsub-shuf.ll
index 3b126b7b6df..8ede7035e7d 100644
--- a/test/CodeGen/X86/haddsub-shuf.ll
+++ b/test/CodeGen/X86/haddsub-shuf.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
 
 ; The next 8 tests check for matching the horizontal op and eliminating the shuffle.
 ; PR34111 - https://bugs.llvm.org/show_bug.cgi?id=34111
@@ -22,6 +23,55 @@ define <4 x float> @hadd_v4f32(<4 x float> %a) {
   ret <4 x float> %shuf
 }
 
+define <8 x float> @hadd_v8f32a(<8 x float> %a) {
+; SSSE3-LABEL: hadd_v8f32a:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movaps %xmm0, %xmm2
+; SSSE3-NEXT:    haddps %xmm1, %xmm2
+; SSSE3-NEXT:    movddup {{.*#+}} xmm0 = xmm2[0,0]
+; SSSE3-NEXT:    movaps %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: hadd_v8f32a:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: hadd_v8f32a:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vhaddps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-NEXT:    retq
+  %a0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %a1 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %hop = fadd <4 x float> %a0, %a1
+  %shuf = shufflevector <4 x float> %hop, <4 x float> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 2, i32 3>
+  ret <8 x float> %shuf
+}
+
+define <8 x float> @hadd_v8f32b(<8 x float> %a) {
+; SSSE3-LABEL: hadd_v8f32b:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    haddps %xmm0, %xmm0
+; SSSE3-NEXT:    haddps %xmm1, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: hadd_v8f32b:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX-NEXT:    retq
+  %a0 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
+  %a1 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
+  %hop = fadd <8 x float> %a0, %a1
+  %shuf = shufflevector <8 x float> %hop, <8 x float> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
+  ret <8 x float> %shuf
+}
+
 define <4 x float> @hsub_v4f32(<4 x float> %a) {
 ; SSSE3-LABEL: hsub_v4f32:
 ; SSSE3:       # %bb.0:
@@ -39,6 +89,55 @@ define <4 x float> @hsub_v4f32(<4 x float> %a) {
   ret <4 x float> %shuf
 }
 
+define <8 x float> @hsub_v8f32a(<8 x float> %a) {
+; SSSE3-LABEL: hsub_v8f32a:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movaps %xmm0, %xmm2
+; SSSE3-NEXT:    hsubps %xmm1, %xmm2
+; SSSE3-NEXT:    movddup {{.*#+}} xmm0 = xmm2[0,0]
+; SSSE3-NEXT:    movaps %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: hsub_v8f32a:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: hsub_v8f32a:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vhsubps %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-NEXT:    retq
+  %a0 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %a1 = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %hop = fsub <4 x float> %a0, %a1
+  %shuf = shufflevector <4 x float> %hop, <4 x float> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 2, i32 3>
+  ret <8 x float> %shuf
+}
+
+define <8 x float> @hsub_v8f32b(<8 x float> %a) {
+; SSSE3-LABEL: hsub_v8f32b:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    hsubps %xmm0, %xmm0
+; SSSE3-NEXT:    hsubps %xmm1, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: hsub_v8f32b:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vhsubps %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX-NEXT:    retq
+  %a0 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
+  %a1 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
+  %hop = fsub <8 x float> %a0, %a1
+  %shuf = shufflevector <8 x float> %hop, <8 x float> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
+  ret <8 x float> %shuf
+}
+
 define <2 x double> @hadd_v2f64(<2 x double> %a) {
 ; SSSE3-LABEL: hadd_v2f64:
 ; SSSE3:       # %bb.0:
@@ -56,6 +155,25 @@ define <2 x double> @hadd_v2f64(<2 x double> %a) {
   ret <2 x double> %shuf
 }
 
+define <4 x double> @hadd_v4f64(<4 x double> %a) {
+; SSSE3-LABEL: hadd_v4f64:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    haddpd %xmm0, %xmm0
+; SSSE3-NEXT:    haddpd %xmm1, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: hadd_v4f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vhaddpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX-NEXT:    retq
+  %a0 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
+  %a1 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
+  %hop = fadd <4 x double> %a0, %a1
+  %shuf = shufflevector <4 x double> %hop, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+  ret <4 x double> %shuf
+}
+
 define <2 x double> @hsub_v2f64(<2 x double> %a) {
 ; SSSE3-LABEL: hsub_v2f64:
 ; SSSE3:       # %bb.0:
@@ -73,6 +191,25 @@ define <2 x double> @hsub_v2f64(<2 x double> %a) {
   ret <2 x double> %shuf
 }
 
+define <4 x double> @hsub_v4f64(<4 x double> %a) {
+; SSSE3-LABEL: hsub_v4f64:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    hsubpd %xmm0, %xmm0
+; SSSE3-NEXT:    hsubpd %xmm1, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX-LABEL: hsub_v4f64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vhsubpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX-NEXT:    retq
+  %a0 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 0, i32 undef, i32 2, i32 undef>
+  %a1 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> <i32 1, i32 undef, i32 3, i32 undef>
+  %hop = fsub <4 x double> %a0, %a1
+  %shuf = shufflevector <4 x double> %hop, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+  ret <4 x double> %shuf
+}
+
 define <4 x i32> @hadd_v4i32(<4 x i32> %a) {
 ; SSSE3-LABEL: hadd_v4i32:
 ; SSSE3:       # %bb.0:
@@ -90,6 +227,67 @@ define <4 x i32> @hadd_v4i32(<4 x i32> %a) {
   ret <4 x i32> %shuf
 }
 
+define <8 x i32> @hadd_v8i32a(<8 x i32> %a) {
+; SSSE3-LABEL: hadd_v8i32a:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa %xmm0, %xmm2
+; SSSE3-NEXT:    phaddd %xmm1, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
+; SSSE3-NEXT:    movdqa %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: hadd_v8i32a:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: hadd_v8i32a:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vphaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-NEXT:    retq
+  %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %hop = add <4 x i32> %a0, %a1
+  %shuf = shufflevector <4 x i32> %hop, <4 x i32> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 2, i32 3>
+  ret <8 x i32> %shuf
+}
+
+define <8 x i32> @hadd_v8i32b(<8 x i32> %a) {
+; SSSE3-LABEL: hadd_v8i32b:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    phaddd %xmm0, %xmm0
+; SSSE3-NEXT:    phaddd %xmm1, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: hadd_v8i32b:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm0[0,2,2,3,4,6,6,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpaddd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: hadd_v8i32b:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
+; AVX2-NEXT:    retq
+  %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
+  %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
+  %hop = add <8 x i32> %a0, %a1
+  %shuf = shufflevector <8 x i32> %hop, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
+  ret <8 x i32> %shuf
+}
+
 define <4 x i32> @hsub_v4i32(<4 x i32> %a) {
 ; SSSE3-LABEL: hsub_v4i32:
 ; SSSE3:       # %bb.0:
@@ -107,6 +305,67 @@ define <4 x i32> @hsub_v4i32(<4 x i32> %a) {
   ret <4 x i32> %shuf
 }
 
+define <8 x i32> @hsub_v8i32a(<8 x i32> %a) {
+; SSSE3-LABEL: hsub_v8i32a:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    movdqa %xmm0, %xmm2
+; SSSE3-NEXT:    phsubd %xmm1, %xmm2
+; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,0,1]
+; SSSE3-NEXT:    movdqa %xmm2, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: hsub_v8i32a:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vphsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: hsub_v8i32a:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vphsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,0,2,1]
+; AVX2-NEXT:    retq
+  %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+  %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+  %hop = sub <4 x i32> %a0, %a1
+  %shuf = shufflevector <4 x i32> %hop, <4 x i32> undef, <8 x i32> <i32 undef, i32 undef, i32 0, i32 1, i32 undef, i32 undef, i32 2, i32 3>
+  ret <8 x i32> %shuf
+}
+
+define <8 x i32> @hsub_v8i32b(<8 x i32> %a) {
+; SSSE3-LABEL: hsub_v8i32b:
+; SSSE3:       # %bb.0:
+; SSSE3-NEXT:    phsubd %xmm0, %xmm0
+; SSSE3-NEXT:    phsubd %xmm1, %xmm1
+; SSSE3-NEXT:    retq
+;
+; AVX1-LABEL: hsub_v8i32b:
+; AVX1:       # %bb.0:
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm1 = ymm0[0,2,2,3,4,6,6,7]
+; AVX1-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[1,3,2,3,5,7,6,7]
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT:    vextractf128 $1, %ymm1, %xmm3
+; AVX1-NEXT:    vpsubd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: hsub_v8i32b:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vphsubd %ymm0, %ymm0, %ymm0
+; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
+; AVX2-NEXT:    retq
+  %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 4, i32 6, i32 undef, i32 undef>
+  %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32> <i32 1, i32 3, i32 undef, i32 undef, i32 5, i32 7, i32 undef, i32 undef>
+  %hop = sub <8 x i32> %a0, %a1
+  %shuf = shufflevector <8 x i32> %hop, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 4, i32 5, i32 4, i32 5>
+  ret <8 x i32> %shuf
+}
+
 define <8 x i16> @hadd_v8i16(<8 x i16> %a) {
 ; SSSE3-LABEL: hadd_v8i16:
 ; SSSE3:       # %bb.0: