From: Simon Pilgrim
Date: Wed, 30 Nov 2016 11:30:33 +0000 (+0000)
Subject: [X86][SSE] Add tests demonstrating missed opportunities to combine 64-bit element...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=bdf16bd55d2f96112213c455501dbbc23c043c96;p=llvm

[X86][SSE] Add tests demonstrating missed opportunities to combine 64-bit element unpacks with horizontal pair ops.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@288240 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/horizontal-shuffle.ll b/test/CodeGen/X86/horizontal-shuffle.ll
new file mode 100644
index 00000000000..def614150cd
--- /dev/null
+++ b/test/CodeGen/X86/horizontal-shuffle.ll
@@ -0,0 +1,513 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=X64
+
+;
+; 128-bit Vectors
+;
+
+define <4 x float> @test_unpackl_fhadd_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
+; X32-LABEL: test_unpackl_fhadd_128:
+; X32: ## BB#0:
+; X32-NEXT: vhaddps %xmm1, %xmm0, %xmm0
+; X32-NEXT: vhaddps %xmm3, %xmm2, %xmm1
+; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_fhadd_128:
+; X64: ## BB#0:
+; X64-NEXT: vhaddps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vhaddps %xmm3, %xmm2, %xmm1
+; X64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: retq
+  %1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
+  %2 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a2, <4 x float> %a3)
+  %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  ret <4 x float> %3
+}
+
+define <2 x double> @test_unpackh_fhadd_128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
+; X32-LABEL: test_unpackh_fhadd_128:
+; X32: ## BB#0:
+; X32-NEXT: vhaddpd %xmm1, %xmm0, %xmm0
+; X32-NEXT: vhaddpd %xmm3, %xmm2, %xmm1
+; X32-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_fhadd_128:
+; X64: ## BB#0:
+; X64-NEXT: vhaddpd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vhaddpd %xmm3, %xmm2, %xmm1
+; X64-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X64-NEXT: retq
+  %1 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a0, <2 x double> %a1)
+  %2 = call <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double> %a2, <2 x double> %a3)
+  %3 = shufflevector <2 x double> %1, <2 x double> %2, <2 x i32> <i32 1, i32 3>
+  ret <2 x double> %3
+}
+
+define <2 x double> @test_unpackl_fhsub_128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) {
+; X32-LABEL: test_unpackl_fhsub_128:
+; X32: ## BB#0:
+; X32-NEXT: vhsubpd %xmm1, %xmm0, %xmm0
+; X32-NEXT: vhsubpd %xmm3, %xmm2, %xmm1
+; X32-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_fhsub_128:
+; X64: ## BB#0:
+; X64-NEXT: vhsubpd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vhsubpd %xmm3, %xmm2, %xmm1
+; X64-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: retq
+  %1 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a0, <2 x double> %a1)
+  %2 = call <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double> %a2, <2 x double> %a3)
+  %3 = shufflevector <2 x double> %1, <2 x double> %2, <2 x i32> <i32 0, i32 2>
+  ret <2 x double> %3
+}
+
+define <4 x float> @test_unpackh_fhsub_128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) {
+; X32-LABEL: test_unpackh_fhsub_128:
+; X32: ## BB#0:
+; X32-NEXT: vhsubps %xmm1, %xmm0, %xmm0
+; X32-NEXT: vhsubps %xmm3, %xmm2, %xmm1
+; X32-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_fhsub_128:
+; X64: ## BB#0:
+; X64-NEXT: vhsubps %xmm1, %xmm0, %xmm0
+; X64-NEXT: vhsubps %xmm3, %xmm2, %xmm1
+; X64-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X64-NEXT: retq
+  %1 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a0, <4 x float> %a1)
+  %2 = call <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float> %a2, <4 x float> %a3)
+  %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+  ret <4 x float> %3
+}
+
+define <8 x i16> @test_unpackl_hadd_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
+; X32-LABEL: test_unpackl_hadd_128:
+; X32: ## BB#0:
+; X32-NEXT: vphaddw %xmm1, %xmm0, %xmm0
+; X32-NEXT: vphaddw %xmm3, %xmm2, %xmm1
+; X32-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_hadd_128:
+; X64: ## BB#0:
+; X64-NEXT: vphaddw %xmm1, %xmm0, %xmm0
+; X64-NEXT: vphaddw %xmm3, %xmm2, %xmm1
+; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: retq
+  %1 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16> %a2, <8 x i16> %a3)
+  %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  ret <8 x i16> %3
+}
+
+define <4 x i32> @test_unpackh_hadd_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
+; X32-LABEL: test_unpackh_hadd_128:
+; X32: ## BB#0:
+; X32-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; X32-NEXT: vphaddd %xmm3, %xmm2, %xmm1
+; X32-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_hadd_128:
+; X64: ## BB#0:
+; X64-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vphaddd %xmm3, %xmm2, %xmm1
+; X64-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X64-NEXT: retq
+  %1 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a0, <4 x i32> %a1)
+  %2 = call <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32> %a2, <4 x i32> %a3)
+  %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+  ret <4 x i32> %3
+}
+
+define <4 x i32> @test_unpackl_hsub_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
+; X32-LABEL: test_unpackl_hsub_128:
+; X32: ## BB#0:
+; X32-NEXT: vphsubd %xmm1, %xmm0, %xmm0
+; X32-NEXT: vphsubd %xmm3, %xmm2, %xmm1
+; X32-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_hsub_128:
+; X64: ## BB#0:
+; X64-NEXT: vphsubd %xmm1, %xmm0, %xmm0
+; X64-NEXT: vphsubd %xmm3, %xmm2, %xmm1
+; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: retq
+  %1 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a0, <4 x i32> %a1)
+  %2 = call <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32> %a2, <4 x i32> %a3)
+  %3 = shufflevector <4 x i32> %1, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  ret <4 x i32> %3
+}
+
+define <8 x i16> @test_unpackh_hsub_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
+; X32-LABEL: test_unpackh_hsub_128:
+; X32: ## BB#0:
+; X32-NEXT: vphsubw %xmm1, %xmm0, %xmm0
+; X32-NEXT: vphsubw %xmm3, %xmm2, %xmm1
+; X32-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_hsub_128:
+; X64: ## BB#0:
+; X64-NEXT: vphsubw %xmm1, %xmm0, %xmm0
+; X64-NEXT: vphsubw %xmm3, %xmm2, %xmm1
+; X64-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X64-NEXT: retq
+  %1 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16> %a2, <8 x i16> %a3)
+  %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
+  ret <8 x i16> %3
+}
+
+define <16 x i8> @test_unpackl_packss_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
+; X32-LABEL: test_unpackl_packss_128:
+; X32: ## BB#0:
+; X32-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; X32-NEXT: vpacksswb %xmm3, %xmm2, %xmm1
+; X32-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_packss_128:
+; X64: ## BB#0:
+; X64-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpacksswb %xmm3, %xmm2, %xmm1
+; X64-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-NEXT: retq
+  %1 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16> %a2, <8 x i16> %a3)
+  %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+  ret <16 x i8> %3
+}
+
+define <8 x i16> @test_unpackh_packss_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
+; X32-LABEL: test_unpackh_packss_128:
+; X32: ## BB#0:
+; X32-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; X32-NEXT: vpackssdw %xmm3, %xmm2, %xmm1
+; X32-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_packss_128:
+; X64: ## BB#0:
+; X64-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
+; X64-NEXT: vpackssdw %xmm3, %xmm2, %xmm1
+; X64-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; X64-NEXT: retq
+  %1 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a0, <4 x i32> %a1)
+  %2 = call <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32> %a2, <4 x i32> %a3)
+  %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15>
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_unpackl_packus_128(<4 x i32> %a0, <4 x i32> %a1, <4 x i32> %a2, <4 x i32> %a3) {
+; X32-LABEL: test_unpackl_packus_128:
+; X32: ## BB#0:
+; X32-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_packus_128:
+; X64: ## BB#0:
+; X64-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
+; X64-NEXT: retq
+  %1 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a0, <4 x i32> %a1)
+  %2 = call <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32> %a2, <4 x i32> %a3)
+  %3 = shufflevector <8 x i16> %1, <8 x i16> %2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  ret <8 x i16> %3
+}
+
+define <16 x i8> @test_unpackh_packus_128(<8 x i16> %a0, <8 x i16> %a1, <8 x i16> %a2, <8 x i16> %a3) {
+; X32-LABEL: test_unpackh_packus_128:
+; X32: ## BB#0:
+; X32-NEXT: vpackuswb %xmm3, %xmm1, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_packus_128:
+; X64: ## BB#0:
+; X64-NEXT: vpackuswb %xmm3, %xmm1, %xmm0
+; X64-NEXT: retq
+  %1 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a0, <8 x i16> %a1)
+  %2 = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a2, <8 x i16> %a3)
+  %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  ret <16 x i8> %3
+}
+
+;
+; 256-bit Vectors
+;
+
+define <8 x float> @test_unpackl_fhadd_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> %a3) {
+; X32-LABEL: test_unpackl_fhadd_256:
+; X32: ## BB#0:
+; X32-NEXT: vhaddps %ymm1, %ymm0, %ymm0
+; X32-NEXT: vhaddps %ymm3, %ymm2, %ymm1
+; X32-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_fhadd_256:
+; X64: ## BB#0:
+; X64-NEXT: vhaddps %ymm1, %ymm0, %ymm0
+; X64-NEXT: vhaddps %ymm3, %ymm2, %ymm1
+; X64-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X64-NEXT: retq
+  %1 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a0, <8 x float> %a1)
+  %2 = call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %a2, <8 x float> %a3)
+  %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 12, i32 13>
+  ret <8 x float> %3
+}
+
+define <4 x double> @test_unpackh_fhadd_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> %a3) {
+; X32-LABEL: test_unpackh_fhadd_256:
+; X32: ## BB#0:
+; X32-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
+; X32-NEXT: vhaddpd %ymm3, %ymm2, %ymm1
+; X32-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_fhadd_256:
+; X64: ## BB#0:
+; X64-NEXT: vhaddpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: vhaddpd %ymm3, %ymm2, %ymm1
+; X64-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X64-NEXT: retq
+  %1 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a0, <4 x double> %a1)
+  %2 = call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %a2, <4 x double> %a3)
+  %3 = shufflevector <4 x double> %1, <4 x double> %2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  ret <4 x double> %3
+}
+
+define <4 x double> @test_unpackl_fhsub_256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, <4 x double> %a3) {
+; X32-LABEL: test_unpackl_fhsub_256:
+; X32: ## BB#0:
+; X32-NEXT: vhsubpd %ymm1, %ymm0, %ymm0
+; X32-NEXT: vhsubpd %ymm3, %ymm2, %ymm1
+; X32-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_fhsub_256:
+; X64: ## BB#0:
+; X64-NEXT: vhsubpd %ymm1, %ymm0, %ymm0
+; X64-NEXT: vhsubpd %ymm3, %ymm2, %ymm1
+; X64-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X64-NEXT: retq
+  %1 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a0, <4 x double> %a1)
+  %2 = call <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double> %a2, <4 x double> %a3)
+  %3 = shufflevector <4 x double> %1, <4 x double> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+  ret <4 x double> %3
+}
+
+define <8 x float> @test_unpackh_fhsub_256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, <8 x float> %a3) {
+; X32-LABEL: test_unpackh_fhsub_256:
+; X32: ## BB#0:
+; X32-NEXT: vhsubps %ymm1, %ymm0, %ymm0
+; X32-NEXT: vhsubps %ymm3, %ymm2, %ymm1
+; X32-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_fhsub_256:
+; X64: ## BB#0:
+; X64-NEXT: vhsubps %ymm1, %ymm0, %ymm0
+; X64-NEXT: vhsubps %ymm3, %ymm2, %ymm1
+; X64-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X64-NEXT: retq
+  %1 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a0, <8 x float> %a1)
+  %2 = call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %a2, <8 x float> %a3)
+  %3 = shufflevector <8 x float> %1, <8 x float> %2, <8 x i32> <i32 2, i32 3, i32 10, i32 11, i32 6, i32 7, i32 14, i32 15>
+  ret <8 x float> %3
+}
+
+define <16 x i16> @test_unpackl_hadd_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
+; X32-LABEL: test_unpackl_hadd_256:
+; X32: ## BB#0:
+; X32-NEXT: vphaddw %ymm1, %ymm0, %ymm0
+; X32-NEXT: vphaddw %ymm3, %ymm2, %ymm1
+; X32-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_hadd_256:
+; X64: ## BB#0:
+; X64-NEXT: vphaddw %ymm1, %ymm0, %ymm0
+; X64-NEXT: vphaddw %ymm3, %ymm2, %ymm1
+; X64-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X64-NEXT: retq
+  %1 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16> %a2, <16 x i16> %a3)
+  %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27>
+  ret <16 x i16> %3
+}
+
+define <8 x i32> @test_unpackh_hadd_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
+; X32-LABEL: test_unpackh_hadd_256:
+; X32: ## BB#0:
+; X32-NEXT: vphaddd %ymm1, %ymm0, %ymm0
+; X32-NEXT: vphaddd %ymm3, %ymm2, %ymm1
+; X32-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_hadd_256:
+; X64: ## BB#0:
+; X64-NEXT: vphaddd %ymm1, %ymm0, %ymm0
+; X64-NEXT: vphaddd %ymm3, %ymm2, %ymm1
+; X64-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X64-NEXT: retq
+  %1 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a0, <8 x i32> %a1)
+  %2 = call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %a2, <8 x i32> %a3)
+  %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <8 x i32> <i32 2, i32 3, i32 10, i32 11, i32 6, i32 7, i32 14, i32 15>
+  ret <8 x i32> %3
+}
+
+define <8 x i32> @test_unpackl_hsub_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
+; X32-LABEL: test_unpackl_hsub_256:
+; X32: ## BB#0:
+; X32-NEXT: vphsubd %ymm1, %ymm0, %ymm0
+; X32-NEXT: vphsubd %ymm3, %ymm2, %ymm1
+; X32-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_hsub_256:
+; X64: ## BB#0:
+; X64-NEXT: vphsubd %ymm1, %ymm0, %ymm0
+; X64-NEXT: vphsubd %ymm3, %ymm2, %ymm1
+; X64-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X64-NEXT: retq
+  %1 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a0, <8 x i32> %a1)
+  %2 = call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %a2, <8 x i32> %a3)
+  %3 = shufflevector <8 x i32> %1, <8 x i32> %2, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 12, i32 13>
+  ret <8 x i32> %3
+}
+
+define <16 x i16> @test_unpackh_hsub_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
+; X32-LABEL: test_unpackh_hsub_256:
+; X32: ## BB#0:
+; X32-NEXT: vphsubw %ymm1, %ymm0, %ymm0
+; X32-NEXT: vphsubw %ymm3, %ymm2, %ymm1
+; X32-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_hsub_256:
+; X64: ## BB#0:
+; X64-NEXT: vphsubw %ymm1, %ymm0, %ymm0
+; X64-NEXT: vphsubw %ymm3, %ymm2, %ymm1
+; X64-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X64-NEXT: retq
+  %1 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16> %a2, <16 x i16> %a3)
+  %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
+  ret <16 x i16> %3
+}
+
+define <32 x i8> @test_unpackl_packss_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
+; X32-LABEL: test_unpackl_packss_256:
+; X32: ## BB#0:
+; X32-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; X32-NEXT: vpacksswb %ymm3, %ymm2, %ymm1
+; X32-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_packss_256:
+; X64: ## BB#0:
+; X64-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; X64-NEXT: vpacksswb %ymm3, %ymm2, %ymm1
+; X64-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X64-NEXT: retq
+  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a2, <16 x i16> %a3)
+  %3 = shufflevector <32 x i8> %1, <32 x i8> %2, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55>
+  ret <32 x i8> %3
+}
+
+define <16 x i16> @test_unpackh_packss_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
+; X32-LABEL: test_unpackh_packss_256:
+; X32: ## BB#0:
+; X32-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; X32-NEXT: vpackssdw %ymm3, %ymm2, %ymm1
+; X32-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_packss_256:
+; X64: ## BB#0:
+; X64-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
+; X64-NEXT: vpackssdw %ymm3, %ymm2, %ymm1
+; X64-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X64-NEXT: retq
+  %1 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a0, <8 x i32> %a1)
+  %2 = call <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32> %a2, <8 x i32> %a3)
+  %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_unpackl_packus_256(<8 x i32> %a0, <8 x i32> %a1, <8 x i32> %a2, <8 x i32> %a3) {
+; X32-LABEL: test_unpackl_packus_256:
+; X32: ## BB#0:
+; X32-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; X32-NEXT: vpackusdw %ymm3, %ymm2, %ymm1
+; X32-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackl_packus_256:
+; X64: ## BB#0:
+; X64-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
+; X64-NEXT: vpackusdw %ymm3, %ymm2, %ymm1
+; X64-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; X64-NEXT: retq
+  %1 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a0, <8 x i32> %a1)
+  %2 = call <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32> %a2, <8 x i32> %a3)
+  %3 = shufflevector <16 x i16> %1, <16 x i16> %2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27>
+  ret <16 x i16> %3
+}
+
+define <32 x i8> @test_unpackh_packus_256(<16 x i16> %a0, <16 x i16> %a1, <16 x i16> %a2, <16 x i16> %a3) {
+; X32-LABEL: test_unpackh_packus_256:
+; X32: ## BB#0:
+; X32-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; X32-NEXT: vpacksswb %ymm3, %ymm2, %ymm1
+; X32-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X32-NEXT: retl
+;
+; X64-LABEL: test_unpackh_packus_256:
+; X64: ## BB#0:
+; X64-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; X64-NEXT: vpacksswb %ymm3, %ymm2, %ymm1
+; X64-NEXT: vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; X64-NEXT: retq
+  %1 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a0, <16 x i16> %a1)
+  %2 = call <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16> %a2, <16 x i16> %a3)
+  %3 = shufflevector <32 x i8> %1, <32 x i8> %2, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  ret <32 x i8> %3
+}
+
+declare <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float>, <4 x float>)
+declare <4 x float> @llvm.x86.sse3.hsub.ps(<4 x float>, <4 x float>)
+declare <2 x double> @llvm.x86.sse3.hadd.pd(<2 x double>, <2 x double>)
+declare <2 x double> @llvm.x86.sse3.hsub.pd(<2 x double>, <2 x double>)
+
+declare <8 x i16> @llvm.x86.ssse3.phadd.w.128(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.x86.ssse3.phadd.d.128(<4 x i32>, <4 x i32>)
+declare <8 x i16> @llvm.x86.ssse3.phsub.w.128(<8 x i16>, <8 x i16>)
+declare <4 x i32> @llvm.x86.ssse3.phsub.d.128(<4 x i32>, <4 x i32>)
+
+declare <16 x i8> @llvm.x86.sse2.packsswb.128(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.x86.sse2.packssdw.128(<4 x i32>, <4 x i32>)
+declare <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16>, <8 x i16>)
+declare <8 x i16> @llvm.x86.sse41.packusdw(<4 x i32>, <4 x i32>)
+
+declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>)
+declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>)
+declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>)
+declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>)
+
+declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>)
+declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>)
+declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>)
+
+declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>)
+declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>)
+declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>)
+declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>)
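
For reference, a minimal sketch of the fold these tests are hoping for (illustrative only; not part of the committed test file): a 64-bit element unpack of two horizontal-op results selects whole per-input halves, so it is equivalent to a single horizontal op on the original operands. Taking test_unpackl_fhadd_128 as the example:

  %1 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a1)
  %2 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a2, <4 x float> %a3)
  %3 = shufflevector <4 x float> %1, <4 x float> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ;   ==> %3 = call <4 x float> @llvm.x86.sse3.hadd.ps(<4 x float> %a0, <4 x float> %a2)

i.e. a single "vhaddps %xmm2, %xmm0, %xmm0" could replace the two vhaddps plus the vunpcklpd currently emitted.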