From ef1bd283c257c9e6e17aec1b35fe5e09e5ebedf6 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Mon, 7 Jan 2019 16:10:14 +0000
Subject: [PATCH] [x86] add more tests for LowerToHorizontalOp(); NFC

These tests show missed optimizations and a miscompile similar to PR40243 -
https://bugs.llvm.org/show_bug.cgi?id=40243

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@350533 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/haddsub-undef.ll | 236 ++++++++++++++++++++++++++++++
 1 file changed, 236 insertions(+)

diff --git a/test/CodeGen/X86/haddsub-undef.ll b/test/CodeGen/X86/haddsub-undef.ll
index 4fcc38d5db9..7ba753b16cc 100644
--- a/test/CodeGen/X86/haddsub-undef.ll
+++ b/test/CodeGen/X86/haddsub-undef.ll
@@ -731,3 +731,239 @@ define <4 x float> @add_ps_018(<4 x float> %x) {
   ret <4 x float> %shuffle2
 }
 
+define <4 x float> @v8f32_inputs_v4f32_output_0101(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: v8f32_inputs_v4f32_output_0101:
+; SSE: # %bb.0:
+; SSE-NEXT: haddps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-SLOW-LABEL: v8f32_inputs_v4f32_output_0101:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vaddss %xmm3, %xmm1, %xmm1
+; AVX-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: v8f32_inputs_v4f32_output_0101:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm0
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
+  %a0 = extractelement <8 x float> %a, i32 0
+  %a1 = extractelement <8 x float> %a, i32 1
+  %b0 = extractelement <8 x float> %b, i32 0
+  %b1 = extractelement <8 x float> %b, i32 1
+  %add0 = fadd float %a0, %a1
+  %add2 = fadd float %b0, %b1
+  %r0 = insertelement <4 x float> undef, float %add0, i32 0
+  %r = insertelement <4 x float> %r0, float %add2, i32 2
+  ret <4 x float> %r
+}
+
+define <4 x float> @v8f32_input0_v4f32_output_0123(<8 x float> %a, <4 x float> %b) {
+; SSE-LABEL: v8f32_input0_v4f32_output_0123:
+; SSE: # %bb.0:
+; SSE-NEXT: haddps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-SLOW-LABEL: v8f32_input0_v4f32_output_0123:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-SLOW-NEXT: vaddss %xmm1, %xmm3, %xmm1
+; AVX-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: v8f32_input0_v4f32_output_0123:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-FAST-NEXT: vaddss %xmm1, %xmm2, %xmm1
+; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
+  %a0 = extractelement <8 x float> %a, i32 0
+  %a1 = extractelement <8 x float> %a, i32 1
+  %b2 = extractelement <4 x float> %b, i32 2
+  %b3 = extractelement <4 x float> %b, i32 3
+  %add0 = fadd float %a0, %a1
+  %add3 = fadd float %b2, %b3
+  %r0 = insertelement <4 x float> undef, float %add0, i32 0
+  %r = insertelement <4 x float> %r0, float %add3, i32 3
+  ret <4 x float> %r
+}
+
+define <4 x float> @v8f32_input1_v4f32_output_2301(<4 x float> %a, <8 x float> %b) {
+; SSE-LABEL: v8f32_input1_v4f32_output_2301:
+; SSE: # %bb.0:
+; SSE-NEXT: haddps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-SLOW-LABEL: v8f32_input1_v4f32_output_2301:
+; AVX-SLOW: # %bb.0:
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX-SLOW-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; AVX-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-SLOW-NEXT: vzeroupper
+; AVX-SLOW-NEXT: retq
+;
+; AVX-FAST-LABEL: v8f32_input1_v4f32_output_2301:
+; AVX-FAST: # %bb.0:
+; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
+; AVX-FAST-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; AVX-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-FAST-NEXT: vzeroupper
+; AVX-FAST-NEXT: retq
+  %a2 = extractelement <4 x float> %a, i32 2
+  %a3 = extractelement <4 x float> %a, i32 3
+  %b0 = extractelement <8 x float> %b, i32 0
+  %b1 = extractelement <8 x float> %b, i32 1
+  %add1 = fadd float %a2, %a3
+  %add2 = fadd float %b0, %b1
+  %r1 = insertelement <4 x float> undef, float %add1, i32 1
+  %r = insertelement <4 x float> %r1, float %add2, i32 2
+  ret <4 x float> %r
+}
+
+define <4 x float> @v8f32_inputs_v4f32_output_2323(<8 x float> %a, <8 x float> %b) {
+; SSE-LABEL: v8f32_inputs_v4f32_output_2323:
+; SSE: # %bb.0:
+; SSE-NEXT: haddps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: v8f32_inputs_v4f32_output_2323:
+; AVX: # %bb.0:
+; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
+; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT: vaddss %xmm1, %xmm2, %xmm1
+; AVX-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+  %a2 = extractelement <8 x float> %a, i32 2
+  %a3 = extractelement <8 x float> %a, i32 3
+  %b2 = extractelement <8 x float> %b, i32 2
+  %b3 = extractelement <8 x float> %b, i32 3
+  %add1 = fadd float %a2, %a3
+  %add3 = fadd float %b2, %b3
+  %r1 = insertelement <4 x float> undef, float %add1, i32 1
+  %r = insertelement <4 x float> %r1, float %add3, i32 3
+  ret <4 x float> %r
+}
+
+define <4 x float> @v16f32_inputs_v4f32_output_0123(<16 x float> %a, <16 x float> %b) {
+; SSE-LABEL: v16f32_inputs_v4f32_output_0123:
+; SSE: # %bb.0:
+; SSE-NEXT: haddps %xmm4, %xmm0
+; SSE-NEXT: retq
+;
+; AVX1-SLOW-LABEL: v16f32_inputs_v4f32_output_0123:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm3, %xmm2
+; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm2[0]
+; AVX1-SLOW-NEXT: vzeroupper
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: v16f32_inputs_v4f32_output_0123:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX1-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
+; AVX1-FAST-NEXT: vzeroupper
+; AVX1-FAST-NEXT: retq
+;
+; AVX512-SLOW-LABEL: v16f32_inputs_v4f32_output_0123:
+; AVX512-SLOW: # %bb.0:
+; AVX512-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-SLOW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX512-SLOW-NEXT: vaddss %xmm1, %xmm3, %xmm1
+; AVX512-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX512-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX512-SLOW-NEXT: vzeroupper
+; AVX512-SLOW-NEXT: retq
+;
+; AVX512-FAST-LABEL: v16f32_inputs_v4f32_output_0123:
+; AVX512-FAST: # %bb.0:
+; AVX512-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
+; AVX512-FAST-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX512-FAST-NEXT: vaddss %xmm1, %xmm2, %xmm1
+; AVX512-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX512-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
+; AVX512-FAST-NEXT: vzeroupper
+; AVX512-FAST-NEXT: retq
+  %a0 = extractelement <16 x float> %a, i32 0
+  %a1 = extractelement <16 x float> %a, i32 1
+  %b2 = extractelement <16 x float> %b, i32 2
+  %b3 = extractelement <16 x float> %b, i32 3
+  %add0 = fadd float %a0, %a1
+  %add3 = fadd float %b2, %b3
+  %r0 = insertelement <4 x float> undef, float %add0, i32 0
+  %r = insertelement <4 x float> %r0, float %add3, i32 3
+  ret <4 x float> %r
+}
+
+; FIXME: Miscompiles with any AVX.
+
+define <8 x float> @v16f32_inputs_v8f32_output_4567(<16 x float> %a, <16 x float> %b) {
+; SSE-LABEL: v16f32_inputs_v8f32_output_4567:
+; SSE: # %bb.0:
+; SSE-NEXT: haddps %xmm5, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-SLOW-LABEL: v16f32_inputs_v8f32_output_4567:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vhaddps %ymm0, %ymm0, %ymm0
+; AVX1-SLOW-NEXT: retq
+;
+; AVX1-FAST-LABEL: v16f32_inputs_v8f32_output_4567:
+; AVX1-FAST: # %bb.0:
+; AVX1-FAST-NEXT: vhaddps %ymm0, %ymm0, %ymm0
+; AVX1-FAST-NEXT: retq
+;
+; AVX512-LABEL: v16f32_inputs_v8f32_output_4567:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX512-NEXT: vaddss %xmm1, %xmm3, %xmm1
+; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
+; AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
+; AVX512-NEXT: retq
+  %a4 = extractelement <16 x float> %a, i32 4
+  %a5 = extractelement <16 x float> %a, i32 5
+  %b6 = extractelement <16 x float> %b, i32 6
+  %b7 = extractelement <16 x float> %b, i32 7
+  %add4 = fadd float %a4, %a5
+  %add7 = fadd float %b6, %b7
+  %r4 = insertelement <8 x float> undef, float %add4, i32 4
+  %r = insertelement <8 x float> %r4, float %add7, i32 7
+  ret <8 x float> %r
+}
+
-- 
2.50.1