From 99f07c223c3ace9c1fa7d160f99f18de9419793e Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Wed, 1 May 2019 16:06:21 +0000
Subject: [PATCH] Revert "[DAGCombiner] try repeated fdiv divisor transform
 before building estimate"

This reverts commit fb9a5307a94e6f1f850e4d89f79103b123f16279 (rL359398)
because it can cause an infinite loop due to opposing combines.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@359695 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  6 +--
 test/CodeGen/X86/fdiv-combine-vec.ll     | 66 ++++++++++++++----------
 2 files changed, 42 insertions(+), 30 deletions(-)
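
For context: combineRepeatedFPDivisors rewrites several divisions by one
(splatted) divisor into a single reciprocal followed by multiplies, when
reciprocal math is allowed and enough divisions share the divisor. A minimal
scalar sketch of that idea in plain C++ (the function name and the
fast-math-style assumption are illustrative only, not taken from LLVM):

    #include <cstddef>

    // Scalar analogue of the repeated-divisor combine: N divisions by the
    // same value become one (possibly estimated) reciprocal plus N multiplies.
    void divide_all(float *out, const float *in, std::size_t n, float d) {
      float r = 1.0f / d;          // conceptually replaces out[i] = in[i] / d
      for (std::size_t i = 0; i != n; ++i)
        out[i] = in[i] * r;
    }

The CHECK-line churn below is the visible effect of the ordering: with the
combine tried last again, the splat_fdiv tests expect the rcpps-based
reciprocal-estimate expansion instead of a single divss/vdivss followed by a
splat.
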
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index aeab54d65a8..d1e71237fe8 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11993,9 +11993,6 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
   if (SDValue NewSel = foldBinOpIntoSelect(N))
     return NewSel;
 
-  if (SDValue V = combineRepeatedFPDivisors(N))
-    return V;
-
   if (Options.UnsafeFPMath || Flags.hasAllowReciprocal()) {
     // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable.
     if (N1CFP) {
@@ -12085,6 +12082,9 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
     }
   }
 
+  if (SDValue CombineRepeatedDivisors = combineRepeatedFPDivisors(N))
+    return CombineRepeatedDivisors;
+
   return SDValue();
 }
 
diff --git a/test/CodeGen/X86/fdiv-combine-vec.ll b/test/CodeGen/X86/fdiv-combine-vec.ll
index 825f8a50f96..6de3f31892d 100644
--- a/test/CodeGen/X86/fdiv-combine-vec.ll
+++ b/test/CodeGen/X86/fdiv-combine-vec.ll
@@ -51,17 +51,25 @@ define <4 x double> @splat_fdiv_v4f64(<4 x double> %x, double %y) {
 define <4 x float> @splat_fdiv_v4f32(<4 x float> %x, float %y) {
 ; SSE-LABEL: splat_fdiv_v4f32:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; SSE-NEXT:    divss %xmm1, %xmm2
-; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0,0,0]
-; SSE-NEXT:    mulps %xmm2, %xmm0
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE-NEXT:    rcpps %xmm1, %xmm2
+; SSE-NEXT:    mulps %xmm2, %xmm1
+; SSE-NEXT:    movaps {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; SSE-NEXT:    subps %xmm1, %xmm3
+; SSE-NEXT:    mulps %xmm2, %xmm3
+; SSE-NEXT:    addps %xmm2, %xmm3
+; SSE-NEXT:    mulps %xmm3, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: splat_fdiv_v4f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT:    vdivss %xmm1, %xmm2, %xmm1
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX-NEXT:    vrcpps %xmm1, %xmm2
+; AVX-NEXT:    vmulps %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vmovaps {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX-NEXT:    vsubps %xmm1, %xmm3, %xmm1
+; AVX-NEXT:    vmulps %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vaddps %xmm1, %xmm2, %xmm1
 ; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vy = insertelement <4 x float> undef, float %y, i32 0
@@ -82,10 +90,14 @@ define <8 x float> @splat_fdiv_v8f32(<8 x float> %x, float %y) {
 ;
 ; AVX-LABEL: splat_fdiv_v8f32:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
-; AVX-NEXT:    vdivss %xmm1, %xmm2, %xmm1
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX-NEXT:    vrcpps %ymm1, %ymm2
+; AVX-NEXT:    vmulps %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX-NEXT:    vsubps %ymm1, %ymm3, %ymm1
+; AVX-NEXT:    vmulps %ymm1, %ymm2, %ymm1
+; AVX-NEXT:    vaddps %ymm1, %ymm2, %ymm1
 ; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vy = insertelement <8 x float> undef, float %y, i32 0
@@ -97,25 +109,25 @@ define <8 x float> @splat_fdiv_v8f32(<8 x float> %x, float %y) {
 define <4 x float> @splat_fdiv_v4f32_estimate(<4 x float> %x, float %y) #0 {
 ; SSE-LABEL: splat_fdiv_v4f32_estimate:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    rcpss %xmm1, %xmm2
-; SSE-NEXT:    mulss %xmm2, %xmm1
-; SSE-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; SSE-NEXT:    subss %xmm1, %xmm3
-; SSE-NEXT:    mulss %xmm2, %xmm3
-; SSE-NEXT:    addss %xmm2, %xmm3
-; SSE-NEXT:    shufps {{.*#+}} xmm3 = xmm3[0,0,0,0]
+; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; SSE-NEXT:    rcpps %xmm1, %xmm2
+; SSE-NEXT:    mulps %xmm2, %xmm1
+; SSE-NEXT:    movaps {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; SSE-NEXT:    subps %xmm1, %xmm3
+; SSE-NEXT:    mulps %xmm2, %xmm3
+; SSE-NEXT:    addps %xmm2, %xmm3
 ; SSE-NEXT:    mulps %xmm3, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: splat_fdiv_v4f32_estimate:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vrcpss %xmm1, %xmm1, %xmm2
-; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vsubss %xmm1, %xmm3, %xmm1
-; AVX-NEXT:    vmulss %xmm1, %xmm2, %xmm1
-; AVX-NEXT:    vaddss %xmm1, %xmm2, %xmm1
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX-NEXT:    vrcpps %xmm1, %xmm2
+; AVX-NEXT:    vmulps %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vmovaps {{.*#+}} xmm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX-NEXT:    vsubps %xmm1, %xmm3, %xmm1
+; AVX-NEXT:    vmulps %xmm1, %xmm2, %xmm1
+; AVX-NEXT:    vaddps %xmm1, %xmm2, %xmm1
 ; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %vy = insertelement <4 x float> undef, float %y, i32 0
@@ -140,14 +152,14 @@ define <8 x float> @splat_fdiv_v8f32_estimate(<8 x float> %x, float %y) #0 {
 ;
 ; AVX-LABEL: splat_fdiv_v8f32_estimate:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vrcpss %xmm1, %xmm1, %xmm2
-; AVX-NEXT:    vmulss %xmm2, %xmm1, %xmm1
-; AVX-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
-; AVX-NEXT:    vsubss %xmm1, %xmm3, %xmm1
-; AVX-NEXT:    vmulss %xmm1, %xmm2, %xmm1
-; AVX-NEXT:    vaddss %xmm1, %xmm2, %xmm1
 ; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX-NEXT:    vrcpps %ymm1, %ymm2
+; AVX-NEXT:    vmulps %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vmovaps {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
+; AVX-NEXT:    vsubps %ymm1, %ymm3, %ymm1
+; AVX-NEXT:    vmulps %ymm1, %ymm2, %ymm1
+; AVX-NEXT:    vaddps %ymm1, %ymm2, %ymm1
 ; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    retq
   %vy = insertelement <8 x float> undef, float %y, i32 0
-- 
2.40.0
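
The rcpps/vrcpps sequences that the updated CHECK lines expect are the usual
one-step Newton-Raphson refinement of the hardware reciprocal estimate: for
e = rcpps(d), the refined reciprocal is e + e * (1 - d * e), which the tests
spell as mulps, subps against a vector of 1.0, mulps, addps. A scalar sketch
in C++ (illustrative only; the function name is not from LLVM):

    // One Newton-Raphson step on a reciprocal estimate e of 1/d.
    // Mirrors the rcpps; mulps; subps; mulps; addps pattern in the tests.
    float refine_recip(float e, float d) {
      return e + e * (1.0f - d * e);
    }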