From 1fadf01eab83866cf5fc7d13435e9bac4ac1cb7c Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Thu, 27 Jun 2019 05:51:56 +0000
Subject: [PATCH] [X86] Teach selectScalarSSELoad to not narrow volatile loads.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@364498 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelDAGToDAG.cpp  | 12 +++++-----
 test/CodeGen/X86/fold-load-unops.ll | 34 +++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+), 5 deletions(-)

diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 9b7b1cfab76..544a3fa922d 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2283,12 +2283,14 @@ bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, SDNode *Parent,
   if (!hasSingleUsesFromRoot(Root, Parent))
     return false;
 
-  // We can allow a full vector load here since narrowing a load is ok.
+  // We can allow a full vector load here since narrowing a load is ok unless
+  // it's volatile.
   if (ISD::isNON_EXTLoad(N.getNode())) {
-    PatternNodeWithChain = N;
-    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
-        IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
-      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
+    LoadSDNode *LD = cast<LoadSDNode>(N);
+    if (!LD->isVolatile() &&
+        IsProfitableToFold(N, LD, Root) &&
+        IsLegalToFold(N, Parent, Root, OptLevel)) {
+      PatternNodeWithChain = N;
       return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                         Segment);
     }
diff --git a/test/CodeGen/X86/fold-load-unops.ll b/test/CodeGen/X86/fold-load-unops.ll
index c77c6adf2e8..aa6bc720fa9 100644
--- a/test/CodeGen/X86/fold-load-unops.ll
+++ b/test/CodeGen/X86/fold-load-unops.ll
@@ -179,6 +179,23 @@ define <4 x float> @sqrtss_full_size(<4 x float>* %a) optsize{
   ret <4 x float> %res
 }
 
+define <4 x float> @sqrtss_full_size_volatile(<4 x float>* %a) optsize{
+; SSE-LABEL: sqrtss_full_size_volatile:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps (%rdi), %xmm0
+; SSE-NEXT:    sqrtss %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: sqrtss_full_size_volatile:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps (%rdi), %xmm0
+; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %ld = load volatile <4 x float>, <4 x float>* %a
+  %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ld)
+  ret <4 x float> %res
+}
+
 define double @sqrtsd_size(double* %a) optsize {
 ; SSE-LABEL: sqrtsd_size:
 ; SSE:       # %bb.0:
@@ -213,6 +230,23 @@ define <2 x double> @sqrtsd_full_size(<2 x double>* %a) optsize {
   ret <2 x double> %res
 }
 
+define <2 x double> @sqrtsd_full_size_volatile(<2 x double>* %a) optsize {
+; SSE-LABEL: sqrtsd_full_size_volatile:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd (%rdi), %xmm0
+; SSE-NEXT:    sqrtsd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: sqrtsd_full_size_volatile:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovapd (%rdi), %xmm0
+; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %ld = load volatile <2 x double>, <2 x double>* %a
+  %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ld)
+  ret <2 x double> %res
+}
+
 declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone
-- 
2.40.0