From: Craig Topper Date: Tue, 7 Feb 2017 04:10:57 +0000 (+0000) Subject: [X86] Change the Defs list for VZEROALL/VZEROUPPER back to not including YMM16-31. X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=47db62e1758e46b2656d3dc86f78cbe6af1965a7;p=llvm [X86] Change the Defs list for VZEROALL/VZEROUPPER back to not including YMM16-31. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294277 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index 4f8e7c95d91..ddd7b808450 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -8060,10 +8060,9 @@ def : Pat<(v16i16 (X86VPerm2x128 VR256:$src1, //===----------------------------------------------------------------------===// // VZERO - Zero YMM registers // +// Note, these instruction do not affect the YMM16-YMM31. let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7, - YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15, - YMM16, YMM17, YMM18, YMM19, YMM20, YMM21, YMM22, YMM23, - YMM24, YMM25, YMM26, YMM27, YMM28, YMM29, YMM30, YMM31] in { + YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in { // Zero All YMM registers def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall", [(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>; diff --git a/test/CodeGen/X86/avx-intrinsics-x86_64.ll b/test/CodeGen/X86/avx-intrinsics-x86_64.ll index 24ab870cac8..9f18c0930fd 100644 --- a/test/CodeGen/X86/avx-intrinsics-x86_64.ll +++ b/test/CodeGen/X86/avx-intrinsics-x86_64.ll @@ -68,13 +68,20 @@ define i64 @test_x86_sse_cvttss2si64(<4 x float> %a0) { declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) { -; CHECK-LABEL: test_x86_avx_vzeroall: -; CHECK: ## BB#0: -; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill -; CHECK-NEXT: vzeroall -; CHECK-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload
-; CHECK-NEXT: retq +; AVX-LABEL: test_x86_avx_vzeroall: +; AVX: ## BB#0: +; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 +; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill +; AVX-NEXT: vzeroall +; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload +; AVX-NEXT: retq +; +; AVX512VL-LABEL: test_x86_avx_vzeroall: +; AVX512VL: ## BB#0: +; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16 +; AVX512VL-NEXT: vzeroall +; AVX512VL-NEXT: vmovapd %ymm16, %ymm0 +; AVX512VL-NEXT: retq %c = fadd <4 x double> %a, %b call void @llvm.x86.avx.vzeroall() ret <4 x double> %c @@ -82,13 +89,20 @@ define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) { declare void @llvm.x86.avx.vzeroall() nounwind define <4 x double> @test_x86_avx_vzeroupper(<4 x double> %a, <4 x double> %b) { -; CHECK-LABEL: test_x86_avx_vzeroupper: -; CHECK: ## BB#0: -; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0 -; CHECK-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill -; CHECK-NEXT: vzeroupper -; CHECK-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload -; CHECK-NEXT: retq +; AVX-LABEL: test_x86_avx_vzeroupper: +; AVX: ## BB#0: +; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0 +; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill +; AVX-NEXT: vzeroupper +; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload +; AVX-NEXT: retq +; +; AVX512VL-LABEL: test_x86_avx_vzeroupper: +; AVX512VL: ## BB#0: +; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16 +; AVX512VL-NEXT: vzeroupper +; AVX512VL-NEXT: vmovapd %ymm16, %ymm0 +; AVX512VL-NEXT: retq %c = fadd <4 x double> %a, %b call void @llvm.x86.avx.vzeroupper() ret <4 x double> %c