//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
+// Note: these instructions do not affect YMM16-YMM31.
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
- YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15,
- YMM16, YMM17, YMM18, YMM19, YMM20, YMM21, YMM22, YMM23,
- YMM24, YMM25, YMM26, YMM27, YMM28, YMM29, YMM30, YMM31] in {
+ YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
[(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>;
declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>) nounwind readnone
define <4 x double> @test_x86_avx_vzeroall(<4 x double> %a, <4 x double> %b) {
-; CHECK-LABEL: test_x86_avx_vzeroall:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill
-; CHECK-NEXT: vzeroall
-; CHECK-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload
-; CHECK-NEXT: retq
+; AVX-LABEL: test_x86_avx_vzeroall:
+; AVX: ## BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill
+; AVX-NEXT: vzeroall
+; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload
+; AVX-NEXT: retq
+;
+; AVX512VL-LABEL: test_x86_avx_vzeroall:
+; AVX512VL: ## BB#0:
+; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16
+; AVX512VL-NEXT: vzeroall
+; AVX512VL-NEXT: vmovapd %ymm16, %ymm0
+; AVX512VL-NEXT: retq
%c = fadd <4 x double> %a, %b
call void @llvm.x86.avx.vzeroall()
ret <4 x double> %c
declare void @llvm.x86.avx.vzeroall() nounwind
define <4 x double> @test_x86_avx_vzeroupper(<4 x double> %a, <4 x double> %b) {
-; CHECK-LABEL: test_x86_avx_vzeroupper:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vaddpd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill
-; CHECK-NEXT: vzeroupper
-; CHECK-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload
-; CHECK-NEXT: retq
+; AVX-LABEL: test_x86_avx_vzeroupper:
+; AVX: ## BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmovupd %ymm0, -{{[0-9]+}}(%rsp) ## 32-byte Spill
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: vmovups -{{[0-9]+}}(%rsp), %ymm0 ## 32-byte Reload
+; AVX-NEXT: retq
+;
+; AVX512VL-LABEL: test_x86_avx_vzeroupper:
+; AVX512VL: ## BB#0:
+; AVX512VL-NEXT: vaddpd %ymm1, %ymm0, %ymm16
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: vmovapd %ymm16, %ymm0
+; AVX512VL-NEXT: retq
%c = fadd <4 x double> %a, %b
call void @llvm.x86.avx.vzeroupper()
ret <4 x double> %c