defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", avx512vl_f64_info>, PD, VEX_W;
defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", avx512vl_f32_info>, PS;
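+// The VMOVNTDQ instantiation of avx512_movnt_vl uses avx512vl_i64_info, so
+// the multiclass only produces patterns for the i64 vector types. Add
+// explicit patterns so the remaining 128/256-bit integer types also select
+// the EVEX nontemporal store when VLX is available. AddedComplexity = 400
+// matches the SSE/AVX nontemporal-store patterns, so these are preferred
+// over plain aligned stores.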
+let Predicates = [HasVLX], AddedComplexity = 400 in {
+  def : Pat<(alignednontemporalstore (v8i32 VR256X:$src), addr:$dst),
+            (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
+  def : Pat<(alignednontemporalstore (v16i16 VR256X:$src), addr:$dst),
+            (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
+  def : Pat<(alignednontemporalstore (v32i8 VR256X:$src), addr:$dst),
+            (VMOVNTDQZ256mr addr:$dst, VR256X:$src)>;
+
+  def : Pat<(alignednontemporalstore (v4i32 VR128X:$src), addr:$dst),
+            (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
+  def : Pat<(alignednontemporalstore (v8i16 VR128X:$src), addr:$dst),
+            (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
+  def : Pat<(alignednontemporalstore (v16i8 VR128X:$src), addr:$dst),
+            (VMOVNTDQZ128mr addr:$dst, VR128X:$src)>;
+}
+
//===----------------------------------------------------------------------===//
// AVX-512 - Integer arithmetic
//
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s -check-prefix=CHECK -check-prefix=VLX
; Make sure that we generate non-temporal stores for the test cases below.
; We use xorps for zeroing, so domain information isn't available anymore.
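+; With VLX, the zero idiom is an EVEX integer xor instead, so the VLX checks
+; below expect vmovntdq even for the FP-typed zero stores.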
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <4 x float> zeroinitializer, <4 x float>* %dst, align 16, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <4 x i32> zeroinitializer, <4 x i32>* %dst, align 16, !nontemporal !1
store <4 x i32> zeroinitializer, <4 x i32>* %dst, align 16, !nontemporal !1
ret void
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <2 x double> zeroinitializer, <2 x double>* %dst, align 16, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <2 x i64> zeroinitializer, <2 x i64>* %dst, align 16, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <8 x i16> zeroinitializer, <8 x i16>* %dst, align 16, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <16 x i8> zeroinitializer, <16 x i8>* %dst, align 16, !nontemporal !1
ret void
}
; CHECK-LABEL: test_zero_v8f32:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <8 x float> zeroinitializer, <8 x float>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_zero_v8i32:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <8 x i32> zeroinitializer, <8 x i32>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_zero_v4f64:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <4 x double> zeroinitializer, <4 x double>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_zero_v4i64:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <4 x i64> zeroinitializer, <4 x i64>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_zero_v16i16:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <16 x i16> zeroinitializer, <16 x i16>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_zero_v32i8:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <32 x i8> zeroinitializer, <32 x i8>* %dst, align 32, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntps
store <4 x float> %arg, <4 x float>* %dst, align 16, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <4 x i32> %arg, <4 x i32>* %dst, align 16, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntpd
store <2 x double> %arg, <2 x double>* %dst, align 16, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <2 x i64> %arg, <2 x i64>* %dst, align 16, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <8 x i16> %arg, <8 x i16>* %dst, align 16, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntdq
store <16 x i8> %arg, <16 x i8>* %dst, align 16, !nontemporal !1
ret void
}
; CHECK-LABEL: test_arg_v8f32:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntps %ymm
store <8 x float> %arg, <8 x float>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_arg_v8i32:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <8 x i32> %arg, <8 x i32>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_arg_v4f64:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntpd %ymm
store <4 x double> %arg, <4 x double>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_arg_v4i64:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <4 x i64> %arg, <4 x i64>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_arg_v16i16:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <16 x i16> %arg, <16 x i16>* %dst, align 32, !nontemporal !1
ret void
}
; CHECK-LABEL: test_arg_v32i8:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntdq %ymm
store <32 x i8> %arg, <32 x i8>* %dst, align 32, !nontemporal !1
ret void
}
; SSE: movntps
; AVX: vmovntps
; AVX2: vmovntps
+; VLX: vmovntps
%r = fadd <4 x float> %a, %b
store <4 x float> %r, <4 x float>* %dst, align 16, !nontemporal !1
ret void
; SSE: movntdq
; AVX: vmovntdq
; AVX2: vmovntdq
+; VLX: vmovntdq
%r = add <4 x i32> %a, %b
store <4 x i32> %r, <4 x i32>* %dst, align 16, !nontemporal !1
ret void
; SSE: movntpd
; AVX: vmovntpd
; AVX2: vmovntpd
+; VLX: vmovntpd
%r = fadd <2 x double> %a, %b
store <2 x double> %r, <2 x double>* %dst, align 16, !nontemporal !1
ret void
; SSE: movntdq
; AVX: vmovntdq
; AVX2: vmovntdq
+; VLX: vmovntdq
%r = add <2 x i64> %a, %b
store <2 x i64> %r, <2 x i64>* %dst, align 16, !nontemporal !1
ret void
; SSE: movntdq
; AVX: vmovntdq
; AVX2: vmovntdq
+; VLX: vmovntdq
%r = add <8 x i16> %a, %b
store <8 x i16> %r, <8 x i16>* %dst, align 16, !nontemporal !1
ret void
; SSE: movntdq
; AVX: vmovntdq
; AVX2: vmovntdq
+; VLX: vmovntdq
%r = add <16 x i8> %a, %b
store <16 x i8> %r, <16 x i8>* %dst, align 16, !nontemporal !1
ret void
; CHECK-LABEL: test_op_v8f32:
; AVX: vmovntps %ymm
; AVX2: vmovntps %ymm
+; VLX: vmovntps %ymm
%r = fadd <8 x float> %a, %b
store <8 x float> %r, <8 x float>* %dst, align 32, !nontemporal !1
ret void
; CHECK-LABEL: test_op_v8i32:
; AVX: vmovntps %ymm
; AVX2: vmovntdq %ymm
+; VLX: vmovntdq %ymm
%r = add <8 x i32> %a, %b
store <8 x i32> %r, <8 x i32>* %dst, align 32, !nontemporal !1
ret void
; CHECK-LABEL: test_op_v4f64:
; AVX: vmovntpd %ymm
; AVX2: vmovntpd %ymm
+; VLX: vmovntpd %ymm
%r = fadd <4 x double> %a, %b
store <4 x double> %r, <4 x double>* %dst, align 32, !nontemporal !1
ret void
; CHECK-LABEL: test_op_v4i64:
; AVX: vmovntps %ymm
; AVX2: vmovntdq %ymm
+; VLX: vmovntdq %ymm
%r = add <4 x i64> %a, %b
store <4 x i64> %r, <4 x i64>* %dst, align 32, !nontemporal !1
ret void
; CHECK-LABEL: test_op_v16i16:
; AVX: vmovntps %ymm
; AVX2: vmovntdq %ymm
+; VLX: vmovntdq %ymm
%r = add <16 x i16> %a, %b
store <16 x i16> %r, <16 x i16>* %dst, align 32, !nontemporal !1
ret void
; CHECK-LABEL: test_op_v32i8:
; AVX: vmovntps %ymm
; AVX2: vmovntdq %ymm
+; VLX: vmovntdq %ymm
%r = add <32 x i8> %a, %b
store <32 x i8> %r, <32 x i8>* %dst, align 32, !nontemporal !1
ret void
; AVX: vmovups %ymm
; AVX2-NOT: movnt
; AVX2: vmovups %ymm
+; VLX-NOT: movnt
+; VLX: vmovups %ymm
%r = fadd <8 x float> %a, %b
store <8 x float> %r, <8 x float>* %dst, align 16, !nontemporal !1
ret void