            avx512_fp_binop_p_round<0x5C, "vsub", X86fsubRnd>;
defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv, HasAVX512>,
            avx512_fp_binop_p_round<0x5E, "vdiv", X86fdivRnd>;
-defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, HasAVX512, 1>,
+defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, HasAVX512, 0>,
            avx512_fp_binop_p_sae<0x5D, "vmin", X86fminRnd>;
-defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, HasAVX512, 1>,
+defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, HasAVX512, 0>,
            avx512_fp_binop_p_sae<0x5F, "vmax", X86fmaxRnd>;
+let isCodeGenOnly = 1 in {
+ defm VMINC : avx512_fp_binop_p<0x5D, "vmin", X86fminc, HasAVX512, 1>;
+ defm VMAXC : avx512_fp_binop_p<0x5F, "vmax", X86fmaxc, HasAVX512, 1>;
+}
defm VAND : avx512_fp_binop_p<0x54, "vand", X86fand, HasDQI, 1>;
defm VANDN : avx512_fp_binop_p<0x55, "vandn", X86fandn, HasDQI, 0>;
defm VOR : avx512_fp_binop_p<0x56, "vor", X86for, HasDQI, 1>;
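The fifth template argument flipped in this hunk is the IsCommutable bit. vminps/vmaxps are not truly commutative: the x86 semantics return the second source operand when the inputs are unordered (one of them is a NaN) or when both are zeros of opposite sign, so swapping the operands can change the result. Commutability is preserved only for the fast-math-only X86fminc/X86fmaxc nodes via the new isCodeGenOnly VMINC/VMAXC variants. A minimal standalone C sketch of the asymmetry, not part of the patch; it uses the SSE _mm_min_ps intrinsic, which follows the same return-the-second-operand rule as the AVX-512 form:

#include <immintrin.h>
#include <stdio.h>

int main(void) {
    __m128 qnan = _mm_set1_ps(__builtin_nanf(""));
    __m128 one  = _mm_set1_ps(1.0f);

    /* MINPS computes (src1 < src2) ? src1 : src2, so the second source
       is returned whenever the compare is false or unordered. */
    float r1, r2;
    _mm_store_ss(&r1, _mm_min_ps(qnan, one)); /* NaN < 1.0 is false -> 1.0 */
    _mm_store_ss(&r2, _mm_min_ps(one, qnan)); /* 1.0 < NaN is false -> NaN */
    printf("min(NaN, 1.0) = %f\n", r1);       /* prints 1.000000 */
    printf("min(1.0, NaN) = %f\n", r2);       /* prints nan */
    return 0;
}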
--- /dev/null
+; NOTE: Assertions have been autogenerated by update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512f | FileCheck %s --check-prefix=CHECK_UNSAFE --check-prefix=AVX512F_UNSAFE
+; RUN: llc < %s -mtriple=x86_64 -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512
+
+define <16 x float> @test_max_v16f32(<16 x float>* %a_ptr, <16 x float> %b) {
+; CHECK_UNSAFE-LABEL: test_max_v16f32:
+; CHECK_UNSAFE: # BB#0:
+; CHECK_UNSAFE-NEXT: vmaxps (%rdi), %zmm0, %zmm0
+; CHECK_UNSAFE-NEXT: retq
+;
+; CHECK-LABEL: test_max_v16f32:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps (%rdi), %zmm1
+; CHECK-NEXT: vmaxps %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %a = load <16 x float>, <16 x float>* %a_ptr
+ %tmp = fcmp fast ogt <16 x float> %a, %b
+ %tmp4 = select <16 x i1> %tmp, <16 x float> %a, <16 x float> %b
+ ret <16 x float> %tmp4
+}
+
+define <16 x float> @test_min_v16f32(<16 x float>* %a_ptr, <16 x float> %b) {
+; CHECK_UNSAFE-LABEL: test_min_v16f32:
+; CHECK_UNSAFE: # BB#0:
+; CHECK_UNSAFE-NEXT: vminps (%rdi), %zmm0, %zmm0
+; CHECK_UNSAFE-NEXT: retq
+;
+; CHECK-LABEL: test_min_v16f32:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovaps (%rdi), %zmm1
+; CHECK-NEXT: vminps %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %a = load <16 x float>, <16 x float>* %a_ptr
+ %tmp = fcmp fast olt <16 x float> %a, %b
+ %tmp4 = select <16 x i1> %tmp, <16 x float> %a, <16 x float> %b
+ ret <16 x float> %tmp4
+}
+
+define <8 x double> @test_max_v8f64(<8 x double>* %a_ptr, <8 x double> %b) {
+; CHECK_UNSAFE-LABEL: test_max_v8f64:
+; CHECK_UNSAFE: # BB#0:
+; CHECK_UNSAFE-NEXT: vmaxpd (%rdi), %zmm0, %zmm0
+; CHECK_UNSAFE-NEXT: retq
+;
+; CHECK-LABEL: test_max_v8f64:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovapd (%rdi), %zmm1
+; CHECK-NEXT: vmaxpd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %a = load <8 x double>, <8 x double>* %a_ptr
+ %tmp = fcmp fast ogt <8 x double> %a, %b
+ %tmp4 = select <8 x i1> %tmp, <8 x double> %a, <8 x double> %b
+ ret <8 x double> %tmp4
+}
+
+define <8 x double> @test_min_v8f64(<8 x double>* %a_ptr, <8 x double> %b) {
+; CHECK_UNSAFE-LABEL: test_min_v8f64:
+; CHECK_UNSAFE: # BB#0:
+; CHECK_UNSAFE-NEXT: vminpd (%rdi), %zmm0, %zmm0
+; CHECK_UNSAFE-NEXT: retq
+;
+; CHECK-LABEL: test_min_v8f64:
+; CHECK: # BB#0:
+; CHECK-NEXT: vmovapd (%rdi), %zmm1
+; CHECK-NEXT: vminpd %zmm0, %zmm1, %zmm0
+; CHECK-NEXT: retq
+ %a = load <8 x double>, <8 x double>* %a_ptr
+ %tmp = fcmp fast olt <8 x double> %a, %b
+ %tmp4 = select <8 x i1> %tmp, <8 x double> %a, <8 x double> %b
+ ret <8 x double> %tmp4
+}
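Taken together, the two RUN configurations capture the effect of the flag change. With -enable-unsafe-fp-math (and fcmp fast), the compare-and-select lowers to the commutable X86fmaxc/X86fminc nodes, so the load can be folded straight into the instruction's memory operand even though that swaps the sources. Without fast math the operand order is significant: the select here returns %b when the compare is false or unordered, exactly matching the instruction's return-the-second-source rule, so the load stays in a separate vmovaps/vmovapd and the register operands keep their original order.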