From 2da055d719bd84ec5de2db9b20488da3e2c66d78 Mon Sep 17 00:00:00 2001 From: Cameron McInally Date: Wed, 12 Jun 2019 19:39:42 +0000 Subject: [PATCH] [NFC][CodeGen] Add unary FNeg tests to X86/avx512vl-intrinsics-fast-isel.ll Patch 2 of 3 for X86/avx512vl-intrinsics-fast-isel.ll git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363194 91177308-0d34-0410-b5e6-96231b3b80d8 --- .../X86/avx512vl-intrinsics-fast-isel.ll | 620 +++++++++++++++++- 1 file changed, 619 insertions(+), 1 deletion(-) diff --git a/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll index af0529fe41b..24479988b58 100644 --- a/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll +++ b/test/CodeGen/X86/avx512vl-intrinsics-fast-isel.ll @@ -5219,6 +5219,29 @@ entry: ret <8 x float> %2 } +define <8 x float> @test_mm256_mask3_fnmadd_ps_unary_fneg(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) { +; X86-LABEL: test_mm256_mask3_fnmadd_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfnmadd231ps {{.*#+}} ymm2 = -(ymm0 * ymm1) + ymm2 +; X86-NEXT: vmovaps %ymm2, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_mask3_fnmadd_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfnmadd231ps {{.*#+}} ymm2 = -(ymm0 * ymm1) + ymm2 +; X64-NEXT: vmovaps %ymm2, %ymm0 +; X64-NEXT: retq +entry: + %neg.i = fneg <8 x float> %__A + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %neg.i, <8 x float> %__B, <8 x float> %__C) #9 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> %__C + ret <8 x float> %2 +} + define <8 x float> @test_mm256_maskz_fmadd_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { ; X86-LABEL: test_mm256_maskz_fmadd_ps: ; X86: # %bb.0: # %entry @@ -5260,6 +5283,27 @@ entry: ret <8 x float> %2 } +define <8 x float> @test_mm256_maskz_fmsub_ps_unary_fneg(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { +; X86-LABEL: test_mm256_maskz_fmsub_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_maskz_fmsub_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) - ymm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <8 x float> %__C + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %neg.i) #9 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer + ret <8 x float> %2 +} + define <8 x float> @test_mm256_maskz_fnmadd_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { ; X86-LABEL: test_mm256_maskz_fnmadd_ps: ; X86: # %bb.0: # %entry @@ -5281,6 +5325,27 @@ entry: ret <8 x float> %2 } +define <8 x float> @test_mm256_maskz_fnmadd_ps_unary_fneg(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { +; X86-LABEL: test_mm256_maskz_fnmadd_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_maskz_fnmadd_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) + ymm2 +; X64-NEXT: 
retq +entry: + %neg.i = fneg <8 x float> %__A + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %neg.i, <8 x float> %__B, <8 x float> %__C) #9 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer + ret <8 x float> %2 +} + define <8 x float> @test_mm256_maskz_fnmsub_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { ; X86-LABEL: test_mm256_maskz_fnmsub_ps: ; X86: # %bb.0: # %entry @@ -5303,6 +5368,28 @@ entry: ret <8 x float> %2 } +define <8 x float> @test_mm256_maskz_fnmsub_ps_unary_fneg(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { +; X86-LABEL: test_mm256_maskz_fnmsub_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_maskz_fnmsub_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfnmsub213ps {{.*#+}} ymm0 = -(ymm1 * ymm0) - ymm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <8 x float> %__A + %neg1.i = fneg <8 x float> %__C + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %neg.i, <8 x float> %__B, <8 x float> %neg1.i) #9 + %1 = bitcast i8 %__U to <8 x i1> + %2 = select <8 x i1> %1, <8 x float> %0, <8 x float> zeroinitializer + ret <8 x float> %2 +} + define <2 x double> @test_mm_mask_fmaddsub_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_mask_fmaddsub_pd: ; X86: # %bb.0: # %entry @@ -5327,6 +5414,30 @@ entry: ret <2 x double> %5 } +define <2 x double> @test_mm_mask_fmaddsub_pd_unary_fneg(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) { +; X86-LABEL: test_mm_mask_fmaddsub_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * xmm1) +/- xmm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_mask_fmaddsub_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub132pd {{.*#+}} xmm0 = (xmm0 * xmm1) +/- xmm2 +; X64-NEXT: retq +entry: + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9 + %1 = fneg <2 x double> %__C + %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %1) #9 + %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <2 x i32> + %5 = select <2 x i1> %extract.i, <2 x double> %3, <2 x double> %__A + ret <2 x double> %5 +} + define <2 x double> @test_mm_mask_fmsubadd_pd(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_mask_fmsubadd_pd: ; X86: # %bb.0: # %entry @@ -5351,6 +5462,30 @@ entry: ret <2 x double> %4 } +define <2 x double> @test_mm_mask_fmsubadd_pd_unary_fneg(<2 x double> %__A, i8 zeroext %__U, <2 x double> %__B, <2 x double> %__C) { +; X86-LABEL: test_mm_mask_fmsubadd_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * xmm1) -/+ xmm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_mask_fmsubadd_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmsubadd132pd {{.*#+}} xmm0 = (xmm0 * xmm1) -/+ xmm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <2 x double> %__C + %0 = tail call 
<2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %neg.i) #9 + %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9 + %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> + %3 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> + %4 = select <2 x i1> %extract.i, <2 x double> %2, <2 x double> %__A + ret <2 x double> %4 +} + define <2 x double> @test_mm_mask3_fmaddsub_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fmaddsub_pd: ; X86: # %bb.0: # %entry @@ -5377,6 +5512,32 @@ entry: ret <2 x double> %5 } +define <2 x double> @test_mm_mask3_fmaddsub_pd_unary_fneg(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) { +; X86-LABEL: test_mm_mask3_fmaddsub_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub231pd {{.*#+}} xmm2 = (xmm0 * xmm1) +/- xmm2 +; X86-NEXT: vmovapd %xmm2, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_mask3_fmaddsub_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub231pd {{.*#+}} xmm2 = (xmm0 * xmm1) +/- xmm2 +; X64-NEXT: vmovapd %xmm2, %xmm0 +; X64-NEXT: retq +entry: + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9 + %1 = fneg <2 x double> %__C + %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %1) #9 + %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <2 x i32> + %5 = select <2 x i1> %extract.i, <2 x double> %3, <2 x double> %__C + ret <2 x double> %5 +} + define <2 x double> @test_mm_maskz_fmaddsub_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fmaddsub_pd: ; X86: # %bb.0: # %entry @@ -5401,6 +5562,30 @@ entry: ret <2 x double> %5 } +define <2 x double> @test_mm_maskz_fmaddsub_pd_unary_fneg(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { +; X86-LABEL: test_mm_maskz_fmaddsub_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_maskz_fmaddsub_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 +; X64-NEXT: retq +entry: + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9 + %1 = fneg <2 x double> %__C + %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %1) #9 + %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <2 x i32> + %5 = select <2 x i1> %extract.i, <2 x double> %3, <2 x double> zeroinitializer + ret <2 x double> %5 +} + define <2 x double> @test_mm_maskz_fmsubadd_pd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { ; X86-LABEL: test_mm_maskz_fmsubadd_pd: ; X86: # %bb.0: # %entry @@ -5425,6 +5610,30 @@ entry: ret <2 x double> %4 } +define <2 x double> @test_mm_maskz_fmsubadd_pd_unary_fneg(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) { +; 
X86-LABEL: test_mm_maskz_fmsubadd_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_maskz_fmsubadd_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <2 x double> %__C + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %neg.i) #9 + %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C) #9 + %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> + %3 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <2 x i32> + %4 = select <2 x i1> %extract.i, <2 x double> %2, <2 x double> zeroinitializer + ret <2 x double> %4 +} + define <4 x double> @test_mm256_mask_fmaddsub_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) { ; X86-LABEL: test_mm256_mask_fmaddsub_pd: ; X86: # %bb.0: # %entry @@ -5449,6 +5658,30 @@ entry: ret <4 x double> %5 } +define <4 x double> @test_mm256_mask_fmaddsub_pd_unary_fneg(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) { +; X86-LABEL: test_mm256_mask_fmaddsub_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * ymm1) +/- ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_mask_fmaddsub_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub132pd {{.*#+}} ymm0 = (ymm0 * ymm1) +/- ymm2 +; X64-NEXT: retq +entry: + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9 + %1 = fneg <4 x double> %__C + %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %1) #9 + %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> + %5 = select <4 x i1> %extract.i, <4 x double> %3, <4 x double> %__A + ret <4 x double> %5 +} + define <4 x double> @test_mm256_mask_fmsubadd_pd(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) { ; X86-LABEL: test_mm256_mask_fmsubadd_pd: ; X86: # %bb.0: # %entry @@ -5473,6 +5706,30 @@ entry: ret <4 x double> %4 } +define <4 x double> @test_mm256_mask_fmsubadd_pd_unary_fneg(<4 x double> %__A, i8 zeroext %__U, <4 x double> %__B, <4 x double> %__C) { +; X86-LABEL: test_mm256_mask_fmsubadd_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * ymm1) -/+ ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_mask_fmsubadd_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmsubadd132pd {{.*#+}} ymm0 = (ymm0 * ymm1) -/+ ymm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <4 x double> %__C + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %neg.i) #9 + %1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9 + %2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> + %3 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> + %4 = select <4 x i1> %extract.i, <4 
x double> %2, <4 x double> %__A + ret <4 x double> %4 +} + define <4 x double> @test_mm256_mask3_fmaddsub_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) { ; X86-LABEL: test_mm256_mask3_fmaddsub_pd: ; X86: # %bb.0: # %entry @@ -5499,6 +5756,32 @@ entry: ret <4 x double> %5 } +define <4 x double> @test_mm256_mask3_fmaddsub_pd_unary_fneg(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) { +; X86-LABEL: test_mm256_mask3_fmaddsub_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub231pd {{.*#+}} ymm2 = (ymm0 * ymm1) +/- ymm2 +; X86-NEXT: vmovapd %ymm2, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_mask3_fmaddsub_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub231pd {{.*#+}} ymm2 = (ymm0 * ymm1) +/- ymm2 +; X64-NEXT: vmovapd %ymm2, %ymm0 +; X64-NEXT: retq +entry: + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9 + %1 = fneg <4 x double> %__C + %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %1) #9 + %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> + %5 = select <4 x i1> %extract.i, <4 x double> %3, <4 x double> %__C + ret <4 x double> %5 +} + define <4 x double> @test_mm256_maskz_fmaddsub_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) { ; X86-LABEL: test_mm256_maskz_fmaddsub_pd: ; X86: # %bb.0: # %entry @@ -5523,6 +5806,30 @@ entry: ret <4 x double> %5 } +define <4 x double> @test_mm256_maskz_fmaddsub_pd_unary_fneg(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) { +; X86-LABEL: test_mm256_maskz_fmaddsub_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_maskz_fmaddsub_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 +; X64-NEXT: retq +entry: + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9 + %1 = fneg <4 x double> %__C + %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %1) #9 + %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> + %5 = select <4 x i1> %extract.i, <4 x double> %3, <4 x double> zeroinitializer + ret <4 x double> %5 +} + define <4 x double> @test_mm256_maskz_fmsubadd_pd(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) { ; X86-LABEL: test_mm256_maskz_fmsubadd_pd: ; X86: # %bb.0: # %entry @@ -5547,7 +5854,31 @@ entry: ret <4 x double> %4 } -define <4 x float> @test_mm_mask_fmaddsub_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) { +define <4 x double> @test_mm256_maskz_fmsubadd_pd_unary_fneg(i8 zeroext %__U, <4 x double> %__A, <4 x double> %__B, <4 x double> %__C) { +; X86-LABEL: test_mm256_maskz_fmsubadd_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 +; X86-NEXT: retl +; +; X64-LABEL: 
test_mm256_maskz_fmsubadd_pd_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <4 x double> %__C + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %neg.i) #9 + %1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C) #9 + %2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> + %3 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> + %4 = select <4 x i1> %extract.i, <4 x double> %2, <4 x double> zeroinitializer + ret <4 x double> %4 +} + +define <4 x float> @test_mm_mask_fmaddsub_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_mask_fmaddsub_ps: ; X86: # %bb.0: # %entry ; X86-NEXT: movb {{[0-9]+}}(%esp), %al @@ -5571,6 +5902,30 @@ entry: ret <4 x float> %5 } +define <4 x float> @test_mm_mask_fmaddsub_ps_unary_fneg(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) { +; X86-LABEL: test_mm_mask_fmaddsub_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * xmm1) +/- xmm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_mask_fmaddsub_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub132ps {{.*#+}} xmm0 = (xmm0 * xmm1) +/- xmm2 +; X64-NEXT: retq +entry: + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9 + %1 = fneg <4 x float> %__C + %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %1) #9 + %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> + %5 = select <4 x i1> %extract.i, <4 x float> %3, <4 x float> %__A + ret <4 x float> %5 +} + define <4 x float> @test_mm_mask_fmsubadd_ps(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_mask_fmsubadd_ps: ; X86: # %bb.0: # %entry @@ -5595,6 +5950,30 @@ entry: ret <4 x float> %4 } +define <4 x float> @test_mm_mask_fmsubadd_ps_unary_fneg(<4 x float> %__A, i8 zeroext %__U, <4 x float> %__B, <4 x float> %__C) { +; X86-LABEL: test_mm_mask_fmsubadd_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) -/+ xmm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_mask_fmsubadd_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmsubadd132ps {{.*#+}} xmm0 = (xmm0 * xmm1) -/+ xmm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <4 x float> %__C + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %neg.i) #9 + %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9 + %2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> + %3 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> + %4 = select <4 x i1> %extract.i, <4 x float> %2, <4 x float> %__A + ret <4 x float> %4 +} + define <4 x float> @test_mm_mask3_fmaddsub_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fmaddsub_ps: ; X86: # %bb.0: # %entry @@ -5621,6 +6000,32 @@ entry: ret 
<4 x float> %5 } +define <4 x float> @test_mm_mask3_fmaddsub_ps_unary_fneg(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) { +; X86-LABEL: test_mm_mask3_fmaddsub_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub231ps {{.*#+}} xmm2 = (xmm0 * xmm1) +/- xmm2 +; X86-NEXT: vmovaps %xmm2, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_mask3_fmaddsub_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub231ps {{.*#+}} xmm2 = (xmm0 * xmm1) +/- xmm2 +; X64-NEXT: vmovaps %xmm2, %xmm0 +; X64-NEXT: retq +entry: + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9 + %1 = fneg <4 x float> %__C + %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %1) #9 + %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> + %5 = select <4 x i1> %extract.i, <4 x float> %3, <4 x float> %__C + ret <4 x float> %5 +} + define <4 x float> @test_mm_maskz_fmaddsub_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_maskz_fmaddsub_ps: ; X86: # %bb.0: # %entry @@ -5645,6 +6050,30 @@ entry: ret <4 x float> %5 } +define <4 x float> @test_mm_maskz_fmaddsub_ps_unary_fneg(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; X86-LABEL: test_mm_maskz_fmaddsub_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_maskz_fmaddsub_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 +; X64-NEXT: retq +entry: + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9 + %1 = fneg <4 x float> %__C + %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %1) #9 + %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %4, <8 x i1> undef, <4 x i32> + %5 = select <4 x i1> %extract.i, <4 x float> %3, <4 x float> zeroinitializer + ret <4 x float> %5 +} + define <4 x float> @test_mm_maskz_fmsubadd_ps(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { ; X86-LABEL: test_mm_maskz_fmsubadd_ps: ; X86: # %bb.0: # %entry @@ -5669,6 +6098,30 @@ entry: ret <4 x float> %4 } +define <4 x float> @test_mm_maskz_fmsubadd_ps_unary_fneg(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) { +; X86-LABEL: test_mm_maskz_fmsubadd_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm_maskz_fmsubadd_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <4 x float> %__C + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %neg.i) #9 + %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C) #9 + %2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> + %3 = 
bitcast i8 %__U to <8 x i1> + %extract.i = shufflevector <8 x i1> %3, <8 x i1> undef, <4 x i32> + %4 = select <4 x i1> %extract.i, <4 x float> %2, <4 x float> zeroinitializer + ret <4 x float> %4 +} + define <8 x float> @test_mm256_mask_fmaddsub_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) { ; X86-LABEL: test_mm256_mask_fmaddsub_ps: ; X86: # %bb.0: # %entry @@ -5692,6 +6145,29 @@ entry: ret <8 x float> %5 } +define <8 x float> @test_mm256_mask_fmaddsub_ps_unary_fneg(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) { +; X86-LABEL: test_mm256_mask_fmaddsub_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * ymm1) +/- ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_mask_fmaddsub_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub132ps {{.*#+}} ymm0 = (ymm0 * ymm1) +/- ymm2 +; X64-NEXT: retq +entry: + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9 + %1 = fneg <8 x float> %__C + %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %1) #9 + %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %5 = select <8 x i1> %4, <8 x float> %3, <8 x float> %__A + ret <8 x float> %5 +} + define <8 x float> @test_mm256_mask_fmsubadd_ps(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) { ; X86-LABEL: test_mm256_mask_fmsubadd_ps: ; X86: # %bb.0: # %entry @@ -5715,6 +6191,29 @@ entry: ret <8 x float> %4 } +define <8 x float> @test_mm256_mask_fmsubadd_ps_unary_fneg(<8 x float> %__A, i8 zeroext %__U, <8 x float> %__B, <8 x float> %__C) { +; X86-LABEL: test_mm256_mask_fmsubadd_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) -/+ ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_mask_fmsubadd_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmsubadd132ps {{.*#+}} ymm0 = (ymm0 * ymm1) -/+ ymm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <8 x float> %__C + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %neg.i) #9 + %1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9 + %2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> + %3 = bitcast i8 %__U to <8 x i1> + %4 = select <8 x i1> %3, <8 x float> %2, <8 x float> %__A + ret <8 x float> %4 +} + define <8 x float> @test_mm256_mask3_fmaddsub_ps(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) { ; X86-LABEL: test_mm256_mask3_fmaddsub_ps: ; X86: # %bb.0: # %entry @@ -5740,6 +6239,31 @@ entry: ret <8 x float> %5 } +define <8 x float> @test_mm256_mask3_fmaddsub_ps_unary_fneg(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C, i8 zeroext %__U) { +; X86-LABEL: test_mm256_mask3_fmaddsub_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub231ps {{.*#+}} ymm2 = (ymm0 * ymm1) +/- ymm2 +; X86-NEXT: vmovaps %ymm2, %ymm0 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_mask3_fmaddsub_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub231ps {{.*#+}} ymm2 = (ymm0 * ymm1) +/- ymm2 +; X64-NEXT: vmovaps %ymm2, %ymm0 +; X64-NEXT: retq +entry: + %0 = 
tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9 + %1 = fneg <8 x float> %__C + %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %1) #9 + %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %5 = select <8 x i1> %4, <8 x float> %3, <8 x float> %__C + ret <8 x float> %5 +} + define <8 x float> @test_mm256_maskz_fmaddsub_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { ; X86-LABEL: test_mm256_maskz_fmaddsub_ps: ; X86: # %bb.0: # %entry @@ -5763,6 +6287,29 @@ entry: ret <8 x float> %5 } +define <8 x float> @test_mm256_maskz_fmaddsub_ps_unary_fneg(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { +; X86-LABEL: test_mm256_maskz_fmaddsub_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_maskz_fmaddsub_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 +; X64-NEXT: retq +entry: + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9 + %1 = fneg <8 x float> %__C + %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %1) #9 + %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> + %4 = bitcast i8 %__U to <8 x i1> + %5 = select <8 x i1> %4, <8 x float> %3, <8 x float> zeroinitializer + ret <8 x float> %5 +} + define <8 x float> @test_mm256_maskz_fmsubadd_ps(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { ; X86-LABEL: test_mm256_maskz_fmsubadd_ps: ; X86: # %bb.0: # %entry @@ -5786,6 +6333,29 @@ entry: ret <8 x float> %4 } +define <8 x float> @test_mm256_maskz_fmsubadd_ps_unary_fneg(i8 zeroext %__U, <8 x float> %__A, <8 x float> %__B, <8 x float> %__C) { +; X86-LABEL: test_mm256_maskz_fmsubadd_ps_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 +; X86-NEXT: retl +; +; X64-LABEL: test_mm256_maskz_fmsubadd_ps_unary_fneg: +; X64: # %bb.0: # %entry +; X64-NEXT: kmovw %edi, %k1 +; X64-NEXT: vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 +; X64-NEXT: retq +entry: + %neg.i = fneg <8 x float> %__C + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %neg.i) #9 + %1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %__A, <8 x float> %__B, <8 x float> %__C) #9 + %2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> + %3 = bitcast i8 %__U to <8 x i1> + %4 = select <8 x i1> %3, <8 x float> %2, <8 x float> zeroinitializer + ret <8 x float> %4 +} + define <2 x double> @test_mm_mask3_fmsub_pd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) { ; X86-LABEL: test_mm_mask3_fmsub_pd: ; X86: # %bb.0: # %entry @@ -5810,6 +6380,30 @@ entry: ret <2 x double> %2 } +define <2 x double> @test_mm_mask3_fmsub_pd_unary_fneg(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 zeroext %__U) { +; X86-LABEL: test_mm_mask3_fmsub_pd_unary_fneg: +; X86: # %bb.0: # %entry +; X86-NEXT: movb {{[0-9]+}}(%esp), %al +; X86-NEXT: kmovw %eax, %k1 +; X86-NEXT: vfmsub231pd {{.*#+}} xmm2 = (xmm0 * xmm1) - xmm2 +; X86-NEXT: vmovapd %xmm2, %xmm0 +; X86-NEXT: retl +; +; X64-LABEL: 
test_mm_mask3_fmsub_pd_unary_fneg:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vfmsub231pd {{.*#+}} xmm2 = (xmm0 * xmm1) - xmm2
+; X64-NEXT:    vmovapd %xmm2, %xmm0
+; X64-NEXT:    retq
+entry:
+  %neg.i = fneg <2 x double> %__C
+  %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %__A, <2 x double> %__B, <2 x double> %neg.i) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %2 = select <2 x i1> %extract.i, <2 x double> %0, <2 x double> %__C
+  ret <2 x double> %2
+}
+
 define <4 x double> @test_mm256_mask3_fmsub_pd(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm256_mask3_fmsub_pd:
 ; X86:       # %bb.0: # %entry
@@ -5834,6 +6428,30 @@ entry:
   ret <4 x double> %2
 }
 
+define <4 x double> @test_mm256_mask3_fmsub_pd_unary_fneg(<4 x double> %__A, <4 x double> %__B, <4 x double> %__C, i8 zeroext %__U) {
+; X86-LABEL: test_mm256_mask3_fmsub_pd_unary_fneg:
+; X86:       # %bb.0: # %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    kmovw %eax, %k1
+; X86-NEXT:    vfmsub231pd {{.*#+}} ymm2 = (ymm0 * ymm1) - ymm2
+; X86-NEXT:    vmovapd %ymm2, %ymm0
+; X86-NEXT:    retl
+;
+; X64-LABEL: test_mm256_mask3_fmsub_pd_unary_fneg:
+; X64:       # %bb.0: # %entry
+; X64-NEXT:    kmovw %edi, %k1
+; X64-NEXT:    vfmsub231pd {{.*#+}} ymm2 = (ymm0 * ymm1) - ymm2
+; X64-NEXT:    vmovapd %ymm2, %ymm0
+; X64-NEXT:    retq
+entry:
+  %neg.i = fneg <4 x double> %__C
+  %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %__A, <4 x double> %__B, <4 x double> %neg.i) #9
+  %1 = bitcast i8 %__U to <8 x i1>
+  %extract.i = shufflevector <8 x i1> %1, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %2 = select <4 x i1> %extract.i, <4 x double> %0, <4 x double> %__C
+  ret <4 x double> %2
+}
+
 define <4 x float> @test_mm_mask3_fmsub_ps(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 zeroext %__U) {
 ; X86-LABEL: test_mm_mask3_fmsub_ps:
 ; X86:       # %bb.0: # %entry
-- 
2.40.0
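
For context (not part of the patch itself): the pre-existing tests in this file spell
floating-point negation as a binary subtraction from -0.0, while the new *_unary_fneg
tests exercise LLVM's dedicated unary fneg instruction, so that both forms keep
selecting the same FMA instructions. A minimal sketch of the two equivalent IR forms,
using hypothetical function names not present in the patch:

; Binary idiom: negate %x by subtracting it from -0.0 in each lane.
define <2 x double> @neg_binary(<2 x double> %x) {
  %neg = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x
  ret <2 x double> %neg
}

; Unary fneg: same result, with no constant vector operand to materialize.
define <2 x double> @neg_unary(<2 x double> %x) {
  %neg = fneg <2 x double> %x
  ret <2 x double> %neg
}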