ret <8 x double> %2
}
-; Unary fneg of %__C into the vfmaddsub rounding intrinsic folds to one zero-masked vfmsubadd213pd {rn-sae}.
-define <8 x double> @test_mm512_maskz_fmsubadd_round_pd_unary_fneg(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_maskz_fmsubadd_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmsubadd_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; X64-NEXT: retq
-entry:
- %neg = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg, i32 8)
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> zeroinitializer
- ret <8 x double> %2
-}
-
define <8 x double> @test_mm512_fmaddsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fmaddsub_pd:
; CHECK: # %bb.0: # %entry
ret <8 x double> %3
}
-; Two fmas (one on fneg %__C) interleaved by shufflevector match a single vfmaddsub213pd.
-define <8 x double> @test_mm512_fmaddsub_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; CHECK-LABEL: test_mm512_fmaddsub_pd_unary_fneg:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vfmaddsub213pd {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; CHECK-NEXT: ret{{[l|q]}}
-entry:
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
- %1 = fneg <8 x double> %__C
- %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10
- %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
- ret <8 x double> %3
-}
-
define <8 x double> @test_mm512_mask_fmaddsub_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fmaddsub_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %5
}
-; Masked (merge into %__A) fmaddsub with unary fneg selects the 132 form: vfmaddsub132pd {%k1}.
-define <8 x double> @test_mm512_mask_fmaddsub_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fmaddsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub132pd {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmaddsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub132pd {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
-; X64-NEXT: retq
-entry:
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
- %1 = fneg <8 x double> %__C
- %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10
- %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
- %4 = bitcast i8 %__U to <8 x i1>
- %5 = select <8 x i1> %4, <8 x double> %3, <8 x double> %__A
- ret <8 x double> %5
-}
-
define <8 x double> @test_mm512_mask3_fmaddsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmaddsub_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %5
}
-; mask3 variant (merge into %__C) selects the 231 form: vfmaddsub231pd into zmm2, then copy to zmm0.
-define <8 x double> @test_mm512_mask3_fmaddsub_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmaddsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmaddsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
- %1 = fneg <8 x double> %__C
- %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10
- %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
- %4 = bitcast i8 %__U to <8 x i1>
- %5 = select <8 x i1> %4, <8 x double> %3, <8 x double> %__C
- ret <8 x double> %5
-}
-
define <8 x double> @test_mm512_maskz_fmaddsub_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fmaddsub_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %5
}
-; Zero-masked variant: fma pair + shuffle folds to vfmaddsub213pd {%k1} {z}.
-define <8 x double> @test_mm512_maskz_fmaddsub_pd_unary_fneg(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_maskz_fmaddsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub213pd {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmaddsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub213pd {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; X64-NEXT: retq
-entry:
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
- %1 = fneg <8 x double> %__C
- %2 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %1) #10
- %3 = shufflevector <8 x double> %2, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
- %4 = bitcast i8 %__U to <8 x i1>
- %5 = select <8 x i1> %4, <8 x double> %3, <8 x double> zeroinitializer
- ret <8 x double> %5
-}
-
define <8 x double> @test_mm512_fmsubadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; CHECK-LABEL: test_mm512_fmsubadd_pd:
; CHECK: # %bb.0: # %entry
ret <8 x double> %2
}
-; Shuffle taking negated-fma lanes in odd positions matches a single vfmsubadd213pd.
-define <8 x double> @test_mm512_fmsubadd_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; CHECK-LABEL: test_mm512_fmsubadd_pd_unary_fneg:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vfmsubadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; CHECK-NEXT: ret{{[l|q]}}
-entry:
- %neg.i = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
- %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
- %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
- ret <8 x double> %2
-}
-
define <8 x double> @test_mm512_mask_fmsubadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fmsubadd_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %4
}
-; Masked (merge into %__A) fmsubadd with unary fneg selects vfmsubadd132pd {%k1}.
-define <8 x double> @test_mm512_mask_fmsubadd_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fmsubadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd132pd {{.*#+}} zmm0 = (zmm0 * zmm1) -/+ zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmsubadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd132pd {{.*#+}} zmm0 = (zmm0 * zmm1) -/+ zmm2
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
- %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
- %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
- %3 = bitcast i8 %__U to <8 x i1>
- %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> %__A
- ret <8 x double> %4
-}
-
define <8 x double> @test_mm512_maskz_fmsubadd_pd(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_maskz_fmsubadd_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %4
}
-; Zero-masked variant: fmsubadd pattern folds to vfmsubadd213pd {%k1} {z}.
-define <8 x double> @test_mm512_maskz_fmsubadd_pd_unary_fneg(i8 zeroext %__U, <8 x double> %__A, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_maskz_fmsubadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmsubadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
- %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
- %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
- %3 = bitcast i8 %__U to <8 x i1>
- %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> zeroinitializer
- ret <8 x double> %4
-}
-
define <16 x float> @test_mm512_fmaddsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fmaddsub_round_ps:
; CHECK: # %bb.0: # %entry
ret <16 x float> %0
}
-; NOTE(review): here the fneg is NOT folded into an fmsubadd — the checks show the
-; sign flip materialized as vpxord{1to16} feeding vfmaddsub213ps (a missed fold for
-; the rounding intrinsic); confirm whether this is the intended codegen.
-define <16 x float> @test_mm512_fmsubadd_round_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_fmsubadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: vpxord {{\.LCPI.*}}{1to16}, %zmm2, %zmm2
-; X86-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_fmsubadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: vpxord {{.*}}(%rip){1to16}, %zmm2, %zmm2
-; X64-NEXT: vfmaddsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0
-; X64-NEXT: retq
-entry:
- %neg = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
- ret <16 x float> %0
-}
-
define <16 x float> @test_mm512_mask_fmsubadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fmsubadd_round_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %2
}
-; Masked rounding fmsubadd (merge into %__A) folds the fneg into vfmsubadd132ps {rn-sae} {%k1}.
-define <16 x float> @test_mm512_mask_fmsubadd_round_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fmsubadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmsubadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X64-NEXT: retq
-entry:
- %neg = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
- %1 = bitcast i16 %__U to <16 x i1>
- %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
- ret <16 x float> %2
-}
-
define <16 x float> @test_mm512_maskz_fmsubadd_round_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fmsubadd_round_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %2
}
-; Zero-masked rounding fmsubadd folds the fneg into vfmsubadd213ps {rn-sae} {%k1} {z}.
-define <16 x float> @test_mm512_maskz_fmsubadd_round_ps_unary_fneg(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_maskz_fmsubadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmsubadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0 {%k1} {z}
-; X64-NEXT: retq
-entry:
- %neg = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
- %1 = bitcast i16 %__U to <16 x i1>
- %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> zeroinitializer
- ret <16 x float> %2
-}
-
define <16 x float> @test_mm512_fmaddsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fmaddsub_ps:
; CHECK: # %bb.0: # %entry
ret <16 x float> %3
}
-; ps version of the fmaddsub pattern: fma pair + 16-lane interleave matches vfmaddsub213ps.
-define <16 x float> @test_mm512_fmaddsub_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; CHECK-LABEL: test_mm512_fmaddsub_ps_unary_fneg:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vfmaddsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; CHECK-NEXT: ret{{[l|q]}}
-entry:
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
- %1 = fneg <16 x float> %__C
- %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
- %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
- ret <16 x float> %3
-}
-
define <16 x float> @test_mm512_mask_fmaddsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fmaddsub_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %5
}
-; Masked (merge into %__A) ps fmaddsub selects the 132 form: vfmaddsub132ps {%k1}.
-define <16 x float> @test_mm512_mask_fmaddsub_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fmaddsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub132ps {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmaddsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub132ps {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
-; X64-NEXT: retq
-entry:
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
- %1 = fneg <16 x float> %__C
- %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
- %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
- %4 = bitcast i16 %__U to <16 x i1>
- %5 = select <16 x i1> %4, <16 x float> %3, <16 x float> %__A
- ret <16 x float> %5
-}
-
define <16 x float> @test_mm512_mask3_fmaddsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmaddsub_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %5
}
-; mask3 ps variant (merge into %__C) selects vfmaddsub231ps into zmm2, then copy to zmm0.
-define <16 x float> @test_mm512_mask3_fmaddsub_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmaddsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmaddsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
- %1 = fneg <16 x float> %__C
- %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
- %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
- %4 = bitcast i16 %__U to <16 x i1>
- %5 = select <16 x i1> %4, <16 x float> %3, <16 x float> %__C
- ret <16 x float> %5
-}
-
define <16 x float> @test_mm512_maskz_fmaddsub_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fmaddsub_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %5
}
-; Zero-masked ps fmaddsub folds to vfmaddsub213ps {%k1} {z}.
-define <16 x float> @test_mm512_maskz_fmaddsub_ps_unary_fneg(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_maskz_fmaddsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmaddsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmaddsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmaddsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
-; X64-NEXT: retq
-entry:
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
- %1 = fneg <16 x float> %__C
- %2 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %1) #10
- %3 = shufflevector <16 x float> %2, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
- %4 = bitcast i16 %__U to <16 x i1>
- %5 = select <16 x i1> %4, <16 x float> %3, <16 x float> zeroinitializer
- ret <16 x float> %5
-}
-
define <16 x float> @test_mm512_fmsubadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; CHECK-LABEL: test_mm512_fmsubadd_ps:
; CHECK: # %bb.0: # %entry
ret <16 x float> %2
}
-; ps fmsubadd pattern (negated fma in odd lanes) matches a single vfmsubadd213ps.
-define <16 x float> @test_mm512_fmsubadd_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; CHECK-LABEL: test_mm512_fmsubadd_ps_unary_fneg:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vfmsubadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; CHECK-NEXT: ret{{[l|q]}}
-entry:
- %neg.i = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
- %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
- %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
- ret <16 x float> %2
-}
-
define <16 x float> @test_mm512_mask_fmsubadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fmsubadd_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %4
}
-; Masked (merge into %__A) ps fmsubadd selects vfmsubadd132ps {%k1}.
-define <16 x float> @test_mm512_mask_fmsubadd_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fmsubadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) -/+ zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fmsubadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) -/+ zmm2
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
- %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
- %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
- %3 = bitcast i16 %__U to <16 x i1>
- %4 = select <16 x i1> %3, <16 x float> %2, <16 x float> %__A
- ret <16 x float> %4
-}
-
define <16 x float> @test_mm512_maskz_fmsubadd_ps(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_maskz_fmsubadd_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %4
}
-; Zero-masked ps fmsubadd folds to vfmsubadd213ps {%k1} {z}.
-define <16 x float> @test_mm512_maskz_fmsubadd_ps_unary_fneg(i16 zeroext %__U, <16 x float> %__A, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_maskz_fmsubadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_maskz_fmsubadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) -/+ zmm2
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
- %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
- %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
- %3 = bitcast i16 %__U to <16 x i1>
- %4 = select <16 x i1> %3, <16 x float> %2, <16 x float> zeroinitializer
- ret <16 x float> %4
-}
-
define <8 x double> @test_mm512_mask3_fmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsub_round_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %2
}
-; fneg of addend into the rounding vfmadd intrinsic, merged into %__C, folds to vfmsub231pd {rn-sae} {%k1}.
-define <8 x double> @test_mm512_mask3_fmsub_round_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsub_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsub_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg, i32 8)
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
- ret <8 x double> %2
-}
-
define <8 x double> @test_mm512_mask3_fmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsub_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %2
}
-; llvm.fma on fneg %__C merged into %__C folds to vfmsub231pd {%k1}.
-define <8 x double> @test_mm512_mask3_fmsub_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
- ret <8 x double> %2
-}
-
define <16 x float> @test_mm512_mask3_fmsub_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsub_round_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %2
}
-; ps version: rounding vfmadd on fneg %__C merged into %__C folds to vfmsub231ps {rn-sae} {%k1}.
-define <16 x float> @test_mm512_mask3_fmsub_round_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsub_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsub_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
- %1 = bitcast i16 %__U to <16 x i1>
- %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
- ret <16 x float> %2
-}
-
define <16 x float> @test_mm512_mask3_fmsub_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsub_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %2
}
-; llvm.fma on fneg %__C merged into %__C folds to vfmsub231ps {%k1}.
-define <16 x float> @test_mm512_mask3_fmsub_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsub_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsub_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
- %1 = bitcast i16 %__U to <16 x i1>
- %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
- ret <16 x float> %2
-}
-
define <8 x double> @test_mm512_mask3_fmsubadd_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsubadd_round_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %2
}
-; Rounding vfmaddsub on fneg %__C merged into %__C folds to vfmsubadd231pd {rn-sae} {%k1}.
-define <8 x double> @test_mm512_mask3_fmsubadd_round_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsubadd_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsubadd_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg, i32 8)
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
- ret <8 x double> %2
-}
-
define <8 x double> @test_mm512_mask3_fmsubadd_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsubadd_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %4
}
-; fma pair + interleave (fmsubadd pattern) merged into %__C folds to vfmsubadd231pd {%k1}.
-define <8 x double> @test_mm512_mask3_fmsubadd_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsubadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231pd {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsubadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231pd {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %neg.i) #10
- %1 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C) #10
- %2 = shufflevector <8 x double> %1, <8 x double> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
- %3 = bitcast i8 %__U to <8 x i1>
- %4 = select <8 x i1> %3, <8 x double> %2, <8 x double> %__C
- ret <8 x double> %4
-}
-
define <16 x float> @test_mm512_mask3_fmsubadd_round_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsubadd_round_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %2
}
-; ps rounding vfmaddsub on fneg %__C merged into %__C folds to vfmsubadd231ps {rn-sae} {%k1}.
-define <16 x float> @test_mm512_mask3_fmsubadd_round_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsubadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsubadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg, i32 8)
- %1 = bitcast i16 %__U to <16 x i1>
- %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
- ret <16 x float> %2
-}
-
define <16 x float> @test_mm512_mask3_fmsubadd_ps(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fmsubadd_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %4
}
-; ps fma pair + interleave (fmsubadd pattern) merged into %__C folds to vfmsubadd231ps {%k1}.
-define <16 x float> @test_mm512_mask3_fmsubadd_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fmsubadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsubadd231ps {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fmsubadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsubadd231ps {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %neg.i) #10
- %1 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C) #10
- %2 = shufflevector <16 x float> %1, <16 x float> %0, <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23, i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
- %3 = bitcast i16 %__U to <16 x i1>
- %4 = select <16 x i1> %3, <16 x float> %2, <16 x float> %__C
- ret <16 x float> %4
-}
-
define <8 x double> @test_mm512_mask_fnmadd_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fnmadd_round_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %2
}
-; fneg of the multiplicand %__A into the rounding vfmadd, merged into %__A, folds to vfnmadd132pd {rn-sae} {%k1}.
-define <8 x double> @test_mm512_mask_fnmadd_round_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fnmadd_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmadd_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X64-NEXT: retq
-entry:
- %neg = fneg <8 x double> %__A
- %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %neg, <8 x double> %__B, <8 x double> %__C, i32 8)
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
- ret <8 x double> %2
-}
-
define <8 x double> @test_mm512_mask_fnmadd_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fnmadd_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %2
}
-define <8 x double> @test_mm512_mask_fnmadd_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fnmadd_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmadd_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <8 x double> %__A
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %neg.i, <8 x double> %__B, <8 x double> %__C) #10
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
- ret <8 x double> %2
-}
-
define <16 x float> @test_mm512_mask_fnmadd_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fnmadd_round_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %2
}
-define <16 x float> @test_mm512_mask_fnmadd_round_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fnmadd_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmadd_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X64-NEXT: retq
-entry:
- %neg = fneg <16 x float> %__A
- %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %neg, <16 x float> %__B, <16 x float> %__C, i32 8)
- %1 = bitcast i16 %__U to <16 x i1>
- %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
- ret <16 x float> %2
-}
-
define <16 x float> @test_mm512_mask_fnmadd_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fnmadd_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %2
}
-define <16 x float> @test_mm512_mask_fnmadd_ps_unary_fneg(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
-; X86-LABEL: test_mm512_mask_fnmadd_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmadd132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmadd_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmadd132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <16 x float> %__A
- %0 = tail call <16 x float> @llvm.fma.v16f32(<16 x float> %neg.i, <16 x float> %__B, <16 x float> %__C) #10
- %1 = bitcast i16 %__U to <16 x i1>
- %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__A
- ret <16 x float> %2
-}
-
define <8 x double> @test_mm512_mask_fnmsub_round_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fnmsub_round_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %2
}
-define <8 x double> @test_mm512_mask_fnmsub_round_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fnmsub_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmsub_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
-; X64-NEXT: retq
-entry:
- %neg = fneg <8 x double> %__B
- %neg1 = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %neg, <8 x double> %neg1, i32 8)
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
- ret <8 x double> %2
-}
-
define <8 x double> @test_mm512_mask3_fnmsub_round_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fnmsub_round_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %2
}
-define <8 x double> @test_mm512_mask3_fnmsub_round_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fnmsub_round_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fnmsub_round_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub231pd {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg = fneg <8 x double> %__B
- %neg1 = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %__A, <8 x double> %neg, <8 x double> %neg1, i32 8)
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
- ret <8 x double> %2
-}
-
define <8 x double> @test_mm512_mask_fnmsub_pd(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
; X86-LABEL: test_mm512_mask_fnmsub_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %2
}
-define <8 x double> @test_mm512_mask_fnmsub_pd_unary_fneg(<8 x double> %__A, i8 zeroext %__U, <8 x double> %__B, <8 x double> %__C) {
-; X86-LABEL: test_mm512_mask_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <8 x double> %__B
- %neg2.i = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %neg.i, <8 x double> %neg2.i) #10
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__A
- ret <8 x double> %2
-}
-
define <8 x double> @test_mm512_mask3_fnmsub_pd(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
; X86-LABEL: test_mm512_mask3_fnmsub_pd:
; X86: # %bb.0: # %entry
ret <8 x double> %2
}
-define <8 x double> @test_mm512_mask3_fnmsub_pd_unary_fneg(<8 x double> %__A, <8 x double> %__B, <8 x double> %__C, i8 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fnmsub_pd_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub231pd {{.*#+}} zmm2 = -(zmm0 * zmm1) - zmm2
-; X86-NEXT: vmovapd %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fnmsub_pd_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub231pd {{.*#+}} zmm2 = -(zmm0 * zmm1) - zmm2
-; X64-NEXT: vmovapd %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg.i = fneg <8 x double> %__B
- %neg2.i = fneg <8 x double> %__C
- %0 = tail call <8 x double> @llvm.fma.v8f64(<8 x double> %__A, <8 x double> %neg.i, <8 x double> %neg2.i) #10
- %1 = bitcast i8 %__U to <8 x i1>
- %2 = select <8 x i1> %1, <8 x double> %0, <8 x double> %__C
- ret <8 x double> %2
-}
-
define <16 x float> @test_mm512_mask_fnmsub_round_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fnmsub_round_ps:
; X86: # %bb.0: # %entry
ret <16 x float> %2
}
-define <16 x float> @test_mm512_mask3_fnmsub_round_ps_unary_fneg(<16 x float> %__A, <16 x float> %__B, <16 x float> %__C, i16 zeroext %__U) {
-; X86-LABEL: test_mm512_mask3_fnmsub_round_ps_unary_fneg:
-; X86: # %bb.0: # %entry
-; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfnmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X86-NEXT: vmovaps %zmm2, %zmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: test_mm512_mask3_fnmsub_round_ps_unary_fneg:
-; X64: # %bb.0: # %entry
-; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfnmsub231ps {rn-sae}, %zmm1, %zmm0, %zmm2 {%k1}
-; X64-NEXT: vmovaps %zmm2, %zmm0
-; X64-NEXT: retq
-entry:
- %neg = fneg <16 x float> %__B
- %neg1 = fneg <16 x float> %__C
- %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %__A, <16 x float> %neg, <16 x float> %neg1, i32 8)
- %1 = bitcast i16 %__U to <16 x i1>
- %2 = select <16 x i1> %1, <16 x float> %0, <16 x float> %__C
- ret <16 x float> %2
-}
-
define <16 x float> @test_mm512_mask_fnmsub_ps(<16 x float> %__A, i16 zeroext %__U, <16 x float> %__B, <16 x float> %__C) {
; X86-LABEL: test_mm512_mask_fnmsub_ps:
; X86: # %bb.0: # %entry