let Predicates = [HasAVX512] in {
let AddedComplexity = 15 in {
- // Move scalar to XMM zero-extended, zeroing a VR128X then do a
- // MOVS{S,D} to the lower bits.
- def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
- (VMOVSSZrr (v4f32 (AVX512_128_SET0)), FR32X:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
(VMOVSSZrr (v4f32 (AVX512_128_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
(VMOVSSZrr (v4i32 (AVX512_128_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
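
The surviving patterns implement zero-extension of a scalar into a vector by first materializing an all-zeros register (AVX512_128_SET0) and then doing a MOVSS into its low 32 bits, leaving lanes 1-3 zero. A minimal C intrinsics sketch of the same idiom (the helper name is illustrative, not from the patch):

#include <immintrin.h>

/* Zero-extend a scalar float into a 128-bit vector: lane 0 holds the
   scalar, lanes 1-3 are zero -- the same zero-then-MOVSS idiom the
   VMOVSSZrr patterns above select. */
static __m128 zext_scalar_to_v4f32(float s) {
    __m128 zero = _mm_setzero_ps();          /* AVX512_128_SET0 / V_SET0 */
    return _mm_move_ss(zero, _mm_set_ss(s)); /* MOVSS: lane 0 from 2nd arg */
}
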
let Predicates = [NoSSE41], AddedComplexity = 15 in {
// Move scalar to XMM zero-extended, zeroing a VR128 then do a
// MOVSS to the lower bits.
- def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
- (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
(MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
(MOVSSrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
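
The v4i32 variant cannot feed an integer vector to MOVSSrr directly, so the pattern routes the operand through COPY_TO_REGCLASS. In C terms that is a bitcast around the same float move; a sketch (helper name illustrative):

#include <immintrin.h>

/* Zero lanes 1-3 of an integer vector by bitcasting to float, merging
   with zero via MOVSS, and bitcasting back -- the C analogue of the
   COPY_TO_REGCLASS in the v4i32 pattern above. */
static __m128i zext_low_lane_v4i32(__m128i v) {
    __m128 merged = _mm_move_ss(_mm_setzero_ps(), _mm_castsi128_ps(v));
    return _mm_castps_si128(merged);
}
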
// Blends have better throughput on SandyBridge and Haswell, but
// movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseAVX] in {
let AddedComplexity = 15 in {
- // Move scalar to XMM zero-extended, zeroing a VR128 then do a
- // MOVS{S,D} to the lower bits.
- def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
- (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
(VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
(VBLENDPSrri (v4i32 (V_SET0)), VR128:$src, (i8 1))>;
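
Under UseAVX the same zero-extension is instead selected as a blend against the zero vector: immediate 1 takes lane 0 from the source and lanes 1-3 from V_SET0. A sketch of the equivalent intrinsic form (assumes SSE4.1, which VBLENDPS requires):

#include <immintrin.h>

/* Zero-extend the low lane using a blend instead of MOVSS: bit 0 of the
   immediate selects lane 0 from v, the remaining lanes come from zero.
   Mirrors (VBLENDPSrri (V_SET0), $src, (i8 1)). */
static __m128 zext_low_lane_blend(__m128 v) {
    return _mm_blend_ps(_mm_setzero_ps(), v, 1);
}
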
; X32_AVX1-LABEL: test1:
; X32_AVX1: ## BB#0:
; X32_AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX1-NEXT: vaddss LCPI0_0, %xmm0, %xmm0
; X32_AVX1-NEXT: vmulss LCPI0_1, %xmm0, %xmm0
+; X32_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32_AVX1-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
;
; X64_AVX1-LABEL: test1:
; X64_AVX1: ## BB#0:
-; X64_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX1-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64_AVX1-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX1-NEXT: vmaxss %xmm1, %xmm0, %xmm0
;
; X32_AVX512-LABEL: test1:
; X32_AVX512: ## BB#0:
; X32_AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX512-NEXT: vaddss LCPI0_0, %xmm0, %xmm0
; X32_AVX512-NEXT: vmulss LCPI0_1, %xmm0, %xmm0
+; X32_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32_AVX512-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
;
; X64_AVX512-LABEL: test1:
; X64_AVX512: ## BB#0:
-; X64_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX512-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64_AVX512-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
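
Read top to bottom, the CHECK lines pin down the shape of test1 on the scalar lane: add a constant, multiply by a constant, zero the upper lanes, then clamp with min and max against zero. A hedged C reconstruction of that shape (the constant-pool values LCPI0_0..LCPI0_2 are not visible in the diff, so they are left as parameters; the function name is illustrative):

#include <immintrin.h>

/* Assumed shape of test1, reconstructed from the CHECK lines above.
   c0..c2 stand in for the unseen constant-pool values LCPI0_0..2. */
static __m128 test1_shape(__m128 x, __m128 c0, __m128 c1, __m128 c2) {
    __m128 zero = _mm_setzero_ps(); /* vxorps %xmm1, %xmm1, %xmm1 */
    x = _mm_add_ss(x, c0);          /* vaddss LCPI0_0 */
    x = _mm_mul_ss(x, c1);          /* vmulss LCPI0_1 */
    x = _mm_blend_ps(zero, x, 1);   /* vblendps / vmovss: zero lanes 1-3 */
    x = _mm_min_ss(x, c2);          /* vminss LCPI0_2 */
    return _mm_max_ss(x, zero);     /* vmaxss %xmm1 */
}
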