From: Craig Topper
Date: Sun, 3 Sep 2017 17:52:19 +0000 (+0000)
Subject: [X86] Canonicalize (concat_vectors X, zero) -> (insert_subvector zero, X, 0).
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=05f56c0b3fe89658b1f563cb5cfc0405582474d3;p=llvm

[X86] Canonicalize (concat_vectors X, zero) -> (insert_subvector zero, X, 0).

In a future patch, I plan to teach isel to use a small vector move with
implicit zeroing of the upper elements when it sees the
(insert_subvector zero, X, 0) pattern.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@312448 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index e0c17a2c27f..0193f2bfd58 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -35728,6 +35728,16 @@ static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
       return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT,
                          SubVec.getOperand(0));
     }
+
+    // If we're inserting all zeros into the upper half, change this to
+    // an insert into an all zeros vector. We will match this to a move
+    // with implicit upper bit zeroing during isel.
+    if (ISD::isBuildVectorAllZeros(SubVec.getNode())) {
+      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
+                         getZeroVector(OpVT, Subtarget, DAG, dl), SubVec2,
+                         Vec.getOperand(2));
+
+    }
   }
 }
diff --git a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index 70d746311ae..082061c099c 100644
--- a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -3778,15 +3778,15 @@ define <4 x double> @test_mm256_zextpd128_pd256(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm256_zextpd128_pd256:
 ; X32:       # BB#0:
 ; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
-; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_zextpd128_pd256:
 ; X64:       # BB#0:
 ; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
-; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <4 x i32>
   ret <4 x double> %res
@@ -3797,14 +3797,14 @@ define <8 x float> @test_mm256_zextps128_ps256(<4 x float> %a0) nounwind {
 ; X32:       # BB#0:
 ; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
 ; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_zextps128_ps256:
 ; X64:       # BB#0:
 ; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> zeroinitializer, <8 x i32>
   ret <8 x float> %res
@@ -3815,14 +3815,14 @@ define <4 x i64> @test_mm256_zextsi128_si256(<2 x i64> %a0) nounwind {
 ; X32:       # BB#0:
 ; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
 ; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm256_zextsi128_si256:
 ; X64:       # BB#0:
 ; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; X64-NEXT:    retq
   %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <4 x i32>
   ret <4 x i64> %res
diff --git a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index fd6028f6b51..62bc489847b 100644
--- a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -1134,19 +1134,21 @@ define <8 x double> @test_mm512_zextpd128_pd512(<2 x double> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextpd128_pd512:
 ; X32:       # BB#0:
 ; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
-; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X32-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; X32-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; X32-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
+; X32-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextpd128_pd512:
 ; X64:       # BB#0:
 ; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
-; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X64-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3]
+; X64-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3]
+; X64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; X64-NEXT:    retq
   %res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <8 x i32>
   ret <8 x double> %res
@@ -1155,16 +1157,14 @@ define <8 x double> @test_mm512_zextpd128_pd512(<2 x double> %a0) nounwind {
 define <8 x double> @test_mm512_zextpd256_pd512(<4 x double> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextpd256_pd512:
 ; X32:       # BB#0:
-; X32-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
 ; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X32-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextpd256_pd512:
 ; X64:       # BB#0:
-; X64-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X64-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
 ; X64-NEXT:    retq
   %res = shufflevector <4 x double> %a0, <4 x double> zeroinitializer, <8 x i32>
   ret <8 x double> %res
@@ -1174,19 +1174,21 @@ define <16 x float> @test_mm512_zextps128_ps512(<4 x float> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextps128_ps512:
 ; X32:       # BB#0:
 ; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
-; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X32-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X32-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; X32-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; X32-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextps128_ps512:
 ; X64:       # BB#0:
 ; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
-; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X64-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; X64-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; X64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; X64-NEXT:    retq
   %res = shufflevector <4 x float> %a0, <4 x float> zeroinitializer, <16 x i32>
   ret <16 x float> %res
@@ -1195,16 +1197,14 @@ define <16 x float> @test_mm512_zextps128_ps512(<4 x float> %a0) nounwind {
 define <16 x float> @test_mm512_zextps256_ps512(<8 x float> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextps256_ps512:
 ; X32:       # BB#0:
-; X32-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
 ; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X32-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextps256_ps512:
 ; X64:       # BB#0:
-; X64-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X64-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
 ; X64-NEXT:    retq
   %res = shufflevector <8 x float> %a0, <8 x float> zeroinitializer, <16 x i32>
   ret <16 x float> %res
@@ -1214,19 +1214,21 @@ define <8 x i64> @test_mm512_zextsi128_si512(<2 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextsi128_si512:
 ; X32:       # BB#0:
 ; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
-; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
-; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X32-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X32-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; X32-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; X32-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextsi128_si512:
 ; X64:       # BB#0:
 ; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
-; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
-; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X64-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; X64-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
+; X64-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; X64-NEXT:    retq
   %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <8 x i32>
   ret <8 x i64> %res
@@ -1235,16 +1237,14 @@ define <8 x i64> @test_mm512_zextsi128_si512(<2 x i64> %a0) nounwind {
 define <8 x i64> @test_mm512_zextsi256_si512(<4 x i64> %a0) nounwind {
 ; X32-LABEL: test_mm512_zextsi256_si512:
 ; X32:       # BB#0:
-; X32-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
 ; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X32-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: test_mm512_zextsi256_si512:
 ; X64:       # BB#0:
-; X64-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
 ; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X64-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
 ; X64-NEXT:    retq
   %res = shufflevector <4 x i64> %a0, <4 x i64> zeroinitializer, <8 x i32>
   ret <8 x i64> %res
diff --git a/test/CodeGen/X86/madd.ll b/test/CodeGen/X86/madd.ll
index 3fd4896f6a3..0523fa78699 100644
--- a/test/CodeGen/X86/madd.ll
+++ b/test/CodeGen/X86/madd.ll
@@ -41,14 +41,14 @@ define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly
 ; AVX2-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX2-NEXT:    vmovdqu (%rsi,%rcx,2), %xmm2
 ; AVX2-NEXT:    vpmaddwd (%rdi,%rcx,2), %xmm2, %xmm2
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm2
-; AVX2-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
 ; AVX2-NEXT:    addq $8, %rcx
 ; AVX2-NEXT:    cmpq %rcx, %rax
 ; AVX2-NEXT:    jne .LBB0_1
 ; AVX2-NEXT:  # BB#2: # %middle.block
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX2-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX2-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
@@ -67,14 +67,14 @@ define i32 @_Z10test_shortPsS_i(i16* nocapture readonly, i16* nocapture readonly
 ; AVX512-NEXT:    # =>This Inner Loop Header: Depth=1
 ; AVX512-NEXT:    vmovdqu (%rsi,%rcx,2), %xmm2
 ; AVX512-NEXT:    vpmaddwd (%rdi,%rcx,2), %xmm2, %xmm2
-; AVX512-NEXT:    vinserti128 $1, %xmm1, %ymm2, %ymm2
-; AVX512-NEXT:    vpaddd %ymm0, %ymm2, %ymm0
+; AVX512-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-NEXT:    vpaddd %ymm1, %ymm2, %ymm1
 ; AVX512-NEXT:    addq $8, %rcx
 ; AVX512-NEXT:    cmpq %rcx, %rax
 ; AVX512-NEXT:    jne .LBB0_1
 ; AVX512-NEXT:  # BB#2: # %middle.block
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm0
+; AVX512-NEXT:    vpaddd %ymm0, %ymm1, %ymm0
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
 ; AVX512-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
@@ -317,14 +317,14 @@ define i32 @_Z9test_charPcS_i(i8* nocapture readonly, i8* nocapture readonly, i3
 ; AVX512-NEXT:    vpmovsxbw (%rdi,%rcx), %ymm2
 ; AVX512-NEXT:    vpmovsxbw (%rsi,%rcx), %ymm3
 ; AVX512-NEXT:    vpmaddwd %ymm2, %ymm3, %ymm2
-; AVX512-NEXT:    vinserti64x4 $1, %ymm1, %zmm2, %zmm2
-; AVX512-NEXT:    vpaddd %zmm0, %zmm2, %zmm0
+; AVX512-NEXT:    vinserti64x4 $0, %ymm2, %zmm0, %zmm2
+; AVX512-NEXT:    vpaddd %zmm1, %zmm2, %zmm1
 ; AVX512-NEXT:    addq $16, %rcx
 ; AVX512-NEXT:    cmpq %rcx, %rax
 ; AVX512-NEXT:    jne .LBB2_1
 ; AVX512-NEXT:  # BB#2: # %middle.block
-; AVX512-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vextracti64x4 $1, %zmm1, %ymm0
+; AVX512-NEXT:    vpaddd %zmm0, %zmm1, %zmm0
 ; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX512-NEXT:    vpaddd %zmm1, %zmm0, %zmm0
 ; AVX512-NEXT:    vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
diff --git a/test/CodeGen/X86/merge-consecutive-loads-256.ll b/test/CodeGen/X86/merge-consecutive-loads-256.ll
index ba2159def81..542886dc3f4 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-256.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-256.ll
@@ -28,17 +28,15 @@ define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noi
 define <4 x double> @merge_4f64_2f64_2z(<2 x double>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_2f64_2z:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovaps 32(%rdi), %xmm0
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $0, 32(%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_2f64_2z:
 ; X32-AVX:       # BB#0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovaps 32(%eax), %xmm0
-; X32-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-AVX-NEXT:    vinsertf128 $0, 32(%eax), %ymm0, %ymm0
 ; X32-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
   %val0 = load <2 x double>, <2 x double>* %ptr0
@@ -113,17 +111,15 @@ define <4 x double> @merge_4f64_f64_34uu(double* %ptr) nounwind uwtable noinline
 define <4 x double> @merge_4f64_f64_45zz(double* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4f64_f64_45zz:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovups 32(%rdi), %xmm0
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $0, 32(%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_f64_45zz:
 ; X32-AVX:       # BB#0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 32(%eax), %xmm0
-; X32-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-AVX-NEXT:    vinsertf128 $0, 32(%eax), %ymm0, %ymm0
 ; X32-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 4
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 5
@@ -163,17 +159,15 @@ define <4 x double> @merge_4f64_f64_34z6(double* %ptr) nounwind uwtable noinline
 define <4 x i64> @merge_4i64_2i64_3z(<2 x i64>* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4i64_2i64_3z:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovaps 48(%rdi), %xmm0
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $0, 48(%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4i64_2i64_3z:
 ; X32-AVX:       # BB#0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovaps 48(%eax), %xmm0
-; X32-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-AVX-NEXT:    vinsertf128 $0, 48(%eax), %ymm0, %ymm0
 ; X32-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 3
   %val0 = load <2 x i64>, <2 x i64>* %ptr0
@@ -229,17 +223,15 @@ define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
 define <4 x i64> @merge_4i64_i64_23zz(i64* %ptr) nounwind uwtable noinline ssp {
 ; AVX-LABEL: merge_4i64_i64_23zz:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vmovups 16(%rdi), %xmm0
-; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vinsertf128 $0, 16(%rdi), %ymm0, %ymm0
 ; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4i64_i64_23zz:
 ; X32-AVX:       # BB#0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vmovups 16(%eax), %xmm0
-; X32-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-AVX-NEXT:    vinsertf128 $0, 16(%eax), %ymm0, %ymm0
 ; X32-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds i64, i64* %ptr, i64 2
   %ptr1 = getelementptr inbounds i64, i64* %ptr, i64 3
@@ -621,37 +613,21 @@ define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounw
 ;
 
 define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable noinline ssp {
-; AVX1-LABEL: merge_4f64_f64_34uz_volatile:
-; AVX1:       # BB#0:
-; AVX1-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX1-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT:    retq
-;
-; AVX2-LABEL: merge_4f64_f64_34uz_volatile:
-; AVX2:       # BB#0:
-; AVX2-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX2-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX2-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX2-NEXT:    retq
-;
-; AVX512F-LABEL: merge_4f64_f64_34uz_volatile:
-; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
-; AVX512F-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; AVX512F-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512F-NEXT:    retq
+; AVX-LABEL: merge_4f64_f64_34uz_volatile:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX-NEXT:    retq
 ;
 ; X32-AVX-LABEL: merge_4f64_f64_34uz_volatile:
 ; X32-AVX:       # BB#0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
-; X32-AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X32-AVX-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
 ; X32-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 3
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 4
diff --git a/test/CodeGen/X86/merge-consecutive-loads-512.ll b/test/CodeGen/X86/merge-consecutive-loads-512.ll
index 3e3b9b88315..e95df0dcbd9 100644
--- a/test/CodeGen/X86/merge-consecutive-loads-512.ll
+++ b/test/CodeGen/X86/merge-consecutive-loads-512.ll
@@ -106,21 +106,19 @@ define <8 x double> @merge_8f64_f64_23uuuuu9(double* %ptr) nounwind uwtable noin
 define <8 x double> @merge_8f64_f64_12zzuuzz(double* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8f64_f64_12zzuuzz:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vmovups 8(%rdi), %xmm0
-; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; ALL-NEXT:    vinsertf128 $0, 8(%rdi), %ymm0, %ymm0
 ; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; ALL-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
 ; ALL-NEXT:    retq
 ;
 ; X32-AVX512F-LABEL: merge_8f64_f64_12zzuuzz:
 ; X32-AVX512F:       # BB#0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups 8(%eax), %xmm0
-; X32-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX512F-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-AVX512F-NEXT:    vinsertf128 $0, 8(%eax), %ymm0, %ymm0
 ; X32-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X32-AVX512F-NEXT:    vinsertf64x4 $0, %ymm0, %zmm1, %zmm0
 ; X32-AVX512F-NEXT:    retl
   %ptr0 = getelementptr inbounds double, double* %ptr, i64 1
   %ptr1 = getelementptr inbounds double, double* %ptr, i64 2
@@ -198,9 +196,8 @@ define <8 x i64> @merge_8i64_4i64_z3(<4 x i64>* %ptr) nounwind uwtable noinline
 define <8 x i64> @merge_8i64_i64_56zz9uzz(i64* %ptr) nounwind uwtable noinline ssp {
 ; ALL-LABEL: merge_8i64_i64_56zz9uzz:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vmovups 40(%rdi), %xmm0
-; ALL-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; ALL-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; ALL-NEXT:    vinsertf128 $0, 40(%rdi), %ymm0, %ymm0
 ; ALL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; ALL-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; ALL-NEXT:    retq
@@ -208,9 +205,8 @@ define <8 x i64> @merge_8i64_i64_56zz9uzz(i64* %ptr) nounwind uwtable noinline s
 ; X32-AVX512F-LABEL: merge_8i64_i64_56zz9uzz:
 ; X32-AVX512F:       # BB#0:
 ; X32-AVX512F-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX512F-NEXT:    vmovups 40(%eax), %xmm0
-; X32-AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
-; X32-AVX512F-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX512F-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-AVX512F-NEXT:    vinsertf128 $0, 40(%eax), %ymm0, %ymm0
 ; X32-AVX512F-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; X32-AVX512F-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
 ; X32-AVX512F-NEXT:    retl
diff --git a/test/CodeGen/X86/vector-shuffle-variable-256.ll b/test/CodeGen/X86/vector-shuffle-variable-256.ll
index 4ca878ef797..ba63fa27ca4 100644
--- a/test/CodeGen/X86/vector-shuffle-variable-256.ll
+++ b/test/CodeGen/X86/vector-shuffle-variable-256.ll
@@ -159,7 +159,7 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
 ; AVX1-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX1-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX1-NEXT:    movq %rbp, %rsp
 ; AVX1-NEXT:    popq %rbp
 ; AVX1-NEXT:    retq
@@ -177,7 +177,7 @@ define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i
 ; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
 ; AVX2-NEXT:    movq %rbp, %rsp
 ; AVX2-NEXT:    popq %rbp
 ; AVX2-NEXT:    retq
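
For readers who want to try the new lowering in isolation, the IR below is a minimal standalone sketch distilled from the test_mm256_zextpd128_pd256 change above; the function name and the explicit <4 x i32> shuffle mask are assumptions supplied here (the natural 128-to-256-bit zero-extension mask), since the mask constants are not visible in the hunks. Compiled with AVX enabled (for example: llc -mtriple=x86_64-unknown-unknown -mattr=+avx), the zeroed upper half should now be produced by vxorpd plus a vblendpd with the zero register rather than a vinsertf128 of it, matching the updated checks.

; Hypothetical reproducer (not part of this patch); mask values are assumed.
define <4 x double> @zext_v2f64_to_v4f64(<2 x double> %a0) nounwind {
  %res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x double> %res
}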