From: Simon Pilgrim
Date: Sat, 29 Apr 2017 17:15:12 +0000 (+0000)
Subject: [X86][AVX] Added codegen tests for _mm256_zext* helper intrinsics (PR32839)
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=6c030ac85c2aeb636b57269202968c93ac1a5304;p=llvm

[X86][AVX] Added codegen tests for _mm256_zext* helper intrinsics (PR32839)

Not great codegen, especially as VEX moves support implicit zeroing of upper bits....

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@301748 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
index 4a86fa22f08..1d925ff8e9b 100644
--- a/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
@@ -3774,4 +3774,58 @@ define void @test_mm256_zeroupper() nounwind {
 }
 declare void @llvm.x86.avx.vzeroupper() nounwind readnone
 
+define <4 x double> @test_mm256_zextpd128_pd256(<2 x double> %a0) nounwind {
+; X32-LABEL: test_mm256_zextpd128_pd256:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_zextpd128_pd256:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    retq
+  %res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x double> %res
+}
+
+define <8 x float> @test_mm256_zextps128_ps256(<4 x float> %a0) nounwind {
+; X32-LABEL: test_mm256_zextps128_ps256:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_zextps128_ps256:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    retq
+  %res = shufflevector <4 x float> %a0, <4 x float> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x float> %res
+}
+
+define <4 x i64> @test_mm256_zextsi128_si256(<2 x i64> %a0) nounwind {
+; X32-LABEL: test_mm256_zextsi128_si256:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm256_zextsi128_si256:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    retq
+  %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i64> %res
+}
+
 !0 = !{i32 1}
diff --git a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index 2d4bf6ebb25..652f85d8833 100644
--- a/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -1130,5 +1130,125 @@ define <16 x float> @test_mm512_maskz_unpacklo_ps(i16 %a0, <16 x float> %a1, <16 x float> %a2) nounwind {
   ret <16 x float> %res1
 }
 
+define <8 x double> @test_mm512_zextpd128_pd512(<2 x double> %a0) nounwind {
+; X32-LABEL: test_mm512_zextpd128_pd512:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
+; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_zextpd128_pd512:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
+; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X64-NEXT:    retq
+  %res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mm512_zextpd256_pd512(<4 x double> %a0) nounwind {
+; X32-LABEL: test_mm512_zextpd256_pd512:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
+; X32-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X32-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_zextpd256_pd512:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
+; X64-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X64-NEXT:    retq
+  %res = shufflevector <4 x double> %a0, <4 x double> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x double> %res
+}
+
+define <16 x float> @test_mm512_zextps128_ps512(<4 x float> %a0) nounwind {
+; X32-LABEL: test_mm512_zextps128_ps512:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X32-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
+; X32-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_zextps128_ps512:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm1, %ymm2
+; X64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; X64-NEXT:    retq
+  %res = shufflevector <4 x float> %a0, <4 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mm512_zextps256_ps512(<8 x float> %a0) nounwind {
+; X32-LABEL: test_mm512_zextps256_ps512:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
+; X32-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X32-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_zextps256_ps512:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
+; X64-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; X64-NEXT:    vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
+; X64-NEXT:    retq
+  %res = shufflevector <8 x float> %a0, <8 x float> zeroinitializer, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x float> %res
+}
+
+define <8 x i64> @test_mm512_zextsi128_si512(<2 x i64> %a0) nounwind {
+; X32-LABEL: test_mm512_zextsi128_si512:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X32-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm2
+; X32-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; X32-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_zextsi128_si512:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %XMM0 %XMM0 %YMM0
+; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-NEXT:    vinserti128 $1, %xmm1, %ymm1, %ymm2
+; X64-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; X64-NEXT:    vinserti64x4 $1, %ymm2, %zmm0, %zmm0
+; X64-NEXT:    retq
+  %res = shufflevector <2 x i64> %a0, <2 x i64> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+  ret <8 x i64> %res
+}
+
+define <8 x i64> @test_mm512_zextsi256_si512(<4 x i64> %a0) nounwind {
+; X32-LABEL: test_mm512_zextsi256_si512:
+; X32:       # BB#0:
+; X32-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
+; X32-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X32-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: test_mm512_zextsi256_si512:
+; X64:       # BB#0:
+; X64-NEXT:    # kill: %YMM0 %YMM0 %ZMM0
+; X64-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X64-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
+; X64-NEXT:    retq
+  %res = shufflevector <4 x i64> %a0, <4 x i64> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i64> %res
+}
+
 !0 = !{i32 1}
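
For reference, a minimal sketch of the tighter lowering the commit message alludes to, and not part of this commit: any VEX-encoded instruction that writes an XMM register, including a plain 128-bit vmovaps, implicitly zeroes every destination bit above bit 127, so each of these zext patterns could in principle lower to a single register-to-register move instead of a vxor plus one or more insert instructions. The function name and the IDEAL check prefix below are illustrative only:

define <4 x double> @zextpd128_pd256_ideal(<2 x double> %a0) nounwind {
; IDEAL-LABEL: zextpd128_pd256_ideal:
; IDEAL:       # BB#0:
; IDEAL-NEXT:    vmovaps %xmm0, %xmm0  # VEX write to xmm0 zeroes all ymm0/zmm0 bits above 127
; IDEAL-NEXT:    retq
  %res = shufflevector <2 x double> %a0, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x double> %res
}

Note that the single move cannot be dropped entirely: the calling convention only defines the low 128 bits of the incoming %xmm0, so the upper bits must still be cleared once. The saving is replacing the vxorps/vinsertf128 pairs (and, in the 512-bit tests, the additional vinsertf64x4) with one instruction.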