From: Simon Pilgrim
Date: Sat, 10 Dec 2016 22:02:36 +0000 (+0000)
Subject: [X86][SSE] Add tests for sign extended vXi64 multiplication
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=9b6294dff4db2597e06f742b855020f2f24bb767;p=llvm

[X86][SSE] Add tests for sign extended vXi64 multiplication

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@289342 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/pmul.ll b/test/CodeGen/X86/pmul.ll
index 894cc5d8c87..bb8190d2bfe 100644
--- a/test/CodeGen/X86/pmul.ll
+++ b/test/CodeGen/X86/pmul.ll
@@ -1456,3 +1456,201 @@ entry:
   %res = shufflevector <16 x i32> %rescast, <16 x i32> undef, <8 x i32> 
   ret <8 x i32> %res
 }
+
+define <8 x i64> @mul_v8i64_sext(<8 x i16> %val1, <8 x i32> %val2) {
+; SSE2-LABEL: mul_v8i64_sext:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movdqa %xmm2, %xmm9
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE2-NEXT:    movdqa %xmm3, %xmm1
+; SSE2-NEXT:    psrad $31, %xmm1
+; SSE2-NEXT:    psrad $16, %xmm3
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    movdqa %xmm1, %xmm2
+; SSE2-NEXT:    psrad $31, %xmm2
+; SSE2-NEXT:    psrad $16, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT:    movdqa %xmm2, %xmm6
+; SSE2-NEXT:    psrad $31, %xmm6
+; SSE2-NEXT:    psrad $16, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
+; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT:    movdqa %xmm0, %xmm6
+; SSE2-NEXT:    psrad $31, %xmm6
+; SSE2-NEXT:    psrad $16, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm8 = xmm9[2,3,0,1]
+; SSE2-NEXT:    movdqa %xmm8, %xmm7
+; SSE2-NEXT:    psrad $31, %xmm7
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
+; SSE2-NEXT:    movdqa %xmm6, %xmm7
+; SSE2-NEXT:    psrad $31, %xmm7
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; SSE2-NEXT:    movdqa %xmm9, %xmm7
+; SSE2-NEXT:    psrad $31, %xmm7
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
+; SSE2-NEXT:    movdqa %xmm4, %xmm7
+; SSE2-NEXT:    psrad $31, %xmm7
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
+; SSE2-NEXT:    movdqa %xmm0, %xmm7
+; SSE2-NEXT:    pmuludq %xmm4, %xmm7
+; SSE2-NEXT:    movdqa %xmm4, %xmm5
+; SSE2-NEXT:    psrlq $32, %xmm5
+; SSE2-NEXT:    pmuludq %xmm0, %xmm5
+; SSE2-NEXT:    psllq $32, %xmm5
+; SSE2-NEXT:    psrlq $32, %xmm0
+; SSE2-NEXT:    pmuludq %xmm4, %xmm0
+; SSE2-NEXT:    psllq $32, %xmm0
+; SSE2-NEXT:    paddq %xmm5, %xmm0
+; SSE2-NEXT:    paddq %xmm7, %xmm0
+; SSE2-NEXT:    movdqa %xmm2, %xmm4
+; SSE2-NEXT:    pmuludq %xmm9, %xmm4
+; SSE2-NEXT:    movdqa %xmm9, %xmm5
+; SSE2-NEXT:    psrlq $32, %xmm5
+; SSE2-NEXT:    pmuludq %xmm2, %xmm5
+; SSE2-NEXT:    psllq $32, %xmm5
+; SSE2-NEXT:    psrlq $32, %xmm2
+; SSE2-NEXT:    pmuludq %xmm9, %xmm2
+; SSE2-NEXT:    psllq $32, %xmm2
+; SSE2-NEXT:    paddq %xmm5, %xmm2
+; SSE2-NEXT:    paddq %xmm4, %xmm2
+; SSE2-NEXT:    movdqa %xmm1, %xmm4
+; SSE2-NEXT:    pmuludq %xmm6, %xmm4
+; SSE2-NEXT:    movdqa %xmm6, %xmm5
+; SSE2-NEXT:    psrlq $32, %xmm5
+; SSE2-NEXT:    pmuludq %xmm1, %xmm5
+; SSE2-NEXT:    psllq $32, %xmm5
+; SSE2-NEXT:    psrlq $32, %xmm1
+; SSE2-NEXT:    pmuludq %xmm6, %xmm1
+; SSE2-NEXT:    psllq $32, %xmm1
+; SSE2-NEXT:    paddq %xmm5, %xmm1
+; SSE2-NEXT:    paddq %xmm4, %xmm1
+; SSE2-NEXT:    movdqa %xmm3, %xmm4
+; SSE2-NEXT:    pmuludq %xmm8, %xmm4
+; SSE2-NEXT:    movdqa %xmm8, %xmm5
+; SSE2-NEXT:    psrlq $32, %xmm5
+; SSE2-NEXT:    pmuludq %xmm3, %xmm5
+; SSE2-NEXT:    psllq $32, %xmm5
+; SSE2-NEXT:    psrlq $32, %xmm3
+; SSE2-NEXT:    pmuludq %xmm8, %xmm3
+; SSE2-NEXT:    psllq $32, %xmm3
+; SSE2-NEXT:    paddq %xmm5, %xmm3
+; SSE2-NEXT:    paddq %xmm4, %xmm3
+; SSE2-NEXT:    retq
+;
+; SSE41-LABEL: mul_v8i64_sext:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    movdqa %xmm2, %xmm4
+; SSE41-NEXT:    movdqa %xmm1, %xmm5
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; SSE41-NEXT:    pmovsxwq %xmm1, %xmm3
+; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
+; SSE41-NEXT:    pmovsxwq %xmm1, %xmm1
+; SSE41-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; SSE41-NEXT:    pmovsxwq %xmm2, %xmm2
+; SSE41-NEXT:    pmovsxwq %xmm0, %xmm0
+; SSE41-NEXT:    pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
+; SSE41-NEXT:    pmovsxdq %xmm6, %xmm8
+; SSE41-NEXT:    pshufd {{.*#+}} xmm7 = xmm5[2,3,0,1]
+; SSE41-NEXT:    pmovsxdq %xmm7, %xmm9
+; SSE41-NEXT:    pmovsxdq %xmm4, %xmm4
+; SSE41-NEXT:    pmovsxdq %xmm5, %xmm7
+; SSE41-NEXT:    movdqa %xmm0, %xmm6
+; SSE41-NEXT:    pmuludq %xmm7, %xmm6
+; SSE41-NEXT:    movdqa %xmm7, %xmm5
+; SSE41-NEXT:    psrlq $32, %xmm5
+; SSE41-NEXT:    pmuludq %xmm0, %xmm5
+; SSE41-NEXT:    psllq $32, %xmm5
+; SSE41-NEXT:    psrlq $32, %xmm0
+; SSE41-NEXT:    pmuludq %xmm7, %xmm0
+; SSE41-NEXT:    psllq $32, %xmm0
+; SSE41-NEXT:    paddq %xmm5, %xmm0
+; SSE41-NEXT:    paddq %xmm6, %xmm0
+; SSE41-NEXT:    movdqa %xmm2, %xmm5
+; SSE41-NEXT:    pmuludq %xmm4, %xmm5
+; SSE41-NEXT:    movdqa %xmm4, %xmm6
+; SSE41-NEXT:    psrlq $32, %xmm6
+; SSE41-NEXT:    pmuludq %xmm2, %xmm6
+; SSE41-NEXT:    psllq $32, %xmm6
+; SSE41-NEXT:    psrlq $32, %xmm2
+; SSE41-NEXT:    pmuludq %xmm4, %xmm2
+; SSE41-NEXT:    psllq $32, %xmm2
+; SSE41-NEXT:    paddq %xmm6, %xmm2
+; SSE41-NEXT:    paddq %xmm5, %xmm2
+; SSE41-NEXT:    movdqa %xmm1, %xmm4
+; SSE41-NEXT:    pmuludq %xmm9, %xmm4
+; SSE41-NEXT:    movdqa %xmm9, %xmm5
+; SSE41-NEXT:    psrlq $32, %xmm5
+; SSE41-NEXT:    pmuludq %xmm1, %xmm5
+; SSE41-NEXT:    psllq $32, %xmm5
+; SSE41-NEXT:    psrlq $32, %xmm1
+; SSE41-NEXT:    pmuludq %xmm9, %xmm1
+; SSE41-NEXT:    psllq $32, %xmm1
+; SSE41-NEXT:    paddq %xmm5, %xmm1
+; SSE41-NEXT:    paddq %xmm4, %xmm1
+; SSE41-NEXT:    movdqa %xmm3, %xmm4
+; SSE41-NEXT:    pmuludq %xmm8, %xmm4
+; SSE41-NEXT:    movdqa %xmm8, %xmm5
+; SSE41-NEXT:    psrlq $32, %xmm5
+; SSE41-NEXT:    pmuludq %xmm3, %xmm5
+; SSE41-NEXT:    psllq $32, %xmm5
+; SSE41-NEXT:    psrlq $32, %xmm3
+; SSE41-NEXT:    pmuludq %xmm8, %xmm3
+; SSE41-NEXT:    psllq $32, %xmm3
+; SSE41-NEXT:    paddq %xmm5, %xmm3
+; SSE41-NEXT:    paddq %xmm4, %xmm3
+; SSE41-NEXT:    retq
+;
+; AVX2-LABEL: mul_v8i64_sext:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
+; AVX2-NEXT:    vpmovsxwq %xmm2, %ymm2
+; AVX2-NEXT:    vpmovsxwq %xmm0, %ymm0
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT:    vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT:    vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm4
+; AVX2-NEXT:    vpsrlq $32, %ymm1, %ymm5
+; AVX2-NEXT:    vpmuludq %ymm5, %ymm0, %ymm5
+; AVX2-NEXT:    vpsllq $32, %ymm5, %ymm5
+; AVX2-NEXT:    vpsrlq $32, %ymm0, %ymm0
+; AVX2-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpsllq $32, %ymm0, %ymm0
+; AVX2-NEXT:    vpaddq %ymm0, %ymm5, %ymm0
+; AVX2-NEXT:    vpaddq %ymm0, %ymm4, %ymm0
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm2, %ymm1
+; AVX2-NEXT:    vpsrlq $32, %ymm3, %ymm4
+; AVX2-NEXT:    vpmuludq %ymm4, %ymm2, %ymm4
+; AVX2-NEXT:    vpsllq $32, %ymm4, %ymm4
+; AVX2-NEXT:    vpsrlq $32, %ymm2, %ymm2
+; AVX2-NEXT:    vpmuludq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT:    vpsllq $32, %ymm2, %ymm2
+; AVX2-NEXT:    vpaddq %ymm2, %ymm4, %ymm2
+; AVX2-NEXT:    vpaddq %ymm2, %ymm1, %ymm1
+; AVX2-NEXT:    retq
+;
+; AVX512-LABEL: mul_v8i64_sext:
+; AVX512:       # BB#0:
+; AVX512-NEXT:    vpmovsxwq %xmm0, %zmm0
+; AVX512-NEXT:    vpmovsxdq %ymm1, %zmm1
+; AVX512-NEXT:    vpmuludq %zmm1, %zmm0, %zmm2
+; AVX512-NEXT:    vpsrlq $32, %zmm1, %zmm3
+; AVX512-NEXT:    vpmuludq %zmm3, %zmm0, %zmm3
+; AVX512-NEXT:    vpsllq $32, %zmm3, %zmm3
+; AVX512-NEXT:    vpsrlq $32, %zmm0, %zmm0
+; AVX512-NEXT:    vpmuludq %zmm1, %zmm0, %zmm0
+; AVX512-NEXT:    vpsllq $32, %zmm0, %zmm0
+; AVX512-NEXT:    vpaddq %zmm0, %zmm3, %zmm0
+; AVX512-NEXT:    vpaddq %zmm0, %zmm2, %zmm0
+; AVX512-NEXT:    retq
+  %1 = sext <8 x i16> %val1 to <8 x i64>
+  %2 = sext <8 x i32> %val2 to <8 x i64>
+  %3 = mul <8 x i64> %1, %2
+  ret <8 x i64> %3
+}
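
Note on the expected codegen: pmuludq only provides a 32x32->64-bit unsigned
multiply of the low dword of each 64-bit lane, so every prefix above builds the
full 64-bit product from three partial products (lo*lo, lo*hi<<32, hi*lo<<32).
The same sequence remains correct for these sign-extended inputs because the
low 64 bits of a product do not depend on whether the operands are interpreted
as signed or unsigned. A minimal scalar sketch of the decomposition the checks
encode (illustrative C, not part of the commit; the helper name is invented):

    #include <stdint.h>

    /* a * b (mod 2^64) assembled from 32x32->64 multiplies, mirroring the
       pmuludq/psrlq/psllq/paddq sequence in the CHECK lines above. */
    static uint64_t mul64_via_pmuludq(uint64_t a, uint64_t b) {
        uint64_t lo_lo = (a & 0xffffffff) * (b & 0xffffffff); /* pmuludq         */
        uint64_t lo_hi = (a & 0xffffffff) * (b >> 32);        /* psrlq + pmuludq */
        uint64_t hi_lo = (a >> 32) * (b & 0xffffffff);        /* psrlq + pmuludq */
        return lo_lo + (lo_hi << 32) + (hi_lo << 32);         /* psllq + paddq   */
    }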