From: Simon Pilgrim
Date: Mon, 26 Jun 2017 16:22:52 +0000 (+0000)
Subject: [X86][SSE] Add combine tests for PMULDQ/PMULUDQ
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=575411ebf86ac8f8c1162626cc2115c5426026a8;p=llvm

[X86][SSE] Add combine tests for PMULDQ/PMULUDQ

Found several missed optimizations while investigating the replacement of _mm_mul_epi32/_mm_mul_epu32 with generic implementations.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@306302 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/combine-pmuldq.ll b/test/CodeGen/X86/combine-pmuldq.ll
new file mode 100644
index 00000000000..09a142aa831
--- /dev/null
+++ b/test/CodeGen/X86/combine-pmuldq.ll
@@ -0,0 +1,110 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+
+; TODO - shuffle+sext are superfluous
+define <2 x i64> @combine_shuffle_sext_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_shuffle_sext_pmuldq:
+; SSE:       # BB#0:
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    pmovsxdq %xmm0, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; SSE-NEXT:    pmovsxdq %xmm0, %xmm0
+; SSE-NEXT:    pmuldq %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_shuffle_sext_pmuldq:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT:    vpmovsxdq %xmm0, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; AVX-NEXT:    vpmovsxdq %xmm1, %xmm1
+; AVX-NEXT:    vpmuldq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %3 = sext <2 x i32> %1 to <2 x i64>
+  %4 = sext <2 x i32> %2 to <2 x i64>
+  %5 = mul nuw <2 x i64> %3, %4
+  ret <2 x i64> %5
+}
+
+; TODO - shuffle+zext are superfluous
+define <2 x i64> @combine_shuffle_zext_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_shuffle_zext_pmuludq:
+; SSE:       # BB#0:
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
+; SSE-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; SSE-NEXT:    pmuludq %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_shuffle_zext_pmuludq:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
+; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
+  %3 = zext <2 x i32> %1 to <2 x i64>
+  %4 = zext <2 x i32> %2 to <2 x i64>
+  %5 = mul nuw <2 x i64> %3, %4
+  ret <2 x i64> %5
+}
+
+; TODO - blends are superfluous
+define <2 x i64> @combine_shuffle_zero_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_shuffle_zero_pmuludq:
+; SSE:       # BB#0:
+; SSE-NEXT:    pxor %xmm2, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
+; SSE-NEXT:    pmuludq %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_shuffle_zero_pmuludq:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; AVX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
+; AVX-NEXT:    vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX-NEXT:    vpmuludq %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  %2 = shufflevector <4 x i32> %a1, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+  %3 = bitcast <4 x i32> %1 to <2 x i64>
+  %4 = bitcast <4 x i32> %2 to <2 x i64>
+  %5 = mul <2 x i64> %3, %4
+  ret <2 x i64> %5
+}
+
+; TODO - blends are superfluous
+define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
+; SSE-LABEL: combine_shuffle_zero_pmuludq_256:
+; SSE:       # BB#0:
+; SSE-NEXT:    pxor %xmm4, %xmm4
+; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
+; SSE-NEXT:    pmuludq %xmm3, %xmm1
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
+; SSE-NEXT:    pmuludq %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_shuffle_zero_pmuludq_256:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpxor %ymm2, %ymm2, %ymm2
+; AVX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
+; AVX-NEXT:    vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
+; AVX-NEXT:    vpmuludq %ymm1, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %1 = shufflevector <8 x i32> %a0, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+  %2 = shufflevector <8 x i32> %a1, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
+  %3 = bitcast <8 x i32> %1 to <4 x i64>
+  %4 = bitcast <8 x i32> %2 to <4 x i64>
+  %5 = mul <4 x i64> %3, %4
+  ret <4 x i64> %5
+}
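
For context, the "generic implementations" the commit message refers to boil down to exactly the shuffle+extend+multiply IR patterns these tests exercise. Below is a minimal C sketch of that idea, assuming Clang's __builtin_shufflevector/__builtin_convertvector vector extensions; the typedefs and helper names are hypothetical illustrations, not the real emmintrin.h/smmintrin.h definitions.

    #include <stdint.h>

    typedef uint32_t u32x4 __attribute__((vector_size(16)));
    typedef uint32_t u32x2 __attribute__((vector_size(8)));
    typedef uint64_t u64x2 __attribute__((vector_size(16)));
    typedef int32_t  s32x4 __attribute__((vector_size(16)));
    typedef int32_t  s32x2 __attribute__((vector_size(8)));
    typedef int64_t  s64x2 __attribute__((vector_size(16)));

    /* Hypothetical generic _mm_mul_epu32: multiply the even (0 and 2)
     * unsigned 32-bit elements of a and b as full 64-bit products.
     * This emits the shufflevector+zext+mul IR checked by
     * combine_shuffle_zext_pmuludq above. */
    static inline u64x2 mul_epu32_generic(u32x4 a, u32x4 b) {
      u32x2 ae = __builtin_shufflevector(a, a, 0, 2); /* take even elements */
      u32x2 be = __builtin_shufflevector(b, b, 0, 2);
      return __builtin_convertvector(ae, u64x2) *     /* zext to 64 bits */
             __builtin_convertvector(be, u64x2);      /* elementwise mul */
    }

    /* Hypothetical generic _mm_mul_epi32 (SSE4.1): the signed variant,
     * matching the shufflevector+sext+mul IR in combine_shuffle_sext_pmuldq. */
    static inline s64x2 mul_epi32_generic(s32x4 a, s32x4 b) {
      s32x2 ae = __builtin_shufflevector(a, a, 0, 2);
      s32x2 be = __builtin_shufflevector(b, b, 0, 2);
      return __builtin_convertvector(ae, s64x2) *     /* sext to 64 bits */
             __builtin_convertvector(be, s64x2);
    }

Until the combines flagged by the TODOs land, this form picks up the redundant pshufd/pmovsxdq/pmovzxdq (or blend) instructions shown in the CHECK lines instead of folding directly to a single pmuldq/pmuludq, which is what these tests are meant to track.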