From: Simon Pilgrim
Date: Tue, 5 Mar 2019 15:36:45 +0000 (+0000)
Subject: [X86] Add SMULO/UMULO combine tests
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=7bb5d42fe3c0749b94834e7e3546f6861550d066;p=llvm

[X86] Add SMULO/UMULO combine tests

Include scalar and vector test variants covering the folds in DAGCombiner
(vector isn't currently supported - PR40442)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@355407 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/combine-mulo.ll b/test/CodeGen/X86/combine-mulo.ll
new file mode 100644
index 00000000000..045e6595ed2
--- /dev/null
+++ b/test/CodeGen/X86/combine-mulo.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+
+declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
+declare {i32, i1} @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
+
+declare {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
+
+; fold (smulo x, 2) -> (saddo x, x)
+define i32 @combine_smul_two(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_smul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    addl %edi, %eax
+; SSE-NEXT:    cmovol %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_smul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    addl %edi, %eax
+; AVX-NEXT:    cmovol %esi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %a0, i32 2)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_smul_two(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_smul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2,2,2]
+; SSE-NEXT:    pmuldq %xmm3, %xmm0
+; SSE-NEXT:    pmuldq %xmm2, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE-NEXT:    paddd %xmm2, %xmm2
+; SSE-NEXT:    movdqa %xmm2, %xmm0
+; SSE-NEXT:    psrad $31, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
+; SSE-NEXT:    pxor %xmm3, %xmm0
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_smul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2,2,2,2]
+; AVX-NEXT:    vpmuldq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpmuldq %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpsrad $31, %xmm0, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
+
+; fold (umulo x, 2) -> (uaddo x, x)
+define i32 @combine_umul_two(i32 %a0, i32 %a1) {
+; SSE-LABEL: combine_umul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movl %edi, %eax
+; SSE-NEXT:    addl %edi, %eax
+; SSE-NEXT:    cmovbl %esi, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_umul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    movl %edi, %eax
+; AVX-NEXT:    addl %edi, %eax
+; AVX-NEXT:    cmovbl %esi, %eax
+; AVX-NEXT:    retq
+  %1 = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %a0, i32 2)
+  %2 = extractvalue {i32, i1} %1, 0
+  %3 = extractvalue {i32, i1} %1, 1
+  %4 = select i1 %3, i32 %a1, i32 %2
+  ret i32 %4
+}
+
+define <4 x i32> @combine_vec_umul_two(<4 x i32> %a0, <4 x i32> %a1) {
+; SSE-LABEL: combine_vec_umul_two:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT:    movdqa {{.*#+}} xmm3 = [2,2,2,2]
+; SSE-NEXT:    pmuludq %xmm3, %xmm0
+; SSE-NEXT:    pmuludq %xmm2, %xmm3
+; SSE-NEXT:    pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3],xmm3[4,5],xmm0[6,7]
+; SSE-NEXT:    pxor %xmm4, %xmm4
+; SSE-NEXT:    pcmpeqd %xmm3, %xmm4
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
+; SSE-NEXT:    pxor %xmm4, %xmm0
+; SSE-NEXT:    paddd %xmm2, %xmm2
+; SSE-NEXT:    blendvps %xmm0, %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_umul_two:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
+; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm3 = [2,2,2,2]
+; AVX-NEXT:    vpmuludq %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpmuludq %xmm3, %xmm0, %xmm3
+; AVX-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX-NEXT:    vpblendd {{.*#+}} xmm2 = xmm3[0],xmm2[1],xmm3[2],xmm2[3]
+; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpcmpeqd %xmm3, %xmm3, %xmm3
+; AVX-NEXT:    vpxor %xmm3, %xmm2, %xmm2
+; AVX-NEXT:    vpaddd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vblendvps %xmm2, %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %1 = call {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
+  %2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
+  %3 = extractvalue {<4 x i32>, <4 x i1>} %1, 1
+  %4 = select <4 x i1> %3, <4 x i32> %a1, <4 x i32> %2
+  ret <4 x i32> %4
+}
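
For reference, the folds named in the commit message rewrite an overflow-checked multiply by 2 into an overflow-checked add of the value to itself. Below is a minimal C++ sketch of that kind of DAGCombiner rewrite, not the exact upstream code: the visitMULO name and the surrounding structure are assumptions for illustration only.

  // Hypothetical DAGCombiner-style fold:
  //   (smulo x, 2) -> (saddo x, x)
  //   (umulo x, 2) -> (uaddo x, x)
  SDValue DAGCombiner::visitMULO(SDNode *N) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    bool IsSigned = N->getOpcode() == ISD::SMULO;
    SDLoc DL(N);

    // x * 2 overflows exactly when x + x overflows, so the multiply (and its
    // overflow flag) can be replaced by the cheaper overflow-checked add.
    // isConstOrConstSplat also matches the splat-of-2 vector operand, which is
    // what the combine_vec_* tests above would need once vectors are supported.
    if (ConstantSDNode *C1 = ISD::isConstOrConstSplat(N1))
      if (C1->getAPIntValue() == 2)
        return DAG.getNode(IsSigned ? ISD::SADDO : ISD::UADDO, DL,
                           N->getVTList(), N0, N0);

    return SDValue();
  }

In the scalar tests this shows up as addl plus a cmovo/cmovb of the fallback value; the vector variants still go through the full pmuldq/pmuludq expansion, which is the PR40442 gap the tests document.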