From 4864d89affbad659862b5fc41451ed20a7b03406 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 25 Oct 2016 21:14:11 +0000
Subject: [PATCH] [X86][SSE] Added vector srem combine tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@285121 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/combine-srem.ll | 123 +++++++++++++++++++++++++++++++
 1 file changed, 123 insertions(+)
 create mode 100644 test/CodeGen/X86/combine-srem.ll

diff --git a/test/CodeGen/X86/combine-srem.ll b/test/CodeGen/X86/combine-srem.ll
new file mode 100644
index 00000000000..dc7373ce519
--- /dev/null
+++ b/test/CodeGen/X86/combine-srem.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
+
+; fold (srem undef, x) -> 0
+define <4 x i32> @combine_vec_srem_undef0(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_srem_undef0:
+; SSE:       # BB#0:
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_srem_undef0:
+; AVX:       # BB#0:
+; AVX-NEXT:    retq
+  %1 = srem <4 x i32> undef, %x
+  ret <4 x i32> %1
+}
+
+; fold (srem x, undef) -> undef
+define <4 x i32> @combine_vec_srem_undef1(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_srem_undef1:
+; SSE:       # BB#0:
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_srem_undef1:
+; AVX:       # BB#0:
+; AVX-NEXT:    retq
+  %1 = srem <4 x i32> %x, undef
+  ret <4 x i32> %1
+}
+
+; fold (srem x, y) -> (urem x, y) iff x and y are positive
+define <4 x i32> @combine_vec_srem_by_pos0(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_srem_by_pos0:
+; SSE:       # BB#0:
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
+; SSE-NEXT:    pand %xmm0, %xmm1
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    psubd %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_srem_by_pos0:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
+  %2 = srem <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
+  ret <4 x i32> %2
+}
+
+define <4 x i32> @combine_vec_srem_by_pos1(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_srem_by_pos1:
+; SSE:       # BB#0:
+; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE-NEXT:    pextrd $3, %xmm0, %eax
+; SSE-NEXT:    movl %eax, %ecx
+; SSE-NEXT:    sarl $31, %ecx
+; SSE-NEXT:    shrl $28, %ecx
+; SSE-NEXT:    addl %eax, %ecx
+; SSE-NEXT:    andl $-16, %ecx
+; SSE-NEXT:    subl %ecx, %eax
+; SSE-NEXT:    movd %eax, %xmm1
+; SSE-NEXT:    pextrd $2, %xmm0, %eax
+; SSE-NEXT:    movl %eax, %ecx
+; SSE-NEXT:    sarl $31, %ecx
+; SSE-NEXT:    shrl $29, %ecx
+; SSE-NEXT:    addl %eax, %ecx
+; SSE-NEXT:    andl $-8, %ecx
+; SSE-NEXT:    subl %ecx, %eax
+; SSE-NEXT:    movd %eax, %xmm2
+; SSE-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,1,0,1]
+; SSE-NEXT:    pextrd $1, %xmm0, %eax
+; SSE-NEXT:    movl %eax, %ecx
+; SSE-NEXT:    sarl $31, %ecx
+; SSE-NEXT:    shrl $30, %ecx
+; SSE-NEXT:    addl %eax, %ecx
+; SSE-NEXT:    andl $-4, %ecx
+; SSE-NEXT:    subl %ecx, %eax
+; SSE-NEXT:    movd %eax, %xmm0
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_srem_by_pos1:
+; AVX:       # BB#0:
+; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX-NEXT:    movl %eax, %ecx
+; AVX-NEXT:    sarl $31, %ecx
+; AVX-NEXT:    shrl $28, %ecx
+; AVX-NEXT:    addl %eax, %ecx
+; AVX-NEXT:    andl $-16, %ecx
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    vmovd %eax, %xmm1
+; AVX-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX-NEXT:    movl %eax, %ecx
+; AVX-NEXT:    sarl $31, %ecx
+; AVX-NEXT:    shrl $29, %ecx
+; AVX-NEXT:    addl %eax, %ecx
+; AVX-NEXT:    andl $-8, %ecx
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    vmovd %eax, %xmm2
+; AVX-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; AVX-NEXT:    vpbroadcastq %xmm1, %xmm1
+; AVX-NEXT:    vpextrd $1, %xmm0, %eax
+; AVX-NEXT:    movl %eax, %ecx
+; AVX-NEXT:    sarl $31, %ecx
+; AVX-NEXT:    shrl $30, %ecx
+; AVX-NEXT:    addl %eax, %ecx
+; AVX-NEXT:    andl $-4, %ecx
+; AVX-NEXT:    subl %ecx, %eax
+; AVX-NEXT:    vmovd %eax, %xmm0
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,0,2,3]
+; AVX-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX-NEXT:    retq
+  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
+  %2 = srem <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
+  ret <4 x i32> %2
+}
-- 
2.40.0
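
Note: the per-lane scalar sequence checked above (sarl $31 / shrl $(32-k) / addl / andl $-(2^k) / subl) is the standard branch-free expansion of a signed remainder by 2^k, biasing negative dividends by 2^k - 1 so the implicit division rounds toward zero, as srem requires. The C sketch below is an illustrative addition, not part of the committed test; the helper name srem_pow2 is hypothetical, and it assumes the usual arithmetic right shift of negative ints that x86 compilers provide.

/* Branch-free signed remainder by 2^k (1 <= k <= 31), mirroring the
 * per-lane asm checked in combine_vec_srem_by_pos1. */
#include <assert.h>
#include <stdint.h>

static int32_t srem_pow2(int32_t x, unsigned k)
{
    uint32_t bias = (uint32_t)(x >> 31) >> (32 - k); /* sarl $31 ; shrl $(32-k): 2^k-1 if x<0, else 0 */
    uint32_t sum  = (uint32_t)x + bias;              /* addl %eax, %ecx */
    uint32_t rnd  = sum & ~((1u << k) - 1);          /* andl $-(2^k), %ecx */
    return x - (int32_t)rnd;                         /* subl %ecx, %eax */
}

int main(void)
{
    for (int32_t x = -300; x <= 300; ++x) {
        assert(srem_pow2(x, 2) == x % 4);   /* lane 1: shrl $30, andl $-4  */
        assert(srem_pow2(x, 3) == x % 8);   /* lane 2: shrl $29, andl $-8  */
        assert(srem_pow2(x, 4) == x % 16);  /* lane 3: shrl $28, andl $-16 */
    }
    return 0;
}

Since C99, the % operator also rounds toward zero, so it serves as a direct reference for srem here; lane 0 (srem by 1) needs no code at all, which is why the shuffles above blend a zero into that element.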