From: Sanjay Patel Date: Thu, 17 Aug 2017 17:07:37 +0000 (+0000) Subject: [x86] add tests for vector select-of-constants; NFC X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=6257fc9a0b73a990fe931393667ccca702d042bf;p=llvm [x86] add tests for vector select-of-constants; NFC We've discussed canonicalizing to this form in IR, so the backend should be prepared to lower these in ways better than what we see here in most cases. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@311103 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/test/CodeGen/X86/vselect-constants.ll b/test/CodeGen/X86/vselect-constants.ll new file mode 100644 index 00000000000..838c03500c6 --- /dev/null +++ b/test/CodeGen/X86/vselect-constants.ll @@ -0,0 +1,273 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX + +; First, check the generic pattern for any 2 vector constants. Then, check special cases where +; the constants are all off-by-one. Finally, check the extra special cases where the constants +; include 0 or -1. +; Each minimal select test is repeated with a more typical pattern that includes a compare to +; generate the condition value. 
+
+define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
+; SSE-LABEL: sel_C1_or_C2_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sel_C1_or_C2_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: retq
+ %add = select <4 x i1> %cond, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: cmp_sel_C1_or_C2_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: cmp_sel_C1_or_C2_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: retq
+ %cond = icmp eq <4 x i32> %x, %y
+ %add = select <4 x i1> %cond, <4 x i32> <i32 3000, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
+; SSE-LABEL: sel_Cplus1_or_C_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sel_Cplus1_or_C_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: retq
+ %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: cmp_sel_Cplus1_or_C_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: cmp_sel_Cplus1_or_C_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: retq
+ %cond = icmp eq <4 x i32> %x, %y
+ %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
+; SSE-LABEL: sel_Cminus1_or_C_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sel_Cminus1_or_C_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [44,2,0,1]
+; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: retq
+ %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: cmp_sel_Cminus1_or_C_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: cmp_sel_Cminus1_or_C_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [44,2,0,1]
+; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: retq
+ %cond = icmp eq <4 x i32> %x, %y
+ %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
+; SSE-LABEL: sel_minus1_or_0_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sel_minus1_or_0_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: cmp_sel_minus1_or_0_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: cmp_sel_minus1_or_0_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cond = icmp eq <4 x i32> %x, %y
+ %add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
+; SSE-LABEL: sel_0_or_minus1_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sel_0_or_minus1_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: cmp_sel_0_or_minus1_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE-NEXT: pcmpeqd %xmm1, %xmm1
+; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: cmp_sel_0_or_minus1_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cond = icmp eq <4 x i32> %x, %y
+ %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
+; SSE-LABEL: sel_1_or_0_vec:
+; SSE: # BB#0:
+; SSE-NEXT: andps {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sel_1_or_0_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: cmp_sel_1_or_0_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE-NEXT: psrld $31, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: cmp_sel_1_or_0_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cond = icmp eq <4 x i32> %x, %y
+ %add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
+; SSE-LABEL: sel_0_or_1_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pslld $31, %xmm0
+; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pandn {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sel_0_or_1_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [1,1,1,1]
+; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: retq
+ %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %add
+}
+
+define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) {
+; SSE-LABEL: cmp_sel_0_or_1_vec:
+; SSE: # BB#0:
+; SSE-NEXT: pcmpeqd %xmm1, %xmm0
+; SSE-NEXT: pandn {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: cmp_sel_0_or_1_vec:
+; AVX: # BB#0:
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpandn {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %cond = icmp eq <4 x i32> %x, %y
+ %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %add
+}