From: Roman Lebedev
Date: Thu, 23 May 2019 10:55:13 +0000 (+0000)
Subject: [NFC][X86] Fix check prefixes and autogenerate fold-pcmpeqd-2.ll test
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=685cac1a6b03f4a9858a69469236bcccacc4a9ca;p=llvm

[NFC][X86] Fix check prefixes and autogenerate fold-pcmpeqd-2.ll test

The test is affected by the (sub %x, c) -> (add %x, (sub 0, c)) patch in a
non-obvious way, so switch it to autogenerated assertions. An illustrative
sketch of the transform follows the diff below.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@361483 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/fold-pcmpeqd-2.ll b/test/CodeGen/X86/fold-pcmpeqd-2.ll
index d95c6323de4..55c3287028c 100644
--- a/test/CodeGen/X86/fold-pcmpeqd-2.ll
+++ b/test/CodeGen/X86/fold-pcmpeqd-2.ll
@@ -1,5 +1,6 @@
-; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah -regalloc=basic | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -regalloc=basic | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-apple-darwin -mcpu=yonah -regalloc=basic | FileCheck %s --check-prefixes=ALL,X32
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -regalloc=basic | FileCheck %s --check-prefixes=ALL,X64
 
 ; This testcase should need to spill the -1 value on both x86-32 and x86-64,
 ; so it shouldn't use pcmpeqd to materialize an all-ones vector; it
@@ -7,18 +8,177 @@
 ;
 ; RAGreedy defeats the test by splitting live ranges.
 
-; Constant pool all-ones vector:
-; CHECK: .space 16,255
-
-; No pcmpeqd instructions, everybody uses the constant pool.
-; CHECK-LABEL: program_1:
-; CHECK-NOT: pcmpeqd
+; There should be no pcmpeqd instructions, everybody should use the constant pool.
 
 %struct.__ImageExecInfo = type <{ <4 x i32>, <4 x float>, <2 x i64>, i8*, i8*, i8*, i32, i32, i32, i32, i32 }>
 %struct._cl_image_format_t = type <{ i32, i32, i32 }>
 %struct._image2d_t = type <{ i8*, %struct._cl_image_format_t, i32, i32, i32, i32, i32, i32 }>
 
 define void @program_1(%struct._image2d_t* %dest, %struct._image2d_t* %t0, <4 x float> %p0, <4 x float> %p1, <4 x float> %p4, <4 x float> %p5, <4 x float> %p6) nounwind {
+; X32-LABEL: program_1:
+; X32: ## %bb.0: ## %entry
+; X32-NEXT: cmpl $0, 0
+; X32-NEXT: jle LBB0_2
+; X32-NEXT: ## %bb.1: ## %forcond
+; X32-NEXT: cmpl $0, 0
+; X32-NEXT: jg LBB0_3
+; X32-NEXT: LBB0_2: ## %ifthen
+; X32-NEXT: retl
+; X32-NEXT: LBB0_3: ## %forbody
+; X32-NEXT: pushl %esi
+; X32-NEXT: subl $88, %esp
+; X32-NEXT: movaps {{.*#+}} xmm1 = [1.28E+2,1.28E+2,1.28E+2,1.28E+2]
+; X32-NEXT: minps LCPI0_3, %xmm1
+; X32-NEXT: cvttps2dq %xmm1, %xmm0
+; X32-NEXT: cvtdq2ps %xmm0, %xmm0
+; X32-NEXT: subps %xmm0, %xmm1
+; X32-NEXT: movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: mulps LCPI0_3, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: addps LCPI0_1, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: mulps %xmm1, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: addps LCPI0_2, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: psubd LCPI0_4, %xmm0
+; X32-NEXT: movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: mulps LCPI0_3, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: xorps %xmm0, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: mulps %xmm0, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: mulps LCPI0_3, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: xorps %xmm0, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: cmpunordps %xmm0, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: minps LCPI0_3, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: xorps %xmm0, %xmm0
+; X32-NEXT: movaps %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT: movl $0, (%esp)
+; X32-NEXT: xorl %esi, %esi
+; X32-NEXT: xorps %xmm3, %xmm3
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
+; X32-NEXT: calll *%esi
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: minps LCPI0_3, %xmm0
+; X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: pxor %xmm1, %xmm1
+; X32-NEXT: psubd {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Folded Reload
+; X32-NEXT: movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: psubd LCPI0_4, %xmm0
+; X32-NEXT: movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: movdqa {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: por %xmm1, %xmm0
+; X32-NEXT: movdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) ## 16-byte Spill
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: movdqa %xmm0, {{[0-9]+}}(%esp)
+; X32-NEXT: movl $0, (%esp)
+; X32-NEXT: xorps %xmm3, %xmm3
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 ## 16-byte Reload
+; X32-NEXT: movaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 ## 16-byte Reload
+; X32-NEXT: calll *%esi
+; X32-NEXT: ud2
+;
+; X64-LABEL: program_1:
+; X64: ## %bb.0: ## %entry
+; X64-NEXT: cmpl $0, 0
+; X64-NEXT: jle LBB0_2
+; X64-NEXT: ## %bb.1: ## %forcond
+; X64-NEXT: cmpl $0, 0
+; X64-NEXT: jg LBB0_3
+; X64-NEXT: LBB0_2: ## %ifthen
+; X64-NEXT: retq
+; X64-NEXT: LBB0_3: ## %forbody
+; X64-NEXT: pushq %rbx
+; X64-NEXT: subq $64, %rsp
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; X64-NEXT: movaps {{.*#+}} xmm1 = [1.28E+2,1.28E+2,1.28E+2,1.28E+2]
+; X64-NEXT: minps {{.*}}(%rip), %xmm1
+; X64-NEXT: cvttps2dq %xmm1, %xmm0
+; X64-NEXT: cvtdq2ps %xmm0, %xmm0
+; X64-NEXT: subps %xmm0, %xmm1
+; X64-NEXT: movaps %xmm1, (%rsp) ## 16-byte Spill
+; X64-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; X64-NEXT: mulps {{.*}}(%rip), %xmm0
+; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
+; X64-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; X64-NEXT: addps {{.*}}(%rip), %xmm0
+; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
+; X64-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; X64-NEXT: mulps %xmm1, %xmm0
+; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
+; X64-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; X64-NEXT: addps {{.*}}(%rip), %xmm0
+; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
+; X64-NEXT: movdqa (%rsp), %xmm0 ## 16-byte Reload
+; X64-NEXT: psubd {{.*}}(%rip), %xmm0
+; X64-NEXT: movdqa %xmm0, (%rsp) ## 16-byte Spill
+; X64-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; X64-NEXT: mulps {{.*}}(%rip), %xmm0
+; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; X64-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X64-NEXT: mulps %xmm0, %xmm0
+; X64-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; X64-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; X64-NEXT: mulps {{.*}}(%rip), %xmm0
+; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
+; X64-NEXT: xorps %xmm0, %xmm0
+; X64-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; X64-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X64-NEXT: cmpunordps %xmm0, %xmm0
+; X64-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; X64-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X64-NEXT: minps {{.*}}(%rip), %xmm0
+; X64-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; X64-NEXT: xorl %ebx, %ebx
+; X64-NEXT: xorps %xmm3, %xmm3
+; X64-NEXT: xorps %xmm4, %xmm4
+; X64-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; X64-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; X64-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 ## 16-byte Reload
+; X64-NEXT: xorl %edi, %edi
+; X64-NEXT: callq *%rbx
+; X64-NEXT: movaps (%rsp), %xmm0 ## 16-byte Reload
+; X64-NEXT: minps {{.*}}(%rip), %xmm0
+; X64-NEXT: movaps %xmm0, (%rsp) ## 16-byte Spill
+; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X64-NEXT: psubd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
+; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; X64-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X64-NEXT: psubd {{.*}}(%rip), %xmm0
+; X64-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; X64-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X64-NEXT: orps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Folded Reload
+; X64-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
+; X64-NEXT: xorps %xmm3, %xmm3
+; X64-NEXT: xorps %xmm4, %xmm4
+; X64-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
+; X64-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 ## 16-byte Reload
+; X64-NEXT: movaps (%rsp), %xmm2 ## 16-byte Reload
+; X64-NEXT: xorl %edi, %edi
+; X64-NEXT: callq *%rbx
+; X64-NEXT: ud2
 entry:
   %tmp3.i = load i32, i32* null   ; <i32> [#uses=1]
   %cmp = icmp slt i32 0, %tmp3.i   ; <i1> [#uses=1]
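;
; Illustrative sketch, not part of the original commit: the combine named in
; the commit message rewrites a subtract-of-constant as an add of the negated
; constant. The value %x and the constant 7 below are hypothetical, and the
; transform actually fires on the SelectionDAG; it is shown as LLVM IR purely
; for readability:
;
;   %r = sub <4 x i32> %x, <i32 7, i32 7, i32 7, i32 7>
;
; becomes, once (sub 0, c) is constant-folded:
;
;   %r = add <4 x i32> %x, <i32 -7, i32 -7, i32 -7, i32 -7>
;
; Negating the constants changes which vectors reach the constant pool, so the
; psubd/spill sequences in the checks above can shift in hard-to-predict ways;
; regenerating the assertions is safer than hand-updating them.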