From: Sanjay Patel
Date: Sun, 2 Jul 2017 15:24:08 +0000 (+0000)
Subject: [x86] auto-generate complete checks for tests; NFC
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=6d8a69ea00655b37f123dc578056aea8497f1c39;p=llvm

[x86] auto-generate complete checks for tests; NFC

These all used 'CHECK-NOT', which isn't necessary if we have complete checks.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@306984 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/pr15309.ll b/test/CodeGen/X86/pr15309.ll
index e9d9b9e54c1..0301b58def1 100644
--- a/test/CodeGen/X86/pr15309.ll
+++ b/test/CodeGen/X86/pr15309.ll
@@ -1,15 +1,43 @@
-; RUN: llc < %s -mtriple=i686-linux-pc -mcpu=corei7 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-linux-pc | FileCheck %s

-define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>* nocapture %dest) noinline {
-L.entry:
- %0 = getelementptr <2 x i64>, <2 x i64>* %src, i32 10
- %1 = load <2 x i64>, <2 x i64>* %0, align 16
- %2 = uitofp <2 x i64> %1 to <2 x float>
- %3 = getelementptr <2 x float>, <2 x float>* %dest, i32 10
- store <2 x float> %2, <2 x float>* %3, align 8
+define void @test_convert_float2_ulong2(<2 x i64>* nocapture %src, <2 x float>* nocapture %dest) nounwind {
+; CHECK-LABEL: test_convert_float2_ulong2:
+; CHECK: # BB#0:
+; CHECK-NEXT: pushl %edi
+; CHECK-NEXT: pushl %esi
+; CHECK-NEXT: subl $20, %esp
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
+; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; CHECK-NEXT: movl 168(%ecx), %edx
+; CHECK-NEXT: movl 172(%ecx), %esi
+; CHECK-NEXT: movl 160(%ecx), %edi
+; CHECK-NEXT: movl 164(%ecx), %ecx
+; CHECK-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edi, (%esp)
+; CHECK-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; CHECK-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: testl %ecx, %ecx
+; CHECK-NEXT: setns %dl
+; CHECK-NEXT: fildll (%esp)
+; CHECK-NEXT: fadds {{\.LCPI.*}}(,%edx,4)
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: testl %esi, %esi
+; CHECK-NEXT: setns %cl
+; CHECK-NEXT: fildll {{[0-9]+}}(%esp)
+; CHECK-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
+; CHECK-NEXT: fstps 84(%eax)
+; CHECK-NEXT: fstps 80(%eax)
+; CHECK-NEXT: addl $20, %esp
+; CHECK-NEXT: popl %esi
+; CHECK-NEXT: popl %edi
+; CHECK-NEXT: retl
+ %t0 = getelementptr <2 x i64>, <2 x i64>* %src, i32 10
+ %t1 = load <2 x i64>, <2 x i64>* %t0, align 16
+ %t2 = uitofp <2 x i64> %t1 to <2 x float>
+ %t3 = getelementptr <2 x float>, <2 x float>* %dest, i32 10
+ store <2 x float> %t2, <2 x float>* %t3, align 8
 ret void
 }

-; CHECK: test_convert_float2_ulong2
-; CHECK-NOT: cvtpd2ps
-; CHECK: ret

diff --git a/test/CodeGen/X86/pr23603.ll b/test/CodeGen/X86/pr23603.ll
index 6f856aedb8d..315e6076861 100644
--- a/test/CodeGen/X86/pr23603.ll
+++ b/test/CodeGen/X86/pr23603.ll
@@ -1,14 +1,29 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

 declare void @free_v()

-define void @f(i32* %x, i32 %c32, i32* %y) {
-; CHECK-LABEL: f
+define void @f(i32* %x, i32 %c32, i32* %y) nounwind {
+; CHECK-LABEL: f:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: pushq %rbp
+; CHECK-NEXT: pushq %r14
+; CHECK-NEXT: pushq %rbx
+; CHECK-NEXT: movq %rdx, %r14
+; CHECK-NEXT: movl %esi, %ebp
+; CHECK-NEXT: movl (%rdi), %ebx
+; CHECK-NEXT: callq free_v
+; CHECK-NEXT: testl %ebp, %ebp
+; CHECK-NEXT: je .LBB0_2
+; CHECK-NEXT: # BB#1: # %left
+; CHECK-NEXT: movl %ebx, (%r14)
+; CHECK-NEXT: .LBB0_2: # %merge
+; CHECK-NEXT: popq %rbx
+; CHECK-NEXT: popq %r14
+; CHECK-NEXT: popq %rbp
+; CHECK-NEXT: retq
 entry:
 %v = load i32, i32* %x, !invariant.load !0
-; CHECK: movl (%rdi), %ebx
-; CHECK: free_v
-; CHECK-NOT: movl (%rdi), %ebx
 call void @free_v()
 %c = icmp ne i32 %c32, 0
 br i1 %c, label %left, label %merge

diff --git a/test/CodeGen/X86/rotate4.ll b/test/CodeGen/X86/rotate4.ll
index 56a7d328505..c7117be91ab 100644
--- a/test/CodeGen/X86/rotate4.ll
+++ b/test/CodeGen/X86/rotate4.ll
@@ -1,17 +1,20 @@
-; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=generic | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

 ; Check that we recognize this idiom for rotation too:
 ; a << (b & (OpSize-1)) | a >> ((0 - b) & (OpSize-1))

 define i32 @rotate_left_32(i32 %a, i32 %b) {
 ; CHECK-LABEL: rotate_left_32:
-; CHECK-NOT: and
-; CHECK: roll
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: roll %cl, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
 %and = and i32 %b, 31
 %shl = shl i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
 %shr = lshr i32 %a, %and3
 %or = or i32 %shl, %shr
 ret i32 %or
@@ -19,13 +22,15 @@ entry:

 define i32 @rotate_right_32(i32 %a, i32 %b) {
 ; CHECK-LABEL: rotate_right_32:
-; CHECK-NOT: and
-; CHECK: rorl
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorl %cl, %edi
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
 %and = and i32 %b, 31
 %shl = lshr i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
 %shr = shl i32 %a, %and3
 %or = or i32 %shl, %shr
 ret i32 %or
@@ -33,13 +38,15 @@ entry:

 define i64 @rotate_left_64(i64 %a, i64 %b) {
 ; CHECK-LABEL: rotate_left_64:
-; CHECK-NOT: and
-; CHECK: rolq
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rolq %cl, %rdi
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
 %and = and i64 %b, 63
 %shl = shl i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
 %shr = lshr i64 %a, %and3
 %or = or i64 %shl, %shr
 ret i64 %or
@@ -47,13 +54,15 @@ entry:

 define i64 @rotate_right_64(i64 %a, i64 %b) {
 ; CHECK-LABEL: rotate_right_64:
-; CHECK-NOT: and
-; CHECK: rorq
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorq %cl, %rdi
+; CHECK-NEXT: movq %rdi, %rax
+; CHECK-NEXT: retq
 %and = and i64 %b, 63
 %shl = lshr i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
 %shr = shl i64 %a, %and3
 %or = or i64 %shl, %shr
 ret i64 %or
@@ -63,16 +72,15 @@ entry:

 define void @rotate_left_m32(i32 *%pa, i32 %b) {
 ; CHECK-LABEL: rotate_left_m32:
-; CHECK-NOT: and
-; CHECK: roll
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: roll %cl, (%rdi)
+; CHECK-NEXT: retq
 %a = load i32, i32* %pa, align 16
 %and = and i32 %b, 31
 %shl = shl i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
 %shr = lshr i32 %a, %and3
 %or = or i32 %shl, %shr
 store i32 %or, i32* %pa, align 32
@@ -81,16 +89,15 @@ entry:

 define void @rotate_right_m32(i32 *%pa, i32 %b) {
 ; CHECK-LABEL: rotate_right_m32:
-; CHECK-NOT: and
-; CHECK: rorl
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorl %cl, (%rdi)
+; CHECK-NEXT: retq
 %a = load i32, i32* %pa, align 16
 %and = and i32 %b, 31
 %shl = lshr i32 %a, %and
- %0 = sub i32 0, %b
- %and3 = and i32 %0, 31
+ %t0 = sub i32 0, %b
+ %and3 = and i32 %t0, 31
 %shr = shl i32 %a, %and3
 %or = or i32 %shl, %shr
 store i32 %or, i32* %pa, align 32
@@ -99,16 +106,15 @@ entry:

 define void @rotate_left_m64(i64 *%pa, i64 %b) {
 ; CHECK-LABEL: rotate_left_m64:
-; CHECK-NOT: and
-; CHECK: rolq
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rolq %cl, (%rdi)
+; CHECK-NEXT: retq
 %a = load i64, i64* %pa, align 16
 %and = and i64 %b, 63
 %shl = shl i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
 %shr = lshr i64 %a, %and3
 %or = or i64 %shl, %shr
 store i64 %or, i64* %pa, align 64
@@ -117,18 +123,18 @@ entry:

 define void @rotate_right_m64(i64 *%pa, i64 %b) {
 ; CHECK-LABEL: rotate_right_m64:
-; CHECK-NOT: and
-; CHECK: rorq
-; no store:
-; CHECK-NOT: mov
-entry:
+; CHECK: # BB#0:
+; CHECK-NEXT: movl %esi, %ecx
+; CHECK-NEXT: rorq %cl, (%rdi)
+; CHECK-NEXT: retq
 %a = load i64, i64* %pa, align 16
 %and = and i64 %b, 63
 %shl = lshr i64 %a, %and
- %0 = sub i64 0, %b
- %and3 = and i64 %0, 63
+ %t0 = sub i64 0, %b
+ %and3 = and i64 %t0, 63
 %shr = shl i64 %a, %and3
 %or = or i64 %shl, %shr
 store i64 %or, i64* %pa, align 64
 ret void
 }
+

diff --git a/test/CodeGen/X86/vec_return.ll b/test/CodeGen/X86/vec_return.ll
index f7fcd032cab..556e32d0c87 100644
--- a/test/CodeGen/X86/vec_return.ll
+++ b/test/CodeGen/X86/vec_return.ll
@@ -1,16 +1,21 @@
-; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s

 ; Without any typed operations, always use the smaller xorps.
-; CHECK: test
-; CHECK: xorps
 define <2 x double> @test() {
+; CHECK-LABEL: test:
+; CHECK: # BB#0:
+; CHECK-NEXT: xorps %xmm0, %xmm0
+; CHECK-NEXT: retl
 ret <2 x double> zeroinitializer
 }

 ; Prefer a constant pool load here.
-; CHECK: test2
-; CHECK-NOT: shuf
-; CHECK: movaps {{.*}}{{CPI|__xmm@}}
 define <4 x i32> @test2() nounwind {
+; CHECK-LABEL: test2:
+; CHECK: # BB#0:
+; CHECK-NEXT: movaps {{.*#+}} xmm0 = [0,0,1,0]
+; CHECK-NEXT: retl
 ret <4 x i32> < i32 0, i32 0, i32 1, i32 0 >
 }
+
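
The NOTE lines above record how these assertions were produced. A minimal sketch of that workflow, assuming an LLVM source tree with a built llc on PATH (the script path comes straight from the NOTE lines; exact options may vary by LLVM version):

  # Run llc on the test and rewrite its assertions in place, so that
  # every emitted instruction is pinned by a CHECK/CHECK-NEXT line.
  utils/update_llc_test_checks.py test/CodeGen/X86/rotate4.ll

With every instruction pinned by CHECK-NEXT, an unexpected 'and' or a stray store already fails the match on its own, which is why the hand-written CHECK-NOT lines removed here are redundant.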