From 6661e7650508b51de2eaf95ed79c2f2268cb6e15 Mon Sep 17 00:00:00 2001 From: Sanjay Patel Date: Sun, 18 Jun 2017 21:30:57 +0000 Subject: [PATCH] [x86] specify triple and auto-generate checks; NFC git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@305654 91177308-0d34-0410-b5e6-96231b3b80d8 --- test/CodeGen/X86/widen_arith-1.ll | 76 +++++++++++++++------- test/CodeGen/X86/widen_arith-2.ll | 104 ++++++++++++++++++++---------- test/CodeGen/X86/widen_arith-3.ll | 88 +++++++++++++++++-------- 3 files changed, 182 insertions(+), 86 deletions(-) diff --git a/test/CodeGen/X86/widen_arith-1.ll b/test/CodeGen/X86/widen_arith-1.ll index 5663b8b4094..b087d44537a 100644 --- a/test/CodeGen/X86/widen_arith-1.ll +++ b/test/CodeGen/X86/widen_arith-1.ll @@ -1,44 +1,70 @@ -; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+sse4.2 | FileCheck %s +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s define void @update(<3 x i8>* %dst, <3 x i8>* %src, i32 %n) nounwind { +; CHECK-LABEL: update: +; CHECK: # BB#0: # %entry +; CHECK-NEXT: subl $12, %esp +; CHECK-NEXT: movl $0, (%esp) +; CHECK-NEXT: movdqa {{.*#+}} xmm0 = <1,1,1,u> +; CHECK-NEXT: movdqa {{.*#+}} xmm1 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u> +; CHECK-NEXT: jmp .LBB0_1 +; CHECK-NEXT: .p2align 4, 0x90 +; CHECK-NEXT: .LBB0_2: # %forbody +; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1 +; CHECK-NEXT: movl (%esp), %eax +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx +; CHECK-NEXT: pmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero +; CHECK-NEXT: paddd %xmm0, %xmm2 +; CHECK-NEXT: pextrb $8, %xmm2, 2(%ecx,%eax,4) +; CHECK-NEXT: pshufb %xmm1, %xmm2 +; CHECK-NEXT: pextrw $0, %xmm2, (%ecx,%eax,4) +; CHECK-NEXT: incl (%esp) +; CHECK-NEXT: .LBB0_1: # %forcond +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: movl (%esp), %eax +; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: jl .LBB0_2 +; CHECK-NEXT: # BB#3: # %afterfor +; CHECK-NEXT: addl $12, %esp +; CHECK-NEXT: retl entry: -; CHECK-NOT: pextrw -; CHECK: add - - %dst.addr = alloca <3 x i8>* ; <<3 x i8>**> [#uses=2] - %src.addr = alloca <3 x i8>* ; <<3 x i8>**> [#uses=2] - %n.addr = alloca i32 ; [#uses=2] - %i = alloca i32, align 4 ; [#uses=6] + %dst.addr = alloca <3 x i8>* + %src.addr = alloca <3 x i8>* + %n.addr = alloca i32 + %i = alloca i32, align 4 store <3 x i8>* %dst, <3 x i8>** %dst.addr store <3 x i8>* %src, <3 x i8>** %src.addr store i32 %n, i32* %n.addr store i32 0, i32* %i br label %forcond -forcond: ; preds = %forinc, %entry - %tmp = load i32, i32* %i ; [#uses=1] - %tmp1 = load i32, i32* %n.addr ; [#uses=1] - %cmp = icmp slt i32 %tmp, %tmp1 ; [#uses=1] +forcond: + %tmp = load i32, i32* %i + %tmp1 = load i32, i32* %n.addr + %cmp = icmp slt i32 %tmp, %tmp1 br i1 %cmp, label %forbody, label %afterfor -forbody: ; preds = %forcond - %tmp2 = load i32, i32* %i ; [#uses=1] - %tmp3 = load <3 x i8>*, <3 x i8>** %dst.addr ; <<3 x i8>*> [#uses=1] - %arrayidx = getelementptr <3 x i8>, <3 x i8>* %tmp3, i32 %tmp2 ; <<3 x i8>*> [#uses=1] - %tmp4 = load i32, i32* %i ; [#uses=1] - %tmp5 = load <3 x i8>*, <3 x i8>** %src.addr ; <<3 x i8>*> [#uses=1] - %arrayidx6 = getelementptr <3 x i8>, <3 x i8>* %tmp5, i32 %tmp4 ; <<3 x i8>*> [#uses=1] - %tmp7 = load <3 x i8>, <3 x i8>* %arrayidx6 ; <<3 x i8>> [#uses=1] - %add = add <3 x i8> %tmp7, < i8 1, i8 1, i8 1 > ; <<3 x i8>> [#uses=1] +forbody: + %tmp2 = 
load i32, i32* %i + %tmp3 = load <3 x i8>*, <3 x i8>** %dst.addr + %arrayidx = getelementptr <3 x i8>, <3 x i8>* %tmp3, i32 %tmp2 + %tmp4 = load i32, i32* %i + %tmp5 = load <3 x i8>*, <3 x i8>** %src.addr + %arrayidx6 = getelementptr <3 x i8>, <3 x i8>* %tmp5, i32 %tmp4 + %tmp7 = load <3 x i8>, <3 x i8>* %arrayidx6 + %add = add <3 x i8> %tmp7, < i8 1, i8 1, i8 1 > store <3 x i8> %add, <3 x i8>* %arrayidx br label %forinc -forinc: ; preds = %forbody - %tmp8 = load i32, i32* %i ; [#uses=1] - %inc = add i32 %tmp8, 1 ; [#uses=1] +forinc: + %tmp8 = load i32, i32* %i + %inc = add i32 %tmp8, 1 store i32 %inc, i32* %i br label %forcond -afterfor: ; preds = %forcond +afterfor: ret void } + diff --git a/test/CodeGen/X86/widen_arith-2.ll b/test/CodeGen/X86/widen_arith-2.ll index 6c219c1720e..46cc0535d7c 100644 --- a/test/CodeGen/X86/widen_arith-2.ll +++ b/test/CodeGen/X86/widen_arith-2.ll @@ -1,59 +1,93 @@ -; RUN: llc < %s -march=x86 -mattr=+sse4.2 | FileCheck %s -; CHECK: padd -; CHECK: pand +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 | FileCheck %s ; widen v8i8 to v16i8 (checks even power of 2 widening with add & and) define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind { +; CHECK-LABEL: update: +; CHECK: # BB#0: # %entry +; CHECK-NEXT: subl $12, %esp +; CHECK-NEXT: movl $0, (%esp) +; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1,1,1,1,1] +; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [4,4,4,4,4,4,4,4] +; CHECK-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; CHECK-NEXT: jmp .LBB0_1 +; CHECK-NEXT: .p2align 4, 0x90 +; CHECK-NEXT: .LBB0_2: # %forbody +; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1 +; CHECK-NEXT: movl (%esp), %eax +; CHECK-NEXT: shll $3, %eax +; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) +; CHECK-NEXT: movl (%esp), %eax +; CHECK-NEXT: shll $3, %eax +; CHECK-NEXT: addl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) +; CHECK-NEXT: movl (%esp), %ecx +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %edx +; CHECK-NEXT: pmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; CHECK-NEXT: paddw %xmm0, %xmm3 +; CHECK-NEXT: pand %xmm1, %xmm3 +; CHECK-NEXT: pshufb %xmm2, %xmm3 +; CHECK-NEXT: movq %xmm3, (%edx,%ecx,8) +; CHECK-NEXT: incl (%esp) +; CHECK-NEXT: .LBB0_1: # %forcond +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: movl (%esp), %eax +; CHECK-NEXT: cmpl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: jl .LBB0_2 +; CHECK-NEXT: # BB#3: # %afterfor +; CHECK-NEXT: addl $12, %esp +; CHECK-NEXT: retl entry: - %dst_i.addr = alloca i64* ; [#uses=2] - %src_i.addr = alloca i64* ; [#uses=2] - %n.addr = alloca i32 ; [#uses=2] - %i = alloca i32, align 4 ; [#uses=8] - %dst = alloca <8 x i8>*, align 4 ; <<8 x i8>**> [#uses=2] - %src = alloca <8 x i8>*, align 4 ; <<8 x i8>**> [#uses=2] + %dst_i.addr = alloca i64* + %src_i.addr = alloca i64* + %n.addr = alloca i32 + %i = alloca i32, align 4 + %dst = alloca <8 x i8>*, align 4 + %src = alloca <8 x i8>*, align 4 store i64* %dst_i, i64** %dst_i.addr store i64* %src_i, i64** %src_i.addr store i32 %n, i32* %n.addr store i32 0, i32* %i br label %forcond -forcond: ; preds = %forinc, %entry - %tmp = load i32, i32* %i ; [#uses=1] - %tmp1 = load i32, i32* %n.addr ; [#uses=1] - %cmp = icmp slt i32 %tmp, %tmp1 ; [#uses=1] +forcond: + %tmp = load i32, i32* %i + %tmp1 = load i32, i32* %n.addr + %cmp = icmp slt i32 %tmp, %tmp1 br i1 
%cmp, label %forbody, label %afterfor -forbody: ; preds = %forcond - %tmp2 = load i32, i32* %i ; [#uses=1] - %tmp3 = load i64*, i64** %dst_i.addr ; [#uses=1] - %arrayidx = getelementptr i64, i64* %tmp3, i32 %tmp2 ; [#uses=1] - %conv = bitcast i64* %arrayidx to <8 x i8>* ; <<8 x i8>*> [#uses=1] +forbody: + %tmp2 = load i32, i32* %i + %tmp3 = load i64*, i64** %dst_i.addr + %arrayidx = getelementptr i64, i64* %tmp3, i32 %tmp2 + %conv = bitcast i64* %arrayidx to <8 x i8>* store <8 x i8>* %conv, <8 x i8>** %dst - %tmp4 = load i32, i32* %i ; [#uses=1] - %tmp5 = load i64*, i64** %src_i.addr ; [#uses=1] - %arrayidx6 = getelementptr i64, i64* %tmp5, i32 %tmp4 ; [#uses=1] - %conv7 = bitcast i64* %arrayidx6 to <8 x i8>* ; <<8 x i8>*> [#uses=1] + %tmp4 = load i32, i32* %i + %tmp5 = load i64*, i64** %src_i.addr + %arrayidx6 = getelementptr i64, i64* %tmp5, i32 %tmp4 + %conv7 = bitcast i64* %arrayidx6 to <8 x i8>* store <8 x i8>* %conv7, <8 x i8>** %src - %tmp8 = load i32, i32* %i ; [#uses=1] - %tmp9 = load <8 x i8>*, <8 x i8>** %dst ; <<8 x i8>*> [#uses=1] - %arrayidx10 = getelementptr <8 x i8>, <8 x i8>* %tmp9, i32 %tmp8 ; <<8 x i8>*> [#uses=1] - %tmp11 = load i32, i32* %i ; [#uses=1] - %tmp12 = load <8 x i8>*, <8 x i8>** %src ; <<8 x i8>*> [#uses=1] - %arrayidx13 = getelementptr <8 x i8>, <8 x i8>* %tmp12, i32 %tmp11 ; <<8 x i8>*> [#uses=1] - %tmp14 = load <8 x i8>, <8 x i8>* %arrayidx13 ; <<8 x i8>> [#uses=1] - %add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 > ; <<8 x i8>> [#uses=1] - %and = and <8 x i8> %add, < i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4 > ; <<8 x i8>> [#uses=1] + %tmp8 = load i32, i32* %i + %tmp9 = load <8 x i8>*, <8 x i8>** %dst + %arrayidx10 = getelementptr <8 x i8>, <8 x i8>* %tmp9, i32 %tmp8 + %tmp11 = load i32, i32* %i + %tmp12 = load <8 x i8>*, <8 x i8>** %src + %arrayidx13 = getelementptr <8 x i8>, <8 x i8>* %tmp12, i32 %tmp11 + %tmp14 = load <8 x i8>, <8 x i8>* %arrayidx13 + %add = add <8 x i8> %tmp14, < i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1 > + %and = and <8 x i8> %add, < i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4 > store <8 x i8> %and, <8 x i8>* %arrayidx10 br label %forinc -forinc: ; preds = %forbody - %tmp15 = load i32, i32* %i ; [#uses=1] - %inc = add i32 %tmp15, 1 ; [#uses=1] +forinc: + %tmp15 = load i32, i32* %i + %inc = add i32 %tmp15, 1 store i32 %inc, i32* %i br label %forcond -afterfor: ; preds = %forcond +afterfor: ret void } diff --git a/test/CodeGen/X86/widen_arith-3.ll b/test/CodeGen/X86/widen_arith-3.ll index aea7975a045..0520f52dd2a 100644 --- a/test/CodeGen/X86/widen_arith-3.ll +++ b/test/CodeGen/X86/widen_arith-3.ll @@ -1,18 +1,54 @@ -; RUN: llc < %s -mcpu=generic -march=x86 -mattr=+sse4.2 -post-RA-scheduler=true | FileCheck %s -; CHECK: paddd +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.2 -post-RA-scheduler=true | FileCheck %s ; Widen a v3i16 to v8i16 to do a vector add -@.str = internal constant [4 x i8] c"%d \00" ; <[4 x i8]*> [#uses=1] -@.str1 = internal constant [2 x i8] c"\0A\00" ; <[2 x i8]*> [#uses=1] +@.str = internal constant [4 x i8] c"%d \00" +@.str1 = internal constant [2 x i8] c"\0A\00" define void @update(<3 x i16>* %dst, <3 x i16>* %src, i32 %n) nounwind { +; CHECK-LABEL: update: +; CHECK: # BB#0: # %entry +; CHECK-NEXT: pushl %ebp +; CHECK-NEXT: movl %esp, %ebp +; CHECK-NEXT: andl $-8, %esp +; CHECK-NEXT: subl $40, %esp +; CHECK-NEXT: movl {{\.LCPI.*}}, %eax +; CHECK-NEXT: movdqa {{.*#+}} xmm0 = 
<1,1,1,u> +; CHECK-NEXT: movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] +; CHECK-NEXT: movl $0, {{[0-9]+}}(%esp) +; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp) +; CHECK-NEXT: movw $1, {{[0-9]+}}(%esp) +; CHECK-NEXT: jmp .LBB0_1 +; CHECK-NEXT: .p2align 4, 0x90 +; CHECK-NEXT: .LBB0_2: # %forbody +; CHECK-NEXT: # in Loop: Header=BB0_1 Depth=1 +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: movl 12(%ebp), %edx +; CHECK-NEXT: movl 8(%ebp), %ecx +; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero +; CHECK-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; CHECK-NEXT: pinsrd $2, 4(%edx,%eax,8), %xmm2 +; CHECK-NEXT: paddd %xmm0, %xmm2 +; CHECK-NEXT: pextrw $4, %xmm2, 4(%ecx,%eax,8) +; CHECK-NEXT: pshufb %xmm1, %xmm2 +; CHECK-NEXT: movd %xmm2, (%ecx,%eax,8) +; CHECK-NEXT: incl {{[0-9]+}}(%esp) +; CHECK-NEXT: .LBB0_1: # %forcond +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax +; CHECK-NEXT: cmpl 16(%ebp), %eax +; CHECK-NEXT: jl .LBB0_2 +; CHECK-NEXT: # BB#3: # %afterfor +; CHECK-NEXT: movl %ebp, %esp +; CHECK-NEXT: popl %ebp +; CHECK-NEXT: retl entry: - %dst.addr = alloca <3 x i16>* ; <<3 x i16>**> [#uses=2] - %src.addr = alloca <3 x i16>* ; <<3 x i16>**> [#uses=2] - %n.addr = alloca i32 ; [#uses=2] - %v = alloca <3 x i16>, align 8 ; <<3 x i16>*> [#uses=1] - %i = alloca i32, align 4 ; [#uses=6] + %dst.addr = alloca <3 x i16>* + %src.addr = alloca <3 x i16>* + %n.addr = alloca i32 + %v = alloca <3 x i16>, align 8 + %i = alloca i32, align 4 store <3 x i16>* %dst, <3 x i16>** %dst.addr store <3 x i16>* %src, <3 x i16>** %src.addr store i32 %n, i32* %n.addr @@ -20,31 +56,31 @@ entry: store i32 0, i32* %i br label %forcond -forcond: ; preds = %forinc, %entry - %tmp = load i32, i32* %i ; [#uses=1] - %tmp1 = load i32, i32* %n.addr ; [#uses=1] - %cmp = icmp slt i32 %tmp, %tmp1 ; [#uses=1] +forcond: + %tmp = load i32, i32* %i + %tmp1 = load i32, i32* %n.addr + %cmp = icmp slt i32 %tmp, %tmp1 br i1 %cmp, label %forbody, label %afterfor -forbody: ; preds = %forcond - %tmp2 = load i32, i32* %i ; [#uses=1] - %tmp3 = load <3 x i16>*, <3 x i16>** %dst.addr ; <<3 x i16>*> [#uses=1] - %arrayidx = getelementptr <3 x i16>, <3 x i16>* %tmp3, i32 %tmp2 ; <<3 x i16>*> [#uses=1] - %tmp4 = load i32, i32* %i ; [#uses=1] - %tmp5 = load <3 x i16>*, <3 x i16>** %src.addr ; <<3 x i16>*> [#uses=1] - %arrayidx6 = getelementptr <3 x i16>, <3 x i16>* %tmp5, i32 %tmp4 ; <<3 x i16>*> [#uses=1] - %tmp7 = load <3 x i16>, <3 x i16>* %arrayidx6 ; <<3 x i16>> [#uses=1] - %add = add <3 x i16> %tmp7, < i16 1, i16 1, i16 1 > ; <<3 x i16>> [#uses=1] +forbody: + %tmp2 = load i32, i32* %i + %tmp3 = load <3 x i16>*, <3 x i16>** %dst.addr + %arrayidx = getelementptr <3 x i16>, <3 x i16>* %tmp3, i32 %tmp2 + %tmp4 = load i32, i32* %i + %tmp5 = load <3 x i16>*, <3 x i16>** %src.addr + %arrayidx6 = getelementptr <3 x i16>, <3 x i16>* %tmp5, i32 %tmp4 + %tmp7 = load <3 x i16>, <3 x i16>* %arrayidx6 + %add = add <3 x i16> %tmp7, < i16 1, i16 1, i16 1 > store <3 x i16> %add, <3 x i16>* %arrayidx br label %forinc -forinc: ; preds = %forbody - %tmp8 = load i32, i32* %i ; [#uses=1] - %inc = add i32 %tmp8, 1 ; [#uses=1] +forinc: + %tmp8 = load i32, i32* %i + %inc = add i32 %tmp8, 1 store i32 %inc, i32* %i br label %forcond -afterfor: ; preds = %forcond +afterfor: ret void } -- 2.50.1
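
For reference, the CHECK blocks in these tests come from LLVM's utils/update_llc_test_checks.py, as noted in the autogenerated header of each file. A sketch of how they would be regenerated, assuming a locally built llc at build/bin/llc (the build path is an assumption, and the exact option spelling may vary between LLVM revisions):

    utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        test/CodeGen/X86/widen_arith-1.ll \
        test/CodeGen/X86/widen_arith-2.ll \
        test/CodeGen/X86/widen_arith-3.ll

The script reruns each test's RUN line (here: llc -mtriple=i686-unknown-unknown -mattr=+sse4.2) and rewrites the CHECK/CHECK-NEXT assertions to match current codegen, which is why the patch can pin down full instruction sequences while remaining NFC.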