From: Simon Pilgrim
Date: Fri, 10 Mar 2017 15:41:05 +0000 (+0000)
Subject: [X86][MMX] Regenerate mmx load folding tests
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=9e4921a584e489e8ca03bb2a6d7cdfed82e08468;p=llvm

[X86][MMX] Regenerate mmx load folding tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@297470 91177308-0d34-0410-b5e6-96231b3b80d8
---
diff --git a/test/CodeGen/X86/mmx-fold-load.ll b/test/CodeGen/X86/mmx-fold-load.ll
index 2b9d30f59fd..9505adbabf8 100644
--- a/test/CodeGen/X86/mmx-fold-load.ll
+++ b/test/CodeGen/X86/mmx-fold-load.ll
@@ -1,12 +1,31 @@
-; RUN: llc < %s -march=x86-64 -mattr=+mmx,+sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X64

-define i64 @t0(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t0:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1:[a-z]+]]), %mm0
-; CHECK-NEXT: psllq (%[[REG2:[a-z]+]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t0(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t0:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: psllq (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t0:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: psllq (%rsi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast <1 x i64>* %a to x86_mmx*
   %1 = load x86_mmx, x86_mmx* %0, align 8
@@ -17,13 +36,30 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.pslli.q(x86_mmx, i32)

-define i64 @t1(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t1:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psrlq (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t1(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t1:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: psrlq (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t1:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: psrlq (%rsi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast <1 x i64>* %a to x86_mmx*
   %1 = load x86_mmx, x86_mmx* %0, align 8
@@ -34,13 +70,30 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.psrli.q(x86_mmx, i32)

-define i64 @t2(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t2:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psllw (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t2(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t2:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: psllw (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t2:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: psllw (%rsi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast <1 x i64>* %a to x86_mmx*
   %1 = load x86_mmx, x86_mmx* %0, align 8
@@ -51,13 +104,30 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx, i32)

-define i64 @t3(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t3:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psrlw (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t3(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t3:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: psrlw (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t3:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: psrlw (%rsi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast <1 x i64>* %a to x86_mmx*
   %1 = load x86_mmx, x86_mmx* %0, align 8
@@ -68,13 +138,30 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx, i32)

-define i64 @t4(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t4:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: pslld (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t4(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t4:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: pslld (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t4:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: pslld (%rsi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast <1 x i64>* %a to x86_mmx*
   %1 = load x86_mmx, x86_mmx* %0, align 8
@@ -85,13 +172,30 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx, i32)

-define i64 @t5(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t5:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psrld (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t5(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t5:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: psrld (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t5:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: psrld (%rsi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast <1 x i64>* %a to x86_mmx*
   %1 = load x86_mmx, x86_mmx* %0, align 8
@@ -102,13 +206,30 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx, i32)

-define i64 @t6(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t6:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psraw (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t6(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t6:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: psraw (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t6:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: psraw (%rsi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast <1 x i64>* %a to x86_mmx*
   %1 = load x86_mmx, x86_mmx* %0, align 8
@@ -119,13 +240,30 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx, i32)

-define i64 @t7(<1 x i64>* %a, i32* %b) {
-; CHECK-LABEL: t7:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: movq (%[[REG1]]), %mm0
-; CHECK-NEXT: psrad (%[[REG2]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: retq
+define i64 @t7(<1 x i64>* %a, i32* %b) nounwind {
+; X86-LABEL: t7:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 12(%ebp), %eax
+; X86-NEXT: movl 8(%ebp), %ecx
+; X86-NEXT: movq (%ecx), %mm0
+; X86-NEXT: psrad (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: t7:
+; X64: # BB#0: # %entry
+; X64-NEXT: movq (%rdi), %mm0
+; X64-NEXT: psrad (%rsi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: retq
 entry:
   %0 = bitcast <1 x i64>* %a to x86_mmx*
   %1 = load x86_mmx, x86_mmx* %0, align 8
@@ -136,13 +274,29 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx, i32)

-define i64 @tt0(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt0:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddb (%[[REG3:[a-z]+]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt0(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt0:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddb (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt0:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddb (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
 entry:
   %v = load x86_mmx, x86_mmx* %q
   %u = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %t, x86_mmx %v)
@@ -153,13 +307,29 @@ entry:
 declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
 declare void @llvm.x86.mmx.emms()

-define i64 @tt1(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt1:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddw (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt1(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt1:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddw (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt1:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddw (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
 entry:
   %v = load x86_mmx, x86_mmx* %q
   %u = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %t, x86_mmx %v)
@@ -169,13 +339,29 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)

-define i64 @tt2(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt2:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddd (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt2(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt2:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddd (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt2:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddd (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
 entry:
   %v = load x86_mmx, x86_mmx* %q
   %u = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %t, x86_mmx %v)
@@ -185,13 +371,29 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)

-define i64 @tt3(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt3:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddq (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt3(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt3:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddq (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt3:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddq (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
 entry:
   %v = load x86_mmx, x86_mmx* %q
   %u = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %t, x86_mmx %v)
@@ -201,13 +403,29 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)

-define i64 @tt4(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt4:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddusb (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt4(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt4:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddusb (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt4:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddusb (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
 entry:
   %v = load x86_mmx, x86_mmx* %q
   %u = tail call x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx %t, x86_mmx %v)
@@ -217,13 +435,29 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.paddus.b(x86_mmx, x86_mmx)

-define i64 @tt5(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt5:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: paddusw (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt5(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt5:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: paddusw (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt5:
+; X64: # BB#0: # %entry
+; X64-NEXT: paddusw (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
 entry:
   %v = load x86_mmx, x86_mmx* %q
   %u = tail call x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx %t, x86_mmx %v)
@@ -233,13 +467,29 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.paddus.w(x86_mmx, x86_mmx)

-define i64 @tt6(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt6:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: psrlw (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt6(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt6:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: psrlw (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt6:
+; X64: # BB#0: # %entry
+; X64-NEXT: psrlw (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
 entry:
   %v = load x86_mmx, x86_mmx* %q
   %u = tail call x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx %t, x86_mmx %v)
@@ -249,13 +499,29 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.psrl.w(x86_mmx, x86_mmx)

-define i64 @tt7(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt7:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: psrld (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt7(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt7:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: psrld (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt7:
+; X64: # BB#0: # %entry
+; X64-NEXT: psrld (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
 entry:
   %v = load x86_mmx, x86_mmx* %q
   %u = tail call x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx %t, x86_mmx %v)
@@ -265,13 +531,29 @@ entry:
 }
 declare x86_mmx @llvm.x86.mmx.psrl.d(x86_mmx, x86_mmx)

-define i64 @tt8(x86_mmx %t, x86_mmx* %q) {
-; CHECK-LABEL: tt8:
-; CHECK: # BB#0:{{.*}} %entry
-; CHECK: psrlq (%[[REG3]]), %mm0
-; CHECK-NEXT: movd %mm0, %rax
-; CHECK-NEXT: emms
-; CHECK-NEXT: retq
+define i64 @tt8(x86_mmx %t, x86_mmx* %q) nounwind {
+; X86-LABEL: tt8:
+; X86: # BB#0: # %entry
+; X86-NEXT: pushl %ebp
+; X86-NEXT: movl %esp, %ebp
+; X86-NEXT: andl $-8, %esp
+; X86-NEXT: subl $8, %esp
+; X86-NEXT: movl 8(%ebp), %eax
+; X86-NEXT: psrlq (%eax), %mm0
+; X86-NEXT: movq %mm0, (%esp)
+; X86-NEXT: movl (%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT: emms
+; X86-NEXT: movl %ebp, %esp
+; X86-NEXT: popl %ebp
+; X86-NEXT: retl
+;
+; X64-LABEL: tt8:
+; X64: # BB#0: # %entry
+; X64-NEXT: psrlq (%rdi), %mm0
+; X64-NEXT: movd %mm0, %rax
+; X64-NEXT: emms
+; X64-NEXT: retq
 entry:
   %v = load x86_mmx, x86_mmx* %q
   %u = tail call x86_mmx @llvm.x86.mmx.psrl.q(x86_mmx %t, x86_mmx %v)