From c49eec7f315ac6ac31ba2941e84dd5a67788b72f Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 18 Jul 2017 14:19:34 +0000
Subject: [PATCH] [X86] Added cmov target to memcmp test

As discussed by @spatel on D35067:

"I added the cmov attribute to the 32-bit codegen test because it removes
some noise for that file. I think the intent for the SSE vs no-SSE runs is
to show the potential difference for the 16 and 32 byte cases rather than
the lack of cmov (which has been available for all CPUs since SSE1, so
that's why it shows up automatically with -mattr=sse2)."

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@308309 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/memcmp.ll | 108 +++++++++++--------------------------
 1 file changed, 32 insertions(+), 76 deletions(-)

diff --git a/test/CodeGen/X86/memcmp.ll b/test/CodeGen/X86/memcmp.ll
index 0e09abf73c8..e7ba32cab0a 100644
--- a/test/CodeGen/X86/memcmp.ll
+++ b/test/CodeGen/X86/memcmp.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=cmov | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=AVX2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
 
 ; This tests codegen time inlining/optimization of memcmp
 ; rdar://6480398
@@ -12,43 +12,21 @@
 declare i32 @memcmp(i8*, i8*, i64)
 
 define i32 @length2(i8* %X, i8* %Y) nounwind {
-; X86-NOSSE-LABEL: length2:
-; X86-NOSSE:       # BB#0:
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NOSSE-NEXT:    movzwl (%ecx), %ecx
-; X86-NOSSE-NEXT:    movzwl (%eax), %eax
-; X86-NOSSE-NEXT:    rolw $8, %cx
-; X86-NOSSE-NEXT:    rolw $8, %ax
-; X86-NOSSE-NEXT:    cmpw %ax, %cx
-; X86-NOSSE-NEXT:    movl $-1, %eax
-; X86-NOSSE-NEXT:    jae .LBB0_1
-; X86-NOSSE-NEXT:  # BB#2:
-; X86-NOSSE-NEXT:    je .LBB0_3
-; X86-NOSSE-NEXT:  .LBB0_4:
-; X86-NOSSE-NEXT:    retl
-; X86-NOSSE-NEXT:  .LBB0_1:
-; X86-NOSSE-NEXT:    movl $1, %eax
-; X86-NOSSE-NEXT:    jne .LBB0_4
-; X86-NOSSE-NEXT:  .LBB0_3:
-; X86-NOSSE-NEXT:    xorl %eax, %eax
-; X86-NOSSE-NEXT:    retl
-;
-; X86-SSE2-LABEL: length2:
-; X86-SSE2:       # BB#0:
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT:    movzwl (%ecx), %ecx
-; X86-SSE2-NEXT:    movzwl (%eax), %eax
-; X86-SSE2-NEXT:    rolw $8, %cx
-; X86-SSE2-NEXT:    rolw $8, %ax
-; X86-SSE2-NEXT:    xorl %edx, %edx
-; X86-SSE2-NEXT:    cmpw %ax, %cx
-; X86-SSE2-NEXT:    movl $-1, %ecx
-; X86-SSE2-NEXT:    movl $1, %eax
-; X86-SSE2-NEXT:    cmovbl %ecx, %eax
-; X86-SSE2-NEXT:    cmovel %edx, %eax
-; X86-SSE2-NEXT:    retl
+; X86-LABEL: length2:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzwl (%ecx), %ecx
+; X86-NEXT:    movzwl (%eax), %eax
+; X86-NEXT:    rolw $8, %cx
+; X86-NEXT:    rolw $8, %ax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpw %ax, %cx
+; X86-NEXT:    movl $-1, %ecx
+; X86-NEXT:    movl $1, %eax
+; X86-NEXT:    cmovbl %ecx, %eax
+; X86-NEXT:    cmovel %edx, %eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: length2:
 ; X64:       # BB#0:
@@ -182,43 +160,21 @@ define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
 }
 
 define i32 @length4(i8* %X, i8* %Y) nounwind {
-; X86-NOSSE-LABEL: length4:
-; X86-NOSSE:       # BB#0:
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NOSSE-NEXT:    movl (%ecx), %ecx
-; X86-NOSSE-NEXT:    movl (%eax), %eax
-; X86-NOSSE-NEXT:    bswapl %ecx
-; X86-NOSSE-NEXT:    bswapl %eax
-; X86-NOSSE-NEXT:    cmpl %eax, %ecx
-; X86-NOSSE-NEXT:    movl $-1, %eax
-; X86-NOSSE-NEXT:    jae .LBB6_1
-; X86-NOSSE-NEXT:  # BB#2:
-; X86-NOSSE-NEXT:    je .LBB6_3
-; X86-NOSSE-NEXT:  .LBB6_4:
-; X86-NOSSE-NEXT:    retl
-; X86-NOSSE-NEXT:  .LBB6_1:
-; X86-NOSSE-NEXT:    movl $1, %eax
-; X86-NOSSE-NEXT:    jne .LBB6_4
-; X86-NOSSE-NEXT:  .LBB6_3:
-; X86-NOSSE-NEXT:    xorl %eax, %eax
-; X86-NOSSE-NEXT:    retl
-;
-; X86-SSE2-LABEL: length4:
-; X86-SSE2:       # BB#0:
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE2-NEXT:    movl (%ecx), %ecx
-; X86-SSE2-NEXT:    movl (%eax), %eax
-; X86-SSE2-NEXT:    bswapl %ecx
-; X86-SSE2-NEXT:    bswapl %eax
-; X86-SSE2-NEXT:    xorl %edx, %edx
-; X86-SSE2-NEXT:    cmpl %eax, %ecx
-; X86-SSE2-NEXT:    movl $-1, %ecx
-; X86-SSE2-NEXT:    movl $1, %eax
-; X86-SSE2-NEXT:    cmovbl %ecx, %eax
-; X86-SSE2-NEXT:    cmovel %edx, %eax
-; X86-SSE2-NEXT:    retl
+; X86-LABEL: length4:
+; X86:       # BB#0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %ecx
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    bswapl %ecx
+; X86-NEXT:    bswapl %eax
+; X86-NEXT:    xorl %edx, %edx
+; X86-NEXT:    cmpl %eax, %ecx
+; X86-NEXT:    movl $-1, %ecx
+; X86-NEXT:    movl $1, %eax
+; X86-NEXT:    cmovbl %ecx, %eax
+; X86-NEXT:    cmovel %edx, %eax
+; X86-NEXT:    retl
 ;
 ; X64-LABEL: length4:
 ; X64:       # BB#0:
-- 
2.40.0
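
For reference, the reason the two 32-bit configurations can now share one block of assertions is that each RUN line passes both a common prefix (X86) and a configuration-specific one (X86-NOSSE / X86-SSE2) to FileCheck; update_llc_test_checks.py emits checks under the common prefix whenever both runs produce identical code, and only falls back to per-configuration blocks when they differ (the 16/32-byte cases mentioned above). Below is a minimal sketch of that pattern using a hypothetical function that is not part of this test; the CHECK lines are illustrative of what the script would generate for 2017-era output (# BB#0 comment style), not copied from the tree.

; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=cmov  | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2

; Both runs lower this the same way, so the checks live under the shared X86 prefix.
define i32 @first_arg(i32 %a, i32 %b) nounwind {
; X86-LABEL: first_arg:
; X86:       # BB#0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    retl
  ret i32 %a
}

The checks would normally be regenerated rather than written by hand, e.g. by running utils/update_llc_test_checks.py on the test file (as the NOTE line at the top of memcmp.ll records). When the two runs disagree for a function, the script emits separate X86-NOSSE and X86-SSE2 blocks instead, which is exactly the duplication that -mattr=cmov removes for the small-length cases in this patch.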