From f8db082af9bf75142f23fa2e80e2c0b9f9de131b Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sat, 18 Jun 2016 17:20:52 +0000
Subject: [PATCH] [X86][TBM] Added fast-isel tests matching
 tools/clang/test/CodeGen/tbm-builtins.c

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@273087 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../X86/tbm-intrinsics-fast-isel-x86_64.ll   | 133 +++++++++++
 test/CodeGen/X86/tbm-intrinsics-fast-isel.ll | 212 ++++++++++++++++++
 2 files changed, 345 insertions(+)
 create mode 100644 test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll
 create mode 100644 test/CodeGen/X86/tbm-intrinsics-fast-isel.ll

diff --git a/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll b/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll
new file mode 100644
index 00000000000..f6c49cab71b
--- /dev/null
+++ b/test/CodeGen/X86/tbm-intrinsics-fast-isel-x86_64.ll
@@ -0,0 +1,133 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+tbm | FileCheck %s --check-prefix=X64
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/tbm-builtins.c
+
+define i64 @test__bextri_u64(i64 %a0) {
+; X64-LABEL: test__bextri_u64:
+; X64:       # BB#0:
+; X64-NEXT:    bextr $1, %rdi, %rax
+; X64-NEXT:    retq
+  %1 = call i64 @llvm.x86.tbm.bextri.u64(i64 %a0, i64 1)
+  ret i64 %1
+}
+
+define i64 @test__blcfill_u64(i64 %a0) {
+; X64-LABEL: test__blcfill_u64:
+; X64:       # BB#0:
+; X64-NEXT:    leaq 1(%rdi), %rax
+; X64-NEXT:    andq %rdi, %rax
+; X64-NEXT:    retq
+  %1 = add i64 %a0, 1
+  %2 = and i64 %a0, %1
+  ret i64 %2
+}
+
+define i64 @test__blci_u64(i64 %a0) {
+; X64-LABEL: test__blci_u64:
+; X64:       # BB#0:
+; X64-NEXT:    leaq 1(%rdi), %rax
+; X64-NEXT:    xorq $-1, %rax
+; X64-NEXT:    orq %rdi, %rax
+; X64-NEXT:    retq
+  %1 = add i64 %a0, 1
+  %2 = xor i64 %1, -1
+  %3 = or i64 %a0, %2
+  ret i64 %3
+}
+
+define i64 @test__blcic_u64(i64 %a0) {
+; X64-LABEL: test__blcic_u64:
+; X64:       # BB#0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    xorq $-1, %rax
+; X64-NEXT:    addq $1, %rdi
+; X64-NEXT:    andq %rax, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+  %1 = xor i64 %a0, -1
+  %2 = add i64 %a0, 1
+  %3 = and i64 %1, %2
+  ret i64 %3
+}
+
+define i64 @test__blcmsk_u64(i64 %a0) {
+; X64-LABEL: test__blcmsk_u64:
+; X64:       # BB#0:
+; X64-NEXT:    leaq 1(%rdi), %rax
+; X64-NEXT:    xorq %rdi, %rax
+; X64-NEXT:    retq
+  %1 = add i64 %a0, 1
+  %2 = xor i64 %a0, %1
+  ret i64 %2
+}
+
+define i64 @test__blcs_u64(i64 %a0) {
+; X64-LABEL: test__blcs_u64:
+; X64:       # BB#0:
+; X64-NEXT:    leaq 1(%rdi), %rax
+; X64-NEXT:    orq %rdi, %rax
+; X64-NEXT:    retq
+  %1 = add i64 %a0, 1
+  %2 = or i64 %a0, %1
+  ret i64 %2
+}
+
+define i64 @test__blsfill_u64(i64 %a0) {
+; X64-LABEL: test__blsfill_u64:
+; X64:       # BB#0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    subq $1, %rax
+; X64-NEXT:    orq %rdi, %rax
+; X64-NEXT:    retq
+  %1 = sub i64 %a0, 1
+  %2 = or i64 %a0, %1
+  ret i64 %2
+}
+
+define i64 @test__blsic_u64(i64 %a0) {
+; X64-LABEL: test__blsic_u64:
+; X64:       # BB#0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    xorq $-1, %rax
+; X64-NEXT:    subq $1, %rdi
+; X64-NEXT:    orq %rax, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+  %1 = xor i64 %a0, -1
+  %2 = sub i64 %a0, 1
+  %3 = or i64 %1, %2
+  ret i64 %3
+}
+
+define i64 @test__t1mskc_u64(i64 %a0) {
+; X64-LABEL: test__t1mskc_u64:
+; X64:       # BB#0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    xorq $-1, %rax
+; X64-NEXT:    addq $1, %rdi
+; X64-NEXT:    orq %rax, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+  %1 = xor i64 %a0, -1
+  %2 = add i64 %a0, 1
+  %3 = or i64 %1, %2
+  ret i64 %3
+}
+
+define i64 @test__tzmsk_u64(i64 %a0) {
+; X64-LABEL: test__tzmsk_u64:
+; X64:       # BB#0:
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    xorq $-1, %rax
+; X64-NEXT:    subq $1, %rdi
+; X64-NEXT:    andq %rax, %rdi
+; X64-NEXT:    movq %rdi, %rax
+; X64-NEXT:    retq
+  %1 = xor i64 %a0, -1
+  %2 = sub i64 %a0, 1
+  %3 = and i64 %1, %2
+  ret i64 %3
+}
+
+declare i64 @llvm.x86.tbm.bextri.u64(i64, i64)
diff --git a/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll b/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
new file mode 100644
index 00000000000..035291c0d8a
--- /dev/null
+++ b/test/CodeGen/X86/tbm-intrinsics-fast-isel.ll
@@ -0,0 +1,212 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -fast-isel -mtriple=i686-unknown-unknown -mattr=+tbm | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-unknown -mattr=+tbm | FileCheck %s --check-prefix=X64
+
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/tbm-builtins.c
+
+define i32 @test__bextri_u32(i32 %a0) {
+; X32-LABEL: test__bextri_u32:
+; X32:       # BB#0:
+; X32-NEXT:    bextr $1, {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__bextri_u32:
+; X64:       # BB#0:
+; X64-NEXT:    bextr $1, %edi, %eax
+; X64-NEXT:    retq
+  %1 = call i32 @llvm.x86.tbm.bextri.u32(i32 %a0, i32 1)
+  ret i32 %1
+}
+
+define i32 @test__blcfill_u32(i32 %a0) {
+; X32-LABEL: test__blcfill_u32:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    leal 1(%ecx), %eax
+; X32-NEXT:    andl %ecx, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__blcfill_u32:
+; X64:       # BB#0:
+; X64-NEXT:    leal 1(%rdi), %eax
+; X64-NEXT:    andl %edi, %eax
+; X64-NEXT:    retq
+  %1 = add i32 %a0, 1
+  %2 = and i32 %a0, %1
+  ret i32 %2
+}
+
+define i32 @test__blci_u32(i32 %a0) {
+; X32-LABEL: test__blci_u32:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    leal 1(%ecx), %eax
+; X32-NEXT:    xorl $-1, %eax
+; X32-NEXT:    orl %ecx, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__blci_u32:
+; X64:       # BB#0:
+; X64-NEXT:    leal 1(%rdi), %eax
+; X64-NEXT:    xorl $-1, %eax
+; X64-NEXT:    orl %edi, %eax
+; X64-NEXT:    retq
+  %1 = add i32 %a0, 1
+  %2 = xor i32 %1, -1
+  %3 = or i32 %a0, %2
+  ret i32 %3
+}
+
+define i32 @test__blcic_u32(i32 %a0) {
+; X32-LABEL: test__blcic_u32:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl %eax, %ecx
+; X32-NEXT:    xorl $-1, %ecx
+; X32-NEXT:    addl $1, %eax
+; X32-NEXT:    andl %ecx, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__blcic_u32:
+; X64:       # BB#0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $-1, %eax
+; X64-NEXT:    addl $1, %edi
+; X64-NEXT:    andl %eax, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
+  %1 = xor i32 %a0, -1
+  %2 = add i32 %a0, 1
+  %3 = and i32 %1, %2
+  ret i32 %3
+}
+
+define i32 @test__blcmsk_u32(i32 %a0) {
+; X32-LABEL: test__blcmsk_u32:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    leal 1(%ecx), %eax
+; X32-NEXT:    xorl %ecx, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__blcmsk_u32:
+; X64:       # BB#0:
+; X64-NEXT:    leal 1(%rdi), %eax
+; X64-NEXT:    xorl %edi, %eax
+; X64-NEXT:    retq
+  %1 = add i32 %a0, 1
+  %2 = xor i32 %a0, %1
+  ret i32 %2
+}
+
+define i32 @test__blcs_u32(i32 %a0) {
+; X32-LABEL: test__blcs_u32:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    leal 1(%ecx), %eax
+; X32-NEXT:    orl %ecx, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__blcs_u32:
+; X64:       # BB#0:
+; X64-NEXT:    leal 1(%rdi), %eax
+; X64-NEXT:    orl %edi, %eax
+; X64-NEXT:    retq
+  %1 = add i32 %a0, 1
+  %2 = or i32 %a0, %1
+  ret i32 %2
+}
+
+define i32 @test__blsfill_u32(i32 %a0) {
+; X32-LABEL: test__blsfill_u32:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT:    movl %ecx, %eax
+; X32-NEXT:    subl $1, %eax
+; X32-NEXT:    orl %ecx, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__blsfill_u32:
+; X64:       # BB#0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    subl $1, %eax
+; X64-NEXT:    orl %edi, %eax
+; X64-NEXT:    retq
+  %1 = sub i32 %a0, 1
+  %2 = or i32 %a0, %1
+  ret i32 %2
+}
+
+define i32 @test__blsic_u32(i32 %a0) {
+; X32-LABEL: test__blsic_u32:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl %eax, %ecx
+; X32-NEXT:    xorl $-1, %ecx
+; X32-NEXT:    subl $1, %eax
+; X32-NEXT:    orl %ecx, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__blsic_u32:
+; X64:       # BB#0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $-1, %eax
+; X64-NEXT:    subl $1, %edi
+; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
+  %1 = xor i32 %a0, -1
+  %2 = sub i32 %a0, 1
+  %3 = or i32 %1, %2
+  ret i32 %3
+}
+
+define i32 @test__t1mskc_u32(i32 %a0) {
+; X32-LABEL: test__t1mskc_u32:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl %eax, %ecx
+; X32-NEXT:    xorl $-1, %ecx
+; X32-NEXT:    addl $1, %eax
+; X32-NEXT:    orl %ecx, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__t1mskc_u32:
+; X64:       # BB#0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $-1, %eax
+; X64-NEXT:    addl $1, %edi
+; X64-NEXT:    orl %eax, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
+  %1 = xor i32 %a0, -1
+  %2 = add i32 %a0, 1
+  %3 = or i32 %1, %2
+  ret i32 %3
+}
+
+define i32 @test__tzmsk_u32(i32 %a0) {
+; X32-LABEL: test__tzmsk_u32:
+; X32:       # BB#0:
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl %eax, %ecx
+; X32-NEXT:    xorl $-1, %ecx
+; X32-NEXT:    subl $1, %eax
+; X32-NEXT:    andl %ecx, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: test__tzmsk_u32:
+; X64:       # BB#0:
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    xorl $-1, %eax
+; X64-NEXT:    subl $1, %edi
+; X64-NEXT:    andl %eax, %edi
+; X64-NEXT:    movl %edi, %eax
+; X64-NEXT:    retq
+  %1 = xor i32 %a0, -1
+  %2 = sub i32 %a0, 1
+  %3 = and i32 %1, %2
+  ret i32 %3
+}
+
+declare i32 @llvm.x86.tbm.bextri.u32(i32, i32)
-- 
2.50.1
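
For reference, below is a plain-C sketch of the bit-manipulation identities
the IR patterns above encode; these mirror what clang's
test/CodeGen/tbm-builtins.c exercises through the TBM intrinsics. The helper
names are illustrative (they are not the tbmintrin.h intrinsic names), and
bextri is omitted because it lowers to a call of @llvm.x86.tbm.bextri.u32/u64
rather than to plain IR arithmetic.

#include <stdint.h>

/* Illustrative C equivalents of the TBM instruction semantics tested above. */
static inline uint64_t blcfill64(uint64_t x) { return x & (x + 1);  } /* clear all trailing one bits         */
static inline uint64_t blci64(uint64_t x)    { return x | ~(x + 1); } /* set all bits but the lowest clear   */
static inline uint64_t blcic64(uint64_t x)   { return ~x & (x + 1); } /* isolate the lowest clear bit        */
static inline uint64_t blcmsk64(uint64_t x)  { return x ^ (x + 1);  } /* mask up to the lowest clear bit     */
static inline uint64_t blcs64(uint64_t x)    { return x | (x + 1);  } /* set the lowest clear bit            */
static inline uint64_t blsfill64(uint64_t x) { return x | (x - 1);  } /* fill from the lowest set bit down   */
static inline uint64_t blsic64(uint64_t x)   { return ~x | (x - 1); } /* set all bits but the lowest set bit */
static inline uint64_t t1mskc64(uint64_t x)  { return ~x | (x + 1); } /* inverse mask of the trailing ones   */
static inline uint64_t tzmsk64(uint64_t x)   { return ~x & (x - 1); } /* mask of the trailing zero bits      */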