From: Simon Pilgrim
Date: Fri, 17 Feb 2017 18:00:43 +0000 (+0000)
Subject: [X86][BMI] Add BMI2 stack folding tests
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=c60bc7e2e2f6e415f22e1e1fee908a47a94935f0;p=llvm

[X86][BMI] Add BMI2 stack folding tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@295470 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/stack-folding-bmi2.ll b/test/CodeGen/X86/stack-folding-bmi2.ll
new file mode 100644
index 00000000000..b70f7c668d0
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-bmi2.ll
@@ -0,0 +1,77 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+bmi,+bmi2 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with sideeffects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
+
+define i32 @stack_fold_bzhi_u32(i32 %a0, i32 %a1) {
+  ;CHECK-LABEL: stack_fold_bzhi_u32
+  ;CHECK: bzhil %eax, {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+  %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = tail call i32 @llvm.x86.bmi.bzhi.32(i32 %a0, i32 %a1)
+  ret i32 %2
+}
+declare i32 @llvm.x86.bmi.bzhi.32(i32, i32)
+
+define i64 @stack_fold_bzhi_u64(i64 %a0, i64 %a1) {
+  ;CHECK-LABEL: stack_fold_bzhi_u64
+  ;CHECK: bzhiq %rax, {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = tail call i64 @llvm.x86.bmi.bzhi.64(i64 %a0, i64 %a1)
+  ret i64 %2
+}
+declare i64 @llvm.x86.bmi.bzhi.64(i64, i64)
+
+define i64 @stack_fold_mulx_u64(i64 %a0, i64 %a1, i64 *%a2) {
+  ;CHECK-LABEL: stack_fold_mulx_u64
+  ;CHECK: mulxq {{-?[0-9]*}}(%rsp), %rax, %rcx {{.*#+}} 8-byte Folded Reload
+  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = zext i64 %a0 to i128
+  %3 = zext i64 %a1 to i128
+  %4 = mul i128 %2, %3
+  %5 = lshr i128 %4, 64
+  %6 = trunc i128 %4 to i64
+  %7 = trunc i128 %5 to i64
+  store i64 %7, i64 *%a2
+  ret i64 %6
+}
+
+define i32 @stack_fold_pdep_u32(i32 %a0, i32 %a1) {
+  ;CHECK-LABEL: stack_fold_pdep_u32
+  ;CHECK: pdepl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
+  %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = tail call i32 @llvm.x86.bmi.pdep.32(i32 %a0, i32 %a1)
+  ret i32 %2
+}
+declare i32 @llvm.x86.bmi.pdep.32(i32, i32)
+
+define i64 @stack_fold_pdep_u64(i64 %a0, i64 %a1) {
+  ;CHECK-LABEL: stack_fold_pdep_u64
+  ;CHECK: pdepq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
+  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = tail call i64 @llvm.x86.bmi.pdep.64(i64 %a0, i64 %a1)
+  ret i64 %2
+}
+declare i64 @llvm.x86.bmi.pdep.64(i64, i64)
+
+define i32 @stack_fold_pext_u32(i32 %a0, i32 %a1) {
+  ;CHECK-LABEL: stack_fold_pext_u32
+  ;CHECK: pextl {{-?[0-9]*}}(%rsp), %eax, %eax {{.*#+}} 4-byte Folded Reload
+  %1 = tail call i32 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = tail call i32 @llvm.x86.bmi.pext.32(i32 %a0, i32 %a1)
+  ret i32 %2
+}
+declare i32 @llvm.x86.bmi.pext.32(i32, i32)
+
+define i64 @stack_fold_pext_u64(i64 %a0, i64 %a1) {
+  ;CHECK-LABEL: stack_fold_pext_u64
+  ;CHECK: pextq {{-?[0-9]*}}(%rsp), %rax, %rax {{.*#+}} 8-byte Folded Reload
+  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = tail call i64 @llvm.x86.bmi.pext.64(i64 %a0, i64 %a1)
+  ret i64 %2
+}
+declare i64 @llvm.x86.bmi.pext.64(i64, i64)