From 154ed2a66bbaf21b55e18a9887ff4ba7c6a81a02 Mon Sep 17 00:00:00 2001
From: Guy Blank
Date: Wed, 28 Sep 2016 11:22:17 +0000
Subject: [PATCH] [X86][FastISel] Use a COPY from K register to a GPR instead
 of a K operation

The KORTEST was introduced due to a bug where a TEST instruction used a K
register, but it turns out that the opposite case, a KORTEST using a GPR, is
now happening. The change removes the KORTEST flow and adds a COPY
instruction from the K register to a GPR.

Differential Revision: https://reviews.llvm.org/D24953

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@282580 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86FastISel.cpp            | 58 ++++++++++++-----------
 test/CodeGen/X86/avx512-fsel.ll           |  3 +-
 test/CodeGen/X86/fast-isel-load-i1.ll     | 15 ++++
 test/CodeGen/X86/fast-isel-select-cmov.ll |  9 ++--
 4 files changed, 54 insertions(+), 31 deletions(-)
 create mode 100644 test/CodeGen/X86/fast-isel-load-i1.ll

diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 41651d26148..d7eeb402b1d 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -1731,15 +1731,17 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
   unsigned OpReg = getRegForValue(BI->getCondition());
   if (OpReg == 0) return false;
 
-  // In case OpReg is a K register, kortest against itself.
-  if (MRI.getRegClass(OpReg) == &X86::VK1RegClass)
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::KORTESTWrr))
-      .addReg(OpReg)
-      .addReg(OpReg);
-  else
-    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
-      .addReg(OpReg)
-      .addImm(1);
+  // In case OpReg is a K register, COPY to a GPR
+  if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) {
+    unsigned KOpReg = OpReg;
+    OpReg = createResultReg(&X86::GR8RegClass);
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
+            TII.get(TargetOpcode::COPY), OpReg)
+      .addReg(KOpReg);
+  }
+  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+    .addReg(OpReg)
+    .addImm(1);
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::JNE_1))
     .addMBB(TrueMBB);
   finishCondBranch(BI->getParent(), TrueMBB, FalseMBB);
@@ -2073,16 +2075,17 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
       return false;
     bool CondIsKill = hasTrivialKill(Cond);
 
-    // In case OpReg is a K register, kortest against itself.
-    if (MRI.getRegClass(CondReg) == &X86::VK1RegClass)
+    // In case OpReg is a K register, COPY to a GPR
+    if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
+      unsigned KCondReg = CondReg;
+      CondReg = createResultReg(&X86::GR8RegClass);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-              TII.get(X86::KORTESTWrr))
-        .addReg(CondReg, getKillRegState(CondIsKill))
-        .addReg(CondReg);
-    else
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
-        .addReg(CondReg, getKillRegState(CondIsKill))
-        .addImm(1);
+              TII.get(TargetOpcode::COPY), CondReg)
+        .addReg(KCondReg, getKillRegState(CondIsKill));
+    }
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+      .addReg(CondReg, getKillRegState(CondIsKill))
+      .addImm(1);
   }
 
   const Value *LHS = I->getOperand(1);
@@ -2254,16 +2257,17 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
       return false;
     bool CondIsKill = hasTrivialKill(Cond);
 
-    // In case OpReg is a K register, kortest against itself.
-    if (MRI.getRegClass(CondReg) == &X86::VK1RegClass)
+    // In case OpReg is a K register, COPY to a GPR
+    if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
+      unsigned KCondReg = CondReg;
+      CondReg = createResultReg(&X86::GR8RegClass);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
-              TII.get(X86::KORTESTWrr))
-        .addReg(CondReg, getKillRegState(CondIsKill))
-        .addReg(CondReg);
-    else
-      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
-        .addReg(CondReg, getKillRegState(CondIsKill))
-        .addImm(1);
+              TII.get(TargetOpcode::COPY), CondReg)
+        .addReg(KCondReg, getKillRegState(CondIsKill));
+    }
+    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
+      .addReg(CondReg, getKillRegState(CondIsKill))
+      .addImm(1);
   }
 
   const Value *LHS = I->getOperand(1);
diff --git a/test/CodeGen/X86/avx512-fsel.ll b/test/CodeGen/X86/avx512-fsel.ll
index 2c9c42b84b9..0afaeaec9b4 100644
--- a/test/CodeGen/X86/avx512-fsel.ll
+++ b/test/CodeGen/X86/avx512-fsel.ll
@@ -26,7 +26,8 @@ define i32 @test(float %a, float %b) {
 ; CHECK-NEXT: movb %dil, %r8b
 ; CHECK-NEXT: andl $1, %r8d
 ; CHECK-NEXT: kmovw %r8d, %k1
-; CHECK-NEXT: kortestw %k1, %k1
+; CHECK-NEXT: kmovw %k1, %ecx
+; CHECK-NEXT: testb $1, %cl
 ; CHECK-NEXT: movb %al, {{[0-9]+}}(%rsp) ## 1-byte Spill
 ; CHECK-NEXT: kmovw %k0, {{[0-9]+}}(%rsp) ## 2-byte Spill
 ; CHECK-NEXT: jne LBB0_1
diff --git a/test/CodeGen/X86/fast-isel-load-i1.ll b/test/CodeGen/X86/fast-isel-load-i1.ll
new file mode 100644
index 00000000000..1b2e3c5b9bb
--- /dev/null
+++ b/test/CodeGen/X86/fast-isel-load-i1.ll
@@ -0,0 +1,15 @@
+; RUN: llc < %s -fast-isel -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s
+
+define i1 @test_i1(i1* %b) {
+; CHECK-LABEL: test_i1:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: testb $1, (%rdi)
+entry:
+  %0 = load i1, i1* %b, align 1
+  br i1 %0, label %in, label %out
+in:
+  ret i1 0
+out:
+  ret i1 1
+}
+
diff --git a/test/CodeGen/X86/fast-isel-select-cmov.ll b/test/CodeGen/X86/fast-isel-select-cmov.ll
index 290bcaaf4a2..a9b2dd841f2 100644
--- a/test/CodeGen/X86/fast-isel-select-cmov.ll
+++ b/test/CodeGen/X86/fast-isel-select-cmov.ll
@@ -16,7 +16,8 @@ define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroex
 ; AVX512-LABEL: select_cmov_i16:
 ; AVX512: ## BB#0:
 ; AVX512-NEXT: kmovw %edi, %k0
-; AVX512-NEXT: kortestw %k0, %k0
+; AVX512-NEXT: kmovw %k0, %eax
+; AVX512-NEXT: testb $1, %al
 ; AVX512-NEXT: cmovew %dx, %si
 ; AVX512-NEXT: movzwl %si, %eax
 ; AVX512-NEXT: retq
@@ -47,7 +48,8 @@ define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
 ; AVX512-LABEL: select_cmov_i32:
 ; AVX512: ## BB#0:
 ; AVX512-NEXT: kmovw %edi, %k0
-; AVX512-NEXT: kortestw %k0, %k0
+; AVX512-NEXT: kmovw %k0, %eax
+; AVX512-NEXT: testb $1, %al
 ; AVX512-NEXT: cmovel %edx, %esi
 ; AVX512-NEXT: movl %esi, %eax
 ; AVX512-NEXT: retq
@@ -78,7 +80,8 @@ define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
 ; AVX512-LABEL: select_cmov_i64:
 ; AVX512: ## BB#0:
 ; AVX512-NEXT: kmovw %edi, %k0
-; AVX512-NEXT: kortestw %k0, %k0
+; AVX512-NEXT: kmovw %k0, %eax
+; AVX512-NEXT: testb $1, %al
 ; AVX512-NEXT: cmoveq %rdx, %rsi
 ; AVX512-NEXT: movq %rsi, %rax
 ; AVX512-NEXT: retq
-- 
2.50.1
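
The COPY-then-TEST sequence above is now emitted in three places (X86SelectBranch,
X86FastEmitCMoveSelect, and X86FastEmitPseudoSelect). As a minimal sketch only, not part of
this patch, the shared pattern from the two select hunks could be factored into a helper along
the following lines; the name emitCondTest is hypothetical, and the sketch assumes the
X86FastISel members and helpers used in the hunks above (FuncInfo, TII, MRI, DbgLoc,
createResultReg, getKillRegState) are in scope.

    // Hypothetical helper (not part of this patch): make an i1 condition usable
    // by TEST8ri. If the value lives in a VK1 mask register, copy it into a GR8
    // first, since TEST cannot take a K register operand; then test bit 0 so
    // EFLAGS reflects the condition for a following JNE/CMOV.
    unsigned X86FastISel::emitCondTest(unsigned CondReg, bool CondIsKill) {
      if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
        unsigned KCondReg = CondReg;
        CondReg = createResultReg(&X86::GR8RegClass);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::COPY), CondReg)
          .addReg(KCondReg, getKillRegState(CondIsKill));
      }
      // Test the low bit of the (possibly copied) condition register.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
        .addReg(CondReg, getKillRegState(CondIsKill))
        .addImm(1);
      return CondReg;
    }

Emitting an unconditional TEST of bit 0 keeps the flag consumers (JNE_1 in the branch case,
CMOV in the select cases) independent of whether the i1 condition was produced in a mask
register or a GPR, which is what allows the KORTEST special case to be removed.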