From: Michael Liao Date: Tue, 28 May 2019 16:29:39 +0000 (+0000) Subject: [AMDGPU] Fix the mis-handling of `vreg_1` copied from scalar register. X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=5571fe4bfcfd0707e7193556ea306699d2dfcd9c;p=llvm [AMDGPU] Fix the mis-handling of `vreg_1` copied from scalar register. Summary: - Don't treat the use of a scalar register as `vreg_1` as a VGPR usage. Otherwise, that promotes the scalar register into a vector one, which breaks the assumption that the scalar register holds the lane mask. - The issue is triggered in a complicated case, where the uses of that (lane mask) scalar register are legalized before its definition, e.g., due to a mismatch between block placement and its topological order, or a loop. In those cases, the legalization of PHI introduces the use of that scalar register as `vreg_1`. Reviewers: rampitec, nhaehnle, arsenm, alex-t Subscribers: kzhuravl, jvesely, wdng, dstuttard, tpr, t-tye, hiraditya, llvm-commits, yaxunl Tags: #llvm Differential Revision: https://reviews.llvm.org/D62492 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@361847 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp index d20910baed3..fb151b4ffdc 100644 --- a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp +++ b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp @@ -588,7 +588,9 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) { } if (UseMI->isPHI()) { - if (!TRI->isSGPRReg(MRI, Use.getReg())) + const TargetRegisterClass *UseRC = MRI.getRegClass(Use.getReg()); + if (!TRI->isSGPRReg(MRI, Use.getReg()) && + UseRC != &AMDGPU::VReg_1RegClass) hasVGPRUses++; continue; } @@ -633,8 +635,10 @@ bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) { if ((!TRI->isVGPR(MRI, PHIRes) && RC0 != &AMDGPU::VReg_1RegClass) && (hasVGPRInput || hasVGPRUses > 1)) { + LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI); TII->moveToVALU(MI); } else { + 
LLVM_DEBUG(dbgs() << "Legalizing PHI: " << MI); TII->legalizeOperands(MI, MDT); } diff --git a/test/CodeGen/AMDGPU/fix-sgpr-copies.mir b/test/CodeGen/AMDGPU/fix-sgpr-copies.mir index 3d6e05cb2c9..306e62a4309 100644 --- a/test/CodeGen/AMDGPU/fix-sgpr-copies.mir +++ b/test/CodeGen/AMDGPU/fix-sgpr-copies.mir @@ -16,3 +16,47 @@ body: | %6:sreg_32 = S_ADD_I32 %2:sreg_32, %5:sreg_32, implicit-def $scc %7:sreg_32 = S_ADDC_U32 %3:sreg_32, %1:sreg_32, implicit-def $scc, implicit $scc ... + +# Test to ensure i1 phi copies from scalar registers through another phi won't +# be promoted into vector ones. +# GCN-LABEL: name: fix-sgpr-i1-phi-copies +# GCN: .8: +# GCN-NOT: vreg_64 = PHI +--- +name: fix-sgpr-i1-phi-copies +tracksRegLiveness: true +body: | + bb.9: + S_BRANCH %bb.0 + + bb.4: + S_CBRANCH_SCC1 %bb.6, implicit undef $scc + + bb.5: + %3:vreg_1 = IMPLICIT_DEF + + bb.6: + %4:vreg_1 = PHI %2:sreg_64, %bb.4, %3:vreg_1, %bb.5 + + bb.7: + %5:vreg_1 = PHI %2:sreg_64, %bb.3, %4:vreg_1, %bb.6 + S_BRANCH %bb.8 + + bb.0: + S_CBRANCH_SCC1 %bb.2, implicit undef $scc + + bb.1: + %0:sreg_64 = S_MOV_B64 0 + S_BRANCH %bb.3 + + bb.2: + %1:sreg_64 = S_MOV_B64 -1 + S_BRANCH %bb.3 + + bb.3: + %2:sreg_64 = PHI %0:sreg_64, %bb.1, %1:sreg_64, %bb.2 + S_CBRANCH_SCC1 %bb.7, implicit undef $scc + S_BRANCH %bb.4 + + bb.8: +...