}
if (UseMI->isPHI()) {
-   if (!TRI->isSGPRReg(MRI, Use.getReg()))
+   const TargetRegisterClass *UseRC = MRI.getRegClass(Use.getReg());
+   if (!TRI->isSGPRReg(MRI, Use.getReg()) &&
+       UseRC != &AMDGPU::VReg_1RegClass)
    hasVGPRUses++;
  continue;
}
if ((!TRI->isVGPR(MRI, PHIRes) && RC0 != &AMDGPU::VReg_1RegClass) &&
    (hasVGPRInput || hasVGPRUses > 1)) {
+   LLVM_DEBUG(dbgs() << "Fixing PHI: " << MI);
  TII->moveToVALU(MI);
} else {
+   LLVM_DEBUG(dbgs() << "Legalizing PHI: " << MI);
  TII->legalizeOperands(MI, MDT);
}
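
The new LLVM_DEBUG lines only print in an assertions-enabled build when this
pass's debug type is requested on the command line. A minimal sketch of how to
surface that output, assuming the pass's DEBUG_TYPE matches its registered name
si-fix-sgpr-copies and using a hypothetical input file test.mir:

  llc -march=amdgcn -run-pass=si-fix-sgpr-copies -debug-only=si-fix-sgpr-copies \
      -verify-machineinstrs -o /dev/null test.mir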
%6:sreg_32 = S_ADD_I32 %2:sreg_32, %5:sreg_32, implicit-def $scc
%7:sreg_32 = S_ADDC_U32 %3:sreg_32, %1:sreg_32, implicit-def $scc, implicit $scc
...
+
+# Test to ensure that i1 phi copies fed from scalar registers through another
+# phi are not promoted to vector registers.
+# GCN-LABEL: name: fix-sgpr-i1-phi-copies
+# GCN: .8:
+# GCN-NOT: vreg_64 = PHI
+---
+name: fix-sgpr-i1-phi-copies
+tracksRegLiveness: true
+body: |
+ bb.9:
+ S_BRANCH %bb.0
+
+ bb.4:
+ S_CBRANCH_SCC1 %bb.6, implicit undef $scc
+
+ bb.5:
+ %3:vreg_1 = IMPLICIT_DEF
+
+ bb.6:
+ %4:vreg_1 = PHI %2:sreg_64, %bb.4, %3:vreg_1, %bb.5
+
+ bb.7:
+ %5:vreg_1 = PHI %2:sreg_64, %bb.3, %4:vreg_1, %bb.6
+ S_BRANCH %bb.8
+
+ bb.0:
+ S_CBRANCH_SCC1 %bb.2, implicit undef $scc
+
+ bb.1:
+ %0:sreg_64 = S_MOV_B64 0
+ S_BRANCH %bb.3
+
+ bb.2:
+ %1:sreg_64 = S_MOV_B64 -1
+ S_BRANCH %bb.3
+
+ bb.3:
+ %2:sreg_64 = PHI %0:sreg_64, %bb.1, %1:sreg_64, %bb.2
+ S_CBRANCH_SCC1 %bb.7, implicit undef $scc
+ S_BRANCH %bb.4
+
+ bb.8:
+...
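
For completeness, a RUN line along these lines is what typically drives such a
MIR test; the actual invocation sits at the top of the existing test file and
is not part of this hunk, so the pass name and GCN check prefix below are
assumptions inferred from the checks above:

  # RUN: llc -march=amdgcn -run-pass=si-fix-sgpr-copies -verify-machineinstrs -o - %s \
  # RUN:   | FileCheck -check-prefix=GCN %s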