   }
 
   MachineOperand *New = Fold.OpToFold;
-  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
-      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
-    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
-
-    Old.setIsUndef(New->isUndef());
-    return true;
-  }
-
-  // FIXME: Handle physical registers.
-
-  return false;
+  Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
+  Old.setIsUndef(New->isUndef());
+  return true;
 }
 
 static bool isUseMIInFoldList(ArrayRef<FoldCandidate> FoldList,
@@ ... @@
   } else {
     if (UseMI->isCopy() && OpToFold.isReg() &&
         TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(0).getReg()) &&
-        TargetRegisterInfo::isVirtualRegister(UseMI->getOperand(1).getReg()) &&
         TRI->isVGPR(*MRI, UseMI->getOperand(0).getReg()) &&
         TRI->isVGPR(*MRI, UseMI->getOperand(1).getReg()) &&
         !UseMI->getOperand(1).getSubReg()) {
@@ ... @@
   const TargetRegisterClass *FoldRC =
     TRI->getRegClass(FoldDesc.OpInfo[0].RegClass);
-
   // Split 64-bit constants into 32-bits for folding.
   if (UseOp.getSubReg() && AMDGPU::getRegBitWidth(FoldRC->getID()) == 64) {
     unsigned UseReg = UseOp.getReg();
-    const TargetRegisterClass *UseRC
-      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
-      MRI->getRegClass(UseReg) :
-      TRI->getPhysRegClass(UseReg);
+    const TargetRegisterClass *UseRC = MRI->getRegClass(UseReg);
 
     if (AMDGPU::getRegBitWidth(UseRC->getID()) != 64)
       return;