Visited.insert(Reg);
- MachineInstr *DefInstr = MRI.getUniqueVRegDef(Reg);
- assert(DefInstr);
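+ // Look up the single SSA definition of this virtual register.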
+ MachineInstr *DefInstr = MRI.getVRegDef(Reg);
switch (DefInstr->getOpcode()) {
default:
break;
return false;
DenseSet<const MachineBasicBlock*> Visited;
- SmallVector<MachineBasicBlock*, 4> Worklist(MBB->pred_begin(),
+ SmallVector<MachineBasicBlock*, 4> Worklist(MBB->pred_begin(),
MBB->pred_end());
while (!Worklist.empty()) {
const TargetRegisterClass *SrcRC, *DstRC;
std::tie(SrcRC, DstRC) = getCopyRegClasses(MI, *TRI, MRI);
if (isVGPRToSGPRCopy(SrcRC, DstRC, *TRI)) {
- MachineInstr *DefMI = MRI.getVRegDef(MI.getOperand(1).getReg());
+ unsigned SrcReg = MI.getOperand(1).getReg();
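+ // getVRegDef assumes a single definition, which does not hold for a
+ // physical register source (e.g. an inline asm output), so lower such
+ // copies straight to VALU instructions instead.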
+ if (!TargetRegisterInfo::isVirtualRegister(SrcReg)) {
+ TII->moveToVALU(MI);
+ break;
+ }
+
+ MachineInstr *DefMI = MRI.getVRegDef(SrcReg);
unsigned SMovOp;
int64_t Imm;
// If we are just copying an immediate, we can replace the copy with
// s_mov_b32.
call void asm sideeffect "; use $0 $1 ", "{VGPR0}, {VGPR1}"(i1 %val0, i1 %val1)
ret void
}
+
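+; The second inline asm def of v0 forces the first def to be copied to
+; another VGPR, so the copy source is a physical register with multiple
+; definitions.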
+; CHECK-LABEL: {{^}}multiple_def_phys_vgpr:
+; CHECK: ; def v0
+; CHECK: v_mov_b32_e32 v1, v0
+; CHECK: ; def v0
+; CHECK: v_lshlrev_b32_e32 v{{[0-9]+}}, v0, v1
+define amdgpu_kernel void @multiple_def_phys_vgpr() {
+entry:
+ %def0 = call i32 asm sideeffect "; def $0 ", "={VGPR0}"()
+ %def1 = call i32 asm sideeffect "; def $0 ", "={VGPR0}"()
+ %add = shl i32 %def0, %def1
+ store i32 %add, i32 addrspace(1)* undef
+ ret void
+}