void spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg);
void usePhysReg(MachineOperand &MO);
- void definePhysReg(MachineInstr &MI, MCPhysReg PhysReg, RegState NewState);
+ void definePhysReg(MachineBasicBlock::iterator MI, MCPhysReg PhysReg,
+ RegState NewState);
unsigned calcSpillCost(MCPhysReg PhysReg) const;
- void assignVirtToPhysReg(LiveReg&, MCPhysReg PhysReg);
+ void assignVirtToPhysReg(LiveReg &, MCPhysReg PhysReg);
LiveRegMap::iterator findLiveVirtReg(unsigned VirtReg) {
return LiveVirtRegs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
/// similar to defineVirtReg except the physreg is reserved instead of
/// allocated.
-void RegAllocFast::definePhysReg(MachineInstr &MI, MCPhysReg PhysReg,
- RegState NewState) {
+void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
+ MCPhysReg PhysReg, RegState NewState) {
markRegUsedInInstr(PhysReg);
switch (unsigned VirtReg = PhysRegState[PhysReg]) {
case regDisabled:
// Add live-in registers as live.
for (const MachineBasicBlock::RegisterMaskPair LI : MBB.liveins())
if (MRI->isAllocatable(LI.PhysReg))
- definePhysReg(*MII, LI.PhysReg, regReserved);
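+        // MII may be the end() iterator when the block is empty, so pass it
+        // through without dereferencing it.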
+ definePhysReg(MII, LI.PhysReg, regReserved);
VirtDead.clear();
Coalesced.clear();
--- /dev/null
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64-apple-ios -run-pass regallocfast -o - %s | FileCheck %s
+# This test used to crash the fast register allocator.
+# Basically, when a basic block has liveins, the fast regalloc
+# was dereferencing the begin iterator of that block. However,
+# when the block is empty, that dereference crashed.
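+# definePhysReg now takes a MachineBasicBlock::iterator rather than a
+# MachineInstr reference, so the live-in handling never dereferences
+# the (possibly end()) begin iterator of an empty block.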
+---
+name: crashing
+tracksRegLiveness: true
+body: |
+ ; CHECK-LABEL: name: crashing
+ ; CHECK: bb.0:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: %x0, %x1
+ ; CHECK: bb.1:
+ ; CHECK: renamable %w0 = MOVi32imm -1
+ ; CHECK: RET_ReallyLR implicit killed %w0
+ bb.1:
+ liveins: %x0, %x1
+
+ bb.2:
+ %0:gpr32 = MOVi32imm -1
+ %w0 = COPY %0
+ RET_ReallyLR implicit %w0
+
+...