From: Jonas Paulsson
Date: Tue, 10 May 2016 08:09:37 +0000 (+0000)
Subject: [foldMemoryOperand()] Pass LiveIntervals to enable liveness check.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=32db7c31b2fdd74d6bd45414c32b8c0cfc4eedd7;p=llvm

[foldMemoryOperand()] Pass LiveIntervals to enable liveness check.

SystemZ (and probably other targets as well) can fold a memory operand by
changing the opcode into a new instruction that, as a side effect, also
clobbers the CC register. Before doing so, the liveness of that register
must first be checked. When LIS is passed, getRegUnit() can be called on
it and the right LiveRange is computed on demand.

Reviewed by Matthias Braun.
http://reviews.llvm.org/D19861

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@269026 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 3db219c4184..b86e1566a15 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -21,6 +21,7 @@
 #include "llvm/MC/MCInstrInfo.h"
 #include "llvm/Support/BranchProbability.h"
 #include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 
 namespace llvm {
 
@@ -799,13 +800,15 @@ public:
   /// The new instruction is inserted before MI, and the client is responsible
   /// for removing the old instruction.
   MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                  ArrayRef<unsigned> Ops, int FrameIndex) const;
+                                  ArrayRef<unsigned> Ops, int FrameIndex,
+                                  LiveIntervals *LIS = nullptr) const;
 
   /// Same as the previous version except it allows folding of any load and
   /// store from / to any address, not just from a specific stack slot.
   MachineInstr *foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   ArrayRef<unsigned> Ops,
-                                  MachineInstr *LoadMI) const;
+                                  MachineInstr *LoadMI,
+                                  LiveIntervals *LIS = nullptr) const;
 
   /// Return true when there is potentially a faster code sequence
   /// for an instruction chain ending in \p Root. All potential patterns are
@@ -884,7 +887,8 @@ protected:
   /// at InsertPt.
   virtual MachineInstr *foldMemoryOperandImpl(
       MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-      MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+      MachineBasicBlock::iterator InsertPt, int FrameIndex,
+      LiveIntervals *LIS = nullptr) const {
     return nullptr;
   }
 
@@ -895,7 +899,8 @@ protected:
   /// at InsertPt.
   virtual MachineInstr *foldMemoryOperandImpl(
       MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-      MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+      MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
+      LiveIntervals *LIS = nullptr) const {
     return nullptr;
   }
 
diff --git a/lib/CodeGen/InlineSpiller.cpp b/lib/CodeGen/InlineSpiller.cpp
index 33340901816..6d2fcb9c358 100644
--- a/lib/CodeGen/InlineSpiller.cpp
+++ b/lib/CodeGen/InlineSpiller.cpp
@@ -761,8 +761,8 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned> > Ops,
   MachineInstrSpan MIS(MI);
 
   MachineInstr *FoldMI =
-      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI)
-             : TII.foldMemoryOperand(MI, FoldOps, StackSlot);
+      LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI, &LIS)
+             : TII.foldMemoryOperand(MI, FoldOps, StackSlot, &LIS);
 
   if (!FoldMI)
     return false;
diff --git a/lib/CodeGen/LiveRangeEdit.cpp b/lib/CodeGen/LiveRangeEdit.cpp
index 3ed02f46c0e..8b355f00255 100644
--- a/lib/CodeGen/LiveRangeEdit.cpp
+++ b/lib/CodeGen/LiveRangeEdit.cpp
@@ -205,7 +205,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
   if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
     return false;
 
-  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI);
+  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI, &LIS);
   if (!FoldMI)
     return false;
   DEBUG(dbgs() << "                folded: " << *FoldMI);
diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp
index 800ad6d1bb4..6d90f9dd819 100644
--- a/lib/CodeGen/TargetInstrInfo.cpp
+++ b/lib/CodeGen/TargetInstrInfo.cpp
@@ -497,7 +497,8 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
 /// stream.
 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                  ArrayRef<unsigned> Ops,
-                                                 int FI) const {
+                                                 int FI,
+                                                 LiveIntervals *LIS) const {
   unsigned Flags = 0;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
     if (MI->getOperand(Ops[i]).isDef())
@@ -519,7 +520,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
     MBB->insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
   }
 
   if (NewMI) {
@@ -778,7 +779,8 @@ void TargetInstrInfo::genAlternativeCodeSequence(
 /// stack slot.
 MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                  ArrayRef<unsigned> Ops,
-                                                 MachineInstr *LoadMI) const {
+                                                 MachineInstr *LoadMI,
+                                                 LiveIntervals *LIS) const {
   assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
@@ -800,7 +802,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
     NewMI = MBB.insert(MI, NewMI);
   } else {
     // Ask the target to do the actual folding.
-    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
+    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
   }
 
   if (!NewMI) return nullptr;
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 4f1a72cf97b..e396c335094 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2448,7 +2448,8 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
 MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+    MachineBasicBlock::iterator InsertPt, int FrameIndex,
+    LiveIntervals *LIS) const {
   // This is a bit of a hack. Consider this instruction:
   //
   //   %vreg0 = COPY %SP; GPR64all:%vreg0
diff --git a/lib/Target/AArch64/AArch64InstrInfo.h b/lib/Target/AArch64/AArch64InstrInfo.h
index 32d5c194154..37a9d41f845 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/lib/Target/AArch64/AArch64InstrInfo.h
@@ -143,7 +143,8 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
+                                      int FrameIndex,
+                                      LiveIntervals *LIS = nullptr) const override;
 
   bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                      MachineBasicBlock *&FBB,
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 3938db256e7..fa824d01065 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -15,6 +15,7 @@
 #include "SystemZInstrBuilder.h"
 #include "SystemZTargetMachine.h"
 #include "llvm/CodeGen/LiveVariables.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 
 using namespace llvm;
 
@@ -846,31 +847,42 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+    MachineBasicBlock::iterator InsertPt, int FrameIndex,
+    LiveIntervals *LIS) const {
+  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
   const MachineFrameInfo *MFI = MF.getFrameInfo();
   unsigned Size = MFI->getObjectSize(FrameIndex);
   unsigned Opcode = MI->getOpcode();
 
-// XXX This is an introduction of a CC def and is illegal! Reactivate
-// with a check of liveness of CC reg.
-#if 0
   if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
-    if ((Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
+    if (LIS != nullptr &&
+        (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
         isInt<8>(MI->getOperand(2).getImm()) &&
         !MI->getOperand(3).getReg()) {
-      // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
-      MachineInstr *BuiltMI =
-          BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
-                  get(SystemZ::AGSI))
+
+      // Check CC liveness, since new instruction introduces a dead
+      // def of CC.
+      MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
+      LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
+      ++CCUnit;
+      assert (!CCUnit.isValid() && "CC only has one reg unit.");
+      SlotIndex MISlot =
+        LIS->getSlotIndexes()->getInstructionIndex(*MI).getRegSlot();
+      if (!CCLiveRange.liveAt(MISlot)) {
+        // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
+        MachineInstr *BuiltMI =
+            BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+                    get(SystemZ::AGSI))
             .addFrameIndex(FrameIndex)
             .addImm(0)
             .addImm(MI->getOperand(2).getImm());
-      BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
-      return BuiltMI;
+        BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
+        CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
+        return BuiltMI;
+      }
     }
     return nullptr;
   }
-#endif
 
   // All other cases require a single operand.
   if (Ops.size() != 1)
@@ -992,7 +1004,8 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
+    LiveIntervals *LIS) const {
   return nullptr;
 }
 
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.h b/lib/Target/SystemZ/SystemZInstrInfo.h
index e995ff10ddc..b5e4ff48733 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -202,11 +202,13 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
+                                      int FrameIndex,
+                                      LiveIntervals *LIS = nullptr) const override;
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      MachineInstr *LoadMI) const override;
+                                      MachineInstr *LoadMI,
+                                      LiveIntervals *LIS = nullptr) const override;
   bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
   bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 45f3727a705..f6c11c80855 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6081,7 +6081,8 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
+    MachineBasicBlock::iterator InsertPt, int FrameIndex,
+    LiveIntervals *LIS) const {
   // Check switch flag
   if (NoFusing) return nullptr;
 
@@ -6193,14 +6194,15 @@ static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
 MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
     MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
-    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
+    MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI,
+    LiveIntervals *LIS) const {
   // If loading from a FrameIndex, fold directly from the FrameIndex.
   unsigned NumOps = LoadMI->getDesc().getNumOperands();
   int FrameIndex;
   if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
     if (isNonFoldablePartialRegisterLoad(*LoadMI, *MI, MF))
       return nullptr;
-    return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex);
+    return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
   }
 
   // Check switch flag
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index d72589604ae..5a82c161b27 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -370,7 +370,8 @@ public:
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      int FrameIndex) const override;
+                                      int FrameIndex,
+                                      LiveIntervals *LIS = nullptr) const override;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
   MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                       ArrayRef<unsigned> Ops,
                                       MachineBasicBlock::iterator InsertPt,
-                                      MachineInstr *LoadMI) const override;
+                                      MachineInstr *LoadMI,
+                                      LiveIntervals *LIS = nullptr) const override;
 
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instruction. If this is
diff --git a/test/CodeGen/SystemZ/int-add-12.ll b/test/CodeGen/SystemZ/int-add-12.ll
index 96273a6692b..496650f435c 100644
--- a/test/CodeGen/SystemZ/int-add-12.ll
+++ b/test/CodeGen/SystemZ/int-add-12.ll
@@ -130,7 +130,7 @@ define void @f10(i64 %base, i64 %index) {
 ; Check that adding 127 to a spilled value can use AGSI.
 define void @f11(i64 *%ptr, i32 %sel) {
 ; CHECK-LABEL: f11:
-; _CHECK: agsi {{[0-9]+}}(%r15), 127
+; CHECK: agsi {{[0-9]+}}(%r15), 127
 ; CHECK: br %r14
 entry:
   %val0 = load volatile i64 , i64 *%ptr
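---

The following condensed sketch (not part of the patch) restates the liveness check that the SystemZ hook above now performs before introducing a CC-clobbering opcode: fetch the single register unit of CC, obtain its LiveRange on demand through LiveIntervals::getRegUnit(), and test liveness at the instruction's register slot. The free-standing helper and its name isCCLiveAt are illustrative assumptions; in the patch the logic is written inline in SystemZInstrInfo::foldMemoryOperandImpl().

// Illustrative sketch only: mirrors the inline check in
// SystemZInstrInfo::foldMemoryOperandImpl(); the helper itself is not
// part of the patch.
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Target/TargetRegisterInfo.h"

using namespace llvm;

// Returns true if the CC register is live at MI, i.e. the fold into a
// CC-clobbering opcode must be rejected.
static bool isCCLiveAt(const MachineInstr &MI, LiveIntervals *LIS,
                       const TargetRegisterInfo *TRI, unsigned CCReg) {
  if (!LIS)
    return true; // No LiveIntervals: conservatively treat CC as live.

  // CC has a single register unit; its LiveRange is computed on demand.
  MCRegUnitIterator CCUnit(CCReg, TRI);
  LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);

  // Query liveness at the def slot of MI, the instruction being folded.
  SlotIndex MISlot =
      LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
  return CCLiveRange.liveAt(MISlot);
}

When the check succeeds, the patch additionally marks the CC def on the newly built AGSI as dead and records a matching dead def in the live range via createDeadDef(), keeping LiveIntervals consistent with the new instruction.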