-//===-- AtomicExpandPass.cpp - Expand atomic instructions -------===//
+//===- AtomicExpandPass.cpp - Expand atomic instructions ------------------===//
//
// The LLVM Compiler Infrastructure
//
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/AtomicExpandUtils.h"
-#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <cassert>
+#include <cstdint>
+#include <iterator>
using namespace llvm;
#define DEBUG_TYPE "atomic-expand"
namespace {
+
class AtomicExpand: public FunctionPass {
- const TargetLowering *TLI;
+ const TargetLowering *TLI = nullptr;
+
public:
static char ID; // Pass identification, replacement for typeid
- AtomicExpand() : FunctionPass(ID), TLI(nullptr) {
+
+ AtomicExpand() : FunctionPass(ID) {
initializeAtomicExpandPass(*PassRegistry::getPassRegistry());
}
llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI,
CreateCmpXchgInstFun CreateCmpXchg);
};
-}
+
+} // end anonymous namespace
char AtomicExpand::ID = 0;
+
char &llvm::AtomicExpandID = AtomicExpand::ID;
+
INITIALIZE_PASS(AtomicExpand, DEBUG_TYPE, "Expand Atomic instructions",
false, false)
FunctionPass *llvm::createAtomicExpandPass() { return new AtomicExpand(); }
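For orientation, expandAtomicRMWToCmpXchg lowers an atomicrmw into a load / compare-exchange retry loop. A minimal sketch of that shape, written against std::atomic rather than the pass's actual IRBuilder emission:

    #include <atomic>

    // Shape of the expansion: read the old value, apply the RMW operation,
    // then retry the compare-exchange until no other thread intervened.
    // compare_exchange_weak reloads Old on failure, so the loop converges.
    int atomicAddViaCmpXchg(std::atomic<int> &A, int V) {
      int Old = A.load();
      int New;
      do {
        New = Old + V; // the operation being expanded (here: add)
      } while (!A.compare_exchange_weak(Old, New));
      return Old; // atomicrmw yields the previous value
    }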
-namespace {
// Helper functions to retrieve the size of atomic instructions.
-unsigned getAtomicOpSize(LoadInst *LI) {
+static unsigned getAtomicOpSize(LoadInst *LI) {
const DataLayout &DL = LI->getModule()->getDataLayout();
return DL.getTypeStoreSize(LI->getType());
}
-unsigned getAtomicOpSize(StoreInst *SI) {
+static unsigned getAtomicOpSize(StoreInst *SI) {
const DataLayout &DL = SI->getModule()->getDataLayout();
return DL.getTypeStoreSize(SI->getValueOperand()->getType());
}
-unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
+static unsigned getAtomicOpSize(AtomicRMWInst *RMWI) {
const DataLayout &DL = RMWI->getModule()->getDataLayout();
return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}
-unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
+static unsigned getAtomicOpSize(AtomicCmpXchgInst *CASI) {
const DataLayout &DL = CASI->getModule()->getDataLayout();
return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}
// Helper functions to retrieve the alignment of atomic instructions.
-unsigned getAtomicOpAlign(LoadInst *LI) {
+static unsigned getAtomicOpAlign(LoadInst *LI) {
unsigned Align = LI->getAlignment();
// In the future, if this IR restriction is relaxed, we should
// return DataLayout::getABITypeAlignment when there's no align
// value.
return Align;
}
-unsigned getAtomicOpAlign(StoreInst *SI) {
+static unsigned getAtomicOpAlign(StoreInst *SI) {
unsigned Align = SI->getAlignment();
// In the future, if this IR restriction is relaxed, we should
// return DataLayout::getABITypeAlignment when there's no align
// value.
return Align;
}
-unsigned getAtomicOpAlign(AtomicRMWInst *RMWI) {
+static unsigned getAtomicOpAlign(AtomicRMWInst *RMWI) {
// TODO(PR27168): This instruction has no alignment attribute, but unlike the
// default alignment for load/store, the default here is to assume
// it has NATURAL alignment, not DataLayout-specified alignment.
const DataLayout &DL = RMWI->getModule()->getDataLayout();
return DL.getTypeStoreSize(RMWI->getValOperand()->getType());
}
-unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {
+static unsigned getAtomicOpAlign(AtomicCmpXchgInst *CASI) {
// TODO(PR27168): same comment as above.
const DataLayout &DL = CASI->getModule()->getDataLayout();
return DL.getTypeStoreSize(CASI->getCompareOperand()->getType());
}
// Determine if a particular atomic operation has a supported size,
// and is of appropriate alignment, to be passed through for target
// lowering. (Versus turning into a __atomic libcall)
template <typename Inst>
-bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
+static bool atomicSizeSupported(const TargetLowering *TLI, Inst *I) {
unsigned Size = getAtomicOpSize(I);
unsigned Align = getAtomicOpAlign(I);
return Align >= Size && Size <= TLI->getMaxAtomicSizeInBitsSupported() / 8;
}
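A standalone analog of this check, with the values spelled out (MaxAtomicSizeInBits stands in for TLI->getMaxAtomicSizeInBitsSupported(); the numbers are illustrative):

    #include <cassert>

    // Underaligned or oversized atomics fail the check and take the
    // __atomic_* libcall path instead of native lowering.
    static bool sizeSupported(unsigned Size, unsigned Align,
                              unsigned MaxAtomicSizeInBits) {
      return Align >= Size && Size <= MaxAtomicSizeInBits / 8;
    }

    int main() {
      assert(sizeSupported(4, 4, 64));    // i32, naturally aligned: native
      assert(!sizeSupported(4, 2, 64));   // i32, underaligned: libcall
      assert(!sizeSupported(16, 16, 64)); // i128 with a 64-bit max: libcall
    }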
-} // end anonymous namespace
-
bool AtomicExpand::runOnFunction(Function &F) {
auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
if (!TPC)
Value *Mask;
Value *Inv_Mask;
};
+
} // end anonymous namespace
/// This is a helper function which builds instructions to provide
/// values necessary for partword atomic operations.
///
/// Mask: Value to mask with the value loaded from AlignedAddr, to
/// include only the part that would've been loaded from Addr.
///
/// Inv_Mask: The inverse of Mask.
-
static PartwordMaskValues createMaskInstrs(IRBuilder<> &Builder, Instruction *I,
Type *ValueType, Value *Addr,
unsigned WordSize) {
/// part of the value.
void AtomicExpand::expandPartwordAtomicRMW(
AtomicRMWInst *AI, TargetLoweringBase::AtomicExpansionKind ExpansionKind) {
-
assert(ExpansionKind == TargetLoweringBase::AtomicExpansionKind::CmpXChg);
AtomicOrdering MemOpOrder = AI->getOrdering();
return NewCI;
}
-
bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
AtomicOrdering SuccessOrder = CI->getSuccessOrdering();
AtomicOrdering FailureOrder = CI->getFailureOrdering();
-//===-- DwarfEHPrepare - Prepare exception handling for code generation ---===//
+//===- DwarfEHPrepare - Prepare exception handling for code generation ----===//
//
// The LLVM Compiler Infrastructure
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/Local.h"
+#include <cstddef>
+
using namespace llvm;
#define DEBUG_TYPE "dwarfehprepare"
STATISTIC(NumResumesLowered, "Number of resume calls lowered");
namespace {
+
class DwarfEHPrepare : public FunctionPass {
// RewindFunction - _Unwind_Resume or the target equivalent.
- Constant *RewindFunction;
+ Constant *RewindFunction = nullptr;
- DominatorTree *DT;
- const TargetLowering *TLI;
+ DominatorTree *DT = nullptr;
+ const TargetLowering *TLI = nullptr;
bool InsertUnwindResumeCalls(Function &Fn);
Value *GetExceptionObject(ResumeInst *RI);
public:
static char ID; // Pass identification, replacement for typeid.
- DwarfEHPrepare()
- : FunctionPass(ID), RewindFunction(nullptr), DT(nullptr), TLI(nullptr) {
- }
+ DwarfEHPrepare() : FunctionPass(ID) {}
bool runOnFunction(Function &Fn) override;
return "Exception handling preparation";
}
};
+
} // end anonymous namespace
char DwarfEHPrepare::ID = 0;
+
INITIALIZE_PASS_BEGIN(DwarfEHPrepare, DEBUG_TYPE,
"Prepare DWARF exceptions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
-//===-- GlobalMerge.cpp - Internal globals merging -----------------------===//
+//===- GlobalMerge.cpp - Internal globals merging -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
// This pass merges globals with internal linkage into one. This way all the
// globals which were merged into the biggest one can be addressed using offsets
// from the same base pointer (no need for separate base pointer for each of the
// - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
+//
// ===---------------------------------------------------------------------===//
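The transformation the header describes, sketched at the source level (a condensed version of the example in this file's full comment; names are illustrative):

    constexpr int N = 128;

    // Before merging: three internal globals, so the loop below must keep
    // three base addresses live (high register pressure on e.g. ARM).
    static int foo[N], bar[N], baz[N];

    static void scale() {
      for (int i = 0; i < N; ++i)
        foo[i] = bar[i] * baz[i];
    }

    // After merging, the pass produces the moral equivalent of
    //   static struct { int foo[N]; int bar[N]; int baz[N]; } merged;
    // so foo, bar and baz become constant offsets from one base pointer.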
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
-#include "llvm/IR/Instructions.h"
-#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
-#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Target/TargetMachine.h"
#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <vector>
+
using namespace llvm;
#define DEBUG_TYPE "global-merge"
cl::desc("Enable global merge pass on external linkage"));
STATISTIC(NumMerged, "Number of globals merged");
+
namespace {
+
class GlobalMerge : public FunctionPass {
- const TargetMachine *TM;
+ const TargetMachine *TM = nullptr;
+
// FIXME: Infer the maximum possible offset depending on the actual users
// (these max offsets are different for the users inside Thumb or ARM
// functions), see the code that passes in the offset in the ARM backend
/// Currently, this applies a dead simple heuristic: only consider globals
/// used in minsize functions for merging.
/// FIXME: This could learn about optsize, and be used in the cost model.
- bool OnlyOptimizeForSize;
+ bool OnlyOptimizeForSize = false;
/// Whether we should merge global variables that have external linkage.
- bool MergeExternalGlobals;
+ bool MergeExternalGlobals = false;
bool IsMachO;
bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
Module &M, bool isConst, unsigned AddrSpace) const;
+
/// \brief Merge everything in \p Globals for which the corresponding bit
/// in \p GlobalSet is set.
bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
public:
static char ID; // Pass identification, replacement for typeid.
+
explicit GlobalMerge()
- : FunctionPass(ID), TM(nullptr), MaxOffset(GlobalMergeMaxOffset),
- OnlyOptimizeForSize(false), MergeExternalGlobals(false) {
+ : FunctionPass(ID), MaxOffset(GlobalMergeMaxOffset) {
initializeGlobalMergePass(*PassRegistry::getPassRegistry());
}
FunctionPass::getAnalysisUsage(AU);
}
};
+
} // end anonymous namespace
char GlobalMerge::ID = 0;
+
INITIALIZE_PASS(GlobalMerge, DEBUG_TYPE, "Merge global variables", false, false)
bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
// We keep track of the sets of globals used together "close enough".
struct UsedGlobalSet {
- UsedGlobalSet(size_t Size) : Globals(Size), UsageCount(1) {}
BitVector Globals;
- unsigned UsageCount;
+ unsigned UsageCount = 1;
+
+ UsedGlobalSet(size_t Size) : Globals(Size) {}
};
// Each set is unique in UsedGlobalSets.
IsMachO = Triple(M.getTargetTriple()).isOSBinFormatMachO();
auto &DL = M.getDataLayout();
- DenseMap<unsigned, SmallVector<GlobalVariable*, 16> > Globals, ConstGlobals,
+ DenseMap<unsigned, SmallVector<GlobalVariable *, 16>> Globals, ConstGlobals,
BSSGlobals;
bool Changed = false;
setMustKeepGlobalVariables(M);
-//===-- IfConversion.cpp - Machine code if conversion pass. ---------------===//
+//===- IfConversion.cpp - Machine code if conversion pass -----------------===//
//
// The LLVM Compiler Infrastructure
//
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/LivePhysRegs.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <iterator>
+#include <memory>
#include <utility>
+#include <vector>
using namespace llvm;
STATISTIC(NumUnpred, "Number of true blocks of diamonds unpredicated");
namespace {
+
class IfConverter : public MachineFunctionPass {
enum IfcvtKind {
ICNotClassfied, // BB data valid, but not classified.
bool IsUnpredicable : 1;
bool CannotBeCopied : 1;
bool ClobbersPred : 1;
- unsigned NonPredSize;
- unsigned ExtraCost;
- unsigned ExtraCost2;
- MachineBasicBlock *BB;
- MachineBasicBlock *TrueBB;
- MachineBasicBlock *FalseBB;
+ unsigned NonPredSize = 0;
+ unsigned ExtraCost = 0;
+ unsigned ExtraCost2 = 0;
+ MachineBasicBlock *BB = nullptr;
+ MachineBasicBlock *TrueBB = nullptr;
+ MachineBasicBlock *FalseBB = nullptr;
SmallVector<MachineOperand, 4> BrCond;
SmallVector<MachineOperand, 4> Predicate;
+
BBInfo() : IsDone(false), IsBeingAnalyzed(false),
IsAnalyzed(false), IsEnqueued(false), IsBrAnalyzable(false),
IsBrReversible(false), HasFallThrough(false),
IsUnpredicable(false), CannotBeCopied(false),
- ClobbersPred(false), NonPredSize(0), ExtraCost(0),
- ExtraCost2(0), BB(nullptr), TrueBB(nullptr),
- FalseBB(nullptr) {}
+ ClobbersPred(false) {}
};
/// Record information about pending if-conversions to attempt:
bool NeedSubsumption : 1;
bool TClobbersPred : 1;
bool FClobbersPred : 1;
+
IfcvtToken(BBInfo &b, IfcvtKind k, bool s, unsigned d, unsigned d2 = 0,
bool tc = false, bool fc = false)
: BBI(b), Kind(k), NumDups(d), NumDups2(d2), NeedSubsumption(s),
bool PreRegAlloc;
bool MadeChange;
- int FnNum;
+ int FnNum = -1;
std::function<bool(const MachineFunction &)> PredicateFtor;
public:
static char ID;
+
IfConverter(std::function<bool(const MachineFunction &)> Ftor = nullptr)
- : MachineFunctionPass(ID), FnNum(-1), PredicateFtor(std::move(Ftor)) {
+ : MachineFunctionPass(ID), PredicateFtor(std::move(Ftor)) {
initializeIfConverterPass(*PassRegistry::getPassRegistry());
}
}
};
- char IfConverter::ID = 0;
-}
+} // end anonymous namespace
+
+char IfConverter::ID = 0;
char &llvm::IfConverterID = IfConverter::ID;
}
break;
}
- case ICDiamond: {
+ case ICDiamond:
if (DisableDiamond) break;
DEBUG(dbgs() << "Ifcvt (Diamond): BB#" << BBI.BB->getNumber() << " (T:"
<< BBI.TrueBB->getNumber() << ",F:"
DEBUG(dbgs() << (RetVal ? "succeeded!" : "failed!") << "\n");
if (RetVal) ++NumDiamonds;
break;
- }
- case ICForkedDiamond: {
+ case ICForkedDiamond:
if (DisableForkedDiamond) break;
DEBUG(dbgs() << "Ifcvt (Forked Diamond): BB#"
<< BBI.BB->getNumber() << " (T:"
if (RetVal) ++NumForkedDiamonds;
break;
}
- }
if (RetVal && MRI->tracksLiveness())
recomputeLivenessFlags(*BBI.BB);
unsigned &Dups1, unsigned &Dups2,
MachineBasicBlock &TBB, MachineBasicBlock &FBB,
bool SkipUnconditionalBranches) const {
-
while (TIB != TIE && FIB != FIE) {
// Skip dbg_value instructions. These do not count.
TIB = skipDebugInstructionsForward(TIB, TIE);
-//===-- InterferenceCache.cpp - Caching per-block interference ---------*--===//
+//===- InterferenceCache.cpp - Caching per-block interference -------------===//
//
// The LLVM Compiler Infrastructure
//
//===----------------------------------------------------------------------===//
#include "InterferenceCache.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
+#include "llvm/CodeGen/LiveIntervalUnion.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <tuple>
using namespace llvm;
BlockInterference *BI = &Blocks[MBBNum];
ArrayRef<SlotIndex> RegMaskSlots;
ArrayRef<const uint32_t*> RegMaskBits;
- for (;;) {
+ while (true) {
BI->Tag = Tag;
BI->First = BI->Last = SlotIndex();
-//===-- InterferenceCache.h - Caching per-block interference ---*- C++ -*--===//
+//===- InterferenceCache.h - Caching per-block interference ----*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
#ifndef LLVM_LIB_CODEGEN_INTERFERENCECACHE_H
#define LLVM_LIB_CODEGEN_INTERFERENCECACHE_H
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/Support/Compiler.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
namespace llvm {
class LiveIntervals;
+class MachineFunction;
+class TargetRegisterInfo;
class LLVM_LIBRARY_VISIBILITY InterferenceCache {
- const TargetRegisterInfo *TRI;
- LiveIntervalUnion *LIUArray;
- MachineFunction *MF;
-
/// BlockInterference - information about the interference in a single basic
/// block.
struct BlockInterference {
- BlockInterference() : Tag(0) {}
- unsigned Tag;
+ unsigned Tag = 0;
SlotIndex First;
SlotIndex Last;
+
+ BlockInterference() = default;
};
/// Entry - A cache entry containing interference information for all aliases
/// of PhysReg in all basic blocks.
class Entry {
/// PhysReg - The register currently represented.
- unsigned PhysReg;
+ unsigned PhysReg = 0;
/// Tag - Cache tag is changed when any of the underlying LiveIntervalUnions
/// change.
- unsigned Tag;
+ unsigned Tag = 0;
/// RefCount - The total number of Cursor instances referring to this Entry.
- unsigned RefCount;
+ unsigned RefCount = 0;
/// MF - The current function.
-  MachineFunction *MF;
+  MachineFunction *MF = nullptr;
/// Indexes - Mapping block numbers to SlotIndex ranges.
- SlotIndexes *Indexes;
+ SlotIndexes *Indexes = nullptr;
/// LIS - Used for accessing register mask interference maps.
- LiveIntervals *LIS;
+ LiveIntervals *LIS = nullptr;
/// PrevPos - The previous position the iterators were moved to.
SlotIndex PrevPos;
unsigned VirtTag;
/// Fixed interference in RegUnit.
- LiveRange *Fixed;
+ LiveRange *Fixed = nullptr;
/// Iterator pointing into the fixed RegUnit interference.
LiveInterval::iterator FixedI;
- RegUnitInfo(LiveIntervalUnion &LIU)
- : VirtTag(LIU.getTag()), Fixed(nullptr) {
+ RegUnitInfo(LiveIntervalUnion &LIU) : VirtTag(LIU.getTag()) {
VirtI.setMap(LIU.getMap());
}
};
void update(unsigned MBBNum);
public:
- Entry() : PhysReg(0), Tag(0), RefCount(0), Indexes(nullptr), LIS(nullptr) {}
+ Entry() = default;
void clear(MachineFunction *mf, SlotIndexes *indexes, LiveIntervals *lis) {
assert(!hasRefs() && "Cannot clear cache entry with references");
// robin manner.
enum { CacheEntries = 32 };
+ const TargetRegisterInfo *TRI = nullptr;
+ LiveIntervalUnion *LIUArray = nullptr;
+ MachineFunction *MF = nullptr;
+
// Point to an entry for each physreg. The entry pointed to may not be up to
// date, and it may have been reused for a different physreg.
- unsigned char* PhysRegEntries;
- size_t PhysRegEntriesCount;
+ unsigned char* PhysRegEntries = nullptr;
+ size_t PhysRegEntriesCount = 0;
// Next round-robin entry to be picked.
- unsigned RoundRobin;
+ unsigned RoundRobin = 0;
// The actual cache entries.
Entry Entries[CacheEntries];
Entry *get(unsigned PhysReg);
public:
- InterferenceCache()
- : TRI(nullptr), LIUArray(nullptr), MF(nullptr), PhysRegEntries(nullptr),
- PhysRegEntriesCount(0), RoundRobin(0) {}
+ friend class Cursor;
+
+ InterferenceCache() = default;
~InterferenceCache() {
free(PhysRegEntries);
}
void reinitPhysRegEntries();
/// init - Prepare cache for a new function.
- void init(MachineFunction*, LiveIntervalUnion*, SlotIndexes*, LiveIntervals*,
- const TargetRegisterInfo *);
+ void init(MachineFunction *mf, LiveIntervalUnion *liuarray,
+ SlotIndexes *indexes, LiveIntervals *lis,
+ const TargetRegisterInfo *tri);
/// getMaxCursors - Return the maximum number of concurrent cursors that can
/// be supported.
/// Cursor - The primary query interface for the block interference cache.
class Cursor {
- Entry *CacheEntry;
- const BlockInterference *Current;
+ Entry *CacheEntry = nullptr;
+ const BlockInterference *Current = nullptr;
static const BlockInterference NoInterference;
void setEntry(Entry *E) {
public:
/// Cursor - Create a dangling cursor.
- Cursor() : CacheEntry(nullptr), Current(nullptr) {}
- ~Cursor() { setEntry(nullptr); }
+ Cursor() = default;
- Cursor(const Cursor &O) : CacheEntry(nullptr), Current(nullptr) {
+ Cursor(const Cursor &O) {
setEntry(O.CacheEntry);
}
return *this;
}
+ ~Cursor() { setEntry(nullptr); }
+
/// setPhysReg - Point this cursor to PhysReg's interference.
void setPhysReg(InterferenceCache &Cache, unsigned PhysReg) {
// Release reference before getting a new one. That guarantees we can
return Current->Last;
}
};
-
- friend class Cursor;
};
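A short usage sketch of the Cursor query interface declared above; creating and initializing the cache from a live MachineFunction is assumed and elided:

    // Query PhysReg's interference in block MBBNum. The cursor keeps a
    // reference on the cache entry for as long as it points at it.
    InterferenceCache::Cursor Cur;
    Cur.setPhysReg(Cache, PhysReg); // Cache: an initialized InterferenceCache
    Cur.moveToBlock(MBBNum);
    if (Cur.hasInterference()) {
      SlotIndex First = Cur.first(); // start of interference in the block
      SlotIndex Last = Cur.last();   // end of interference in the block
      // ... compare against the candidate live range ...
    }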
-} // namespace llvm
+} // end namespace llvm
-#endif
+#endif // LLVM_LIB_CODEGEN_INTERFERENCECACHE_H
-//===--------------------- InterleavedAccessPass.cpp ----------------------===//
+//===- InterleavedAccessPass.cpp ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
//
// Similarly, a set of interleaved stores can be transformed into an optimized
// sequence of shuffles followed by a set of target specific stores for X86.
+//
//===----------------------------------------------------------------------===//
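At the source level, the pattern this pass targets looks like the function below: a vectorizer turns it into one wide load feeding two strided shuffles, and this pass then matches those shuffles and emits a target-specific intrinsic (ld2 on AArch64, for instance). A sketch:

    // De-interleaving load with factor 2: even lanes feed one output
    // stream, odd lanes the other.
    void deinterleave2(const int *In, int *Even, int *Odd, int N) {
      for (int i = 0; i < N; ++i) {
        Even[i] = In[2 * i];
        Odd[i] = In[2 * i + 1];
      }
    }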
-#include "llvm/CodeGen/Passes.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <cassert>
+#include <utility>
using namespace llvm;
namespace {
+
class InterleavedAccess : public FunctionPass {
-
public:
static char ID;
- InterleavedAccess() : FunctionPass(ID), DT(nullptr), TLI(nullptr) {
+
+ InterleavedAccess() : FunctionPass(ID) {
initializeInterleavedAccessPass(*PassRegistry::getPassRegistry());
}
}
private:
- DominatorTree *DT;
- const TargetLowering *TLI;
+ DominatorTree *DT = nullptr;
+ const TargetLowering *TLI = nullptr;
/// The maximum supported interleave factor.
unsigned MaxFactor;
bool tryReplaceExtracts(ArrayRef<ExtractElementInst *> Extracts,
ArrayRef<ShuffleVectorInst *> Shuffles);
};
+
} // end anonymous namespace
char InterleavedAccess::ID = 0;
+
INITIALIZE_PASS_BEGIN(InterleavedAccess, DEBUG_TYPE,
"Lower interleaved memory accesses to target specific intrinsics", false,
false)
bool InterleavedAccess::tryReplaceExtracts(
ArrayRef<ExtractElementInst *> Extracts,
ArrayRef<ShuffleVectorInst *> Shuffles) {
-
// If there aren't any extractelement instructions to modify, there's nothing
// to do.
if (Extracts.empty())
DenseMap<ExtractElementInst *, std::pair<Value *, int>> ReplacementMap;
for (auto *Extract : Extracts) {
-
// The vector index that is extracted.
auto *IndexOperand = cast<ConstantInt>(Extract->getIndexOperand());
auto Index = IndexOperand->getSExtValue();
// extractelement instruction (which uses an interleaved load) to use one
// of the shufflevector instructions instead of the load.
for (auto *Shuffle : Shuffles) {
-
// If the shufflevector instruction doesn't dominate the extract, we
// can't create a use of it.
if (!DT->dominates(Shuffle, Extract))
-//===-- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ---------===//
+//===- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ----------===//
//
// The LLVM Compiler Infrastructure
//
//
//===----------------------------------------------------------------------===//
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
-#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <algorithm>
+#include <cassert>
+#include <limits>
+#include <vector>
+
using namespace llvm;
#define DEBUG_TYPE "machinelicm"
"Number of machine instructions hoisted out of loops post regalloc");
namespace {
+
class MachineLICM : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetLoweringBase *TLI;
const MachineFrameInfo *MFI;
MachineRegisterInfo *MRI;
TargetSchedModel SchedModel;
- bool PreRegAlloc;
+ bool PreRegAlloc = true;
// Various analyses that we use...
AliasAnalysis *AA; // Alias analysis info.
MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
// Exit blocks for CurLoop.
- SmallVector<MachineBasicBlock*, 8> ExitBlocks;
+ SmallVector<MachineBasicBlock *, 8> ExitBlocks;
bool isExitBlock(const MachineBasicBlock *MBB) const {
return is_contained(ExitBlocks, MBB);
SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
// For each opcode, keep a list of potential CSE instructions.
- DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
+ DenseMap<unsigned, std::vector<const MachineInstr *>> CSEMap;
enum {
SpeculateFalse = 0,
public:
static char ID; // Pass identification, replacement for typeid
- MachineLICM() :
- MachineFunctionPass(ID), PreRegAlloc(true) {
- initializeMachineLICMPass(*PassRegistry::getPassRegistry());
- }
- explicit MachineLICM(bool PreRA) :
- MachineFunctionPass(ID), PreRegAlloc(PreRA) {
+ MachineLICM() : MachineFunctionPass(ID) {
+ initializeMachineLICMPass(*PassRegistry::getPassRegistry());
+ }
+
+ explicit MachineLICM(bool PreRA)
+ : MachineFunctionPass(ID), PreRegAlloc(PreRA) {
initializeMachineLICMPass(*PassRegistry::getPassRegistry());
- }
+ }
bool runOnMachineFunction(MachineFunction &MF) override;
MachineInstr *MI;
unsigned Def;
int FI;
+
CandidateInfo(MachineInstr *mi, unsigned def, int fi)
: MI(mi), Def(def), FI(fi) {}
};
MachineBasicBlock *getCurPreheader();
};
+
} // end anonymous namespace
char MachineLICM::ID = 0;
+
char &llvm::MachineLICMID = MachineLICM::ID;
+
INITIALIZE_PASS_BEGIN(MachineLICM, DEBUG_TYPE,
"Machine Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
// Only consider reloads for now and remats which do not have register
// operands. FIXME: Consider unfold load folding instructions.
if (Def && !RuledOut) {
- int FI = INT_MIN;
+ int FI = std::numeric_limits<int>::min();
if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
(TII->isLoadFromStackSlot(*MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
Candidates.push_back(CandidateInfo(MI, Def, FI));
// registers read by the terminator. Similarly its def should not be
// clobbered by the terminator.
for (CandidateInfo &Candidate : Candidates) {
- if (Candidate.FI != INT_MIN &&
+ if (Candidate.FI != std::numeric_limits<int>::min() &&
StoredFIs.count(Candidate.FI))
continue;
/// specified header block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
-///
void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
MachineBasicBlock *Preheader = getCurPreheader();
if (!Preheader)
/// Return true if this machine instruction loads from global offset table or
/// constant pool.
static bool mayLoadFromGOTOrConstantPool(MachineInstr &MI) {
- assert (MI.mayLoad() && "Expected MI that loads!");
+ assert(MI.mayLoad() && "Expected MI that loads!");
// If we lost memory operands, conservatively assume that the instruction
// reads from everything.
/// I.e., all virtual register operands are defined outside of the loop,
/// physical registers aren't accessed explicitly, and there are no side
/// effects that aren't captured by the operands or other flags.
-///
bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
if (!IsLICMCandidate(I))
return false;
return true;
}
-
/// Return true if the specified instruction is used by a phi node and hoisting
/// it could cause a copy to be inserted.
bool MachineLICM::HasLoopPHIUse(const MachineInstr *MI) const {
/// the existing instruction rather than hoisting the instruction to the
/// preheader.
bool MachineLICM::EliminateCSE(MachineInstr *MI,
- DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
+ DenseMap<unsigned, std::vector<const MachineInstr *>>::iterator &CI) {
// Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
// the undef property onto uses.
if (CI == CSEMap.end() || MI->isImplicitDef())
/// the loop.
bool MachineLICM::MayCSE(MachineInstr *MI) {
unsigned Opcode = MI->getOpcode();
- DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
+ DenseMap<unsigned, std::vector<const MachineInstr *>>::iterator
CI = CSEMap.find(Opcode);
// Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
// the undef property onto uses.
// Look for opportunity to CSE the hoisted instruction.
unsigned Opcode = MI->getOpcode();
- DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
+ DenseMap<unsigned, std::vector<const MachineInstr *>>::iterator
CI = CSEMap.find(Opcode);
if (!EliminateCSE(MI, CI)) {
// Otherwise, splice the instruction to the preheader.
-//===- RegisterCoalescer.cpp - Generic Register Coalescing Interface -------==//
+//===- RegisterCoalescer.cpp - Generic Register Coalescing Interface ------===//
//
// The LLVM Compiler Infrastructure
//
//===----------------------------------------------------------------------===//
#include "RegisterCoalescer.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
-#include "llvm/CodeGen/VirtRegMap.h"
-#include "llvm/IR/Value.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/IR/DebugLoc.h"
#include "llvm/Pass.h"
+#include "llvm/MC/LaneBitmask.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOpcodes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
-#include <cmath>
+#include <cassert>
+#include <iterator>
+#include <limits>
+#include <tuple>
+#include <utility>
+#include <vector>
+
using namespace llvm;
#define DEBUG_TYPE "regalloc"
cl::Hidden);
namespace {
+
class RegisterCoalescer : public MachineFunctionPass,
private LiveRangeEdit::Delegate {
MachineFunction* MF;
MachineRegisterInfo* MRI;
- const TargetMachine* TM;
const TargetRegisterInfo* TRI;
const TargetInstrInfo* TII;
LiveIntervals *LIS;
public:
static char ID; ///< Class identification, replacement for typeinfo
+
RegisterCoalescer() : MachineFunctionPass(ID) {
initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
}
/// Implement the dump method.
void print(raw_ostream &O, const Module* = nullptr) const override;
};
+
} // end anonymous namespace
+char RegisterCoalescer::ID = 0;
+
char &llvm::RegisterCoalescerID = RegisterCoalescer::ID;
INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing",
INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing",
"Simple Register Coalescing", false, false)
-char RegisterCoalescer::ID = 0;
-
static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI,
unsigned &Src, unsigned &Dst,
unsigned &SrcSub, unsigned &DstSub) {
/// Copy segments with value number @p SrcValNo from live range @p Src to live
/// range @p Dst and use value number @p DstValNo there.
static void addSegmentsWithValNo(LiveRange &Dst, VNInfo *DstValNo,
- const LiveRange &Src, const VNInfo *SrcValNo)
-{
+ const LiveRange &Src, const VNInfo *SrcValNo) {
for (const LiveRange::Segment &S : Src.segments) {
if (S.valno != SrcValNo)
continue;
}
bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
-
Again = false;
DEBUG(dbgs() << LIS->getInstructionIndex(*CopyMI) << '\t' << *CopyMI);
// lane value escapes the block, the join is aborted.
namespace {
+
/// Track information about values in a single virtual register about to be
/// joined. Objects of this class are always created in pairs - one for each
/// side of the CoalescerPair (or one for each lane of a side of the coalescer
class JoinVals {
/// Live range we work on.
LiveRange &LR;
+
/// (Main) register we work on.
const unsigned Reg;
/// subregister SubIdx in the coalesced register. Either CP.DstIdx or
/// CP.SrcIdx.
const unsigned SubIdx;
+
/// The LaneMask that this live range will occupy in the coalesced register. May
/// be smaller than the lanemask produced by SubIdx when merging subranges.
const LaneBitmask LaneMask;
/// This is true when joining sub register ranges, false when joining main
/// ranges.
const bool SubRangeJoin;
+
/// Whether the current LiveInterval tracks subregister liveness.
const bool TrackSubRegLiveness;
/// joined register, so they can be compared directly between SrcReg and
/// DstReg.
struct Val {
- ConflictResolution Resolution;
+ ConflictResolution Resolution = CR_Keep;
/// Lanes written by this def, 0 for unanalyzed values.
LaneBitmask WriteLanes;
LaneBitmask ValidLanes;
/// Value in LI being redefined by this def.
- VNInfo *RedefVNI;
+ VNInfo *RedefVNI = nullptr;
/// Value in the other live range that overlaps this def, if any.
- VNInfo *OtherVNI;
+ VNInfo *OtherVNI = nullptr;
/// Is this value an IMPLICIT_DEF that can be erased?
///
/// ProcessImplicitDefs can very rarely create IMPLICIT_DEF values with
/// longer live ranges. Such IMPLICIT_DEF values should be treated like
/// normal values.
- bool ErasableImplicitDef;
+ bool ErasableImplicitDef = false;
/// True when the live range of this value will be pruned because of an
/// overlapping CR_Replace value in the other live range.
- bool Pruned;
+ bool Pruned = false;
/// True once Pruned above has been computed.
- bool PrunedComputed;
+ bool PrunedComputed = false;
- Val() : Resolution(CR_Keep), WriteLanes(), ValidLanes(),
- RedefVNI(nullptr), OtherVNI(nullptr), ErasableImplicitDef(false),
- Pruned(false), PrunedComputed(false) {}
+ Val() = default;
bool isAnalyzed() const { return WriteLanes.any(); }
};
/// entry to TaintedVals.
///
/// Returns false if the tainted lanes extend beyond the basic block.
- bool taintExtent(unsigned, LaneBitmask, JoinVals&,
- SmallVectorImpl<std::pair<SlotIndex, LaneBitmask> >&);
+ bool
+ taintExtent(unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other,
+ SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent);
/// Return true if MI uses any of the given Lanes from Reg.
/// This does not include partial redefinitions of Reg.
: LR(LR), Reg(Reg), SubIdx(SubIdx), LaneMask(LaneMask),
SubRangeJoin(SubRangeJoin), TrackSubRegLiveness(TrackSubRegLiveness),
NewVNInfo(newVNInfo), CP(cp), LIS(lis), Indexes(LIS->getSlotIndexes()),
- TRI(TRI), Assignments(LR.getNumValNums(), -1), Vals(LR.getNumValNums())
- {}
+ TRI(TRI), Assignments(LR.getNumValNums(), -1), Vals(LR.getNumValNums()) {}
/// Analyze defs in LR and compute a value mapping in NewVNInfo.
/// Returns false if any conflicts were impossible to resolve.
/// Get the value assignments suitable for passing to LiveInterval::join.
const int *getAssignments() const { return Assignments.data(); }
};
+
} // end anonymous namespace
LaneBitmask JoinVals::computeWriteLanes(const MachineInstr *DefMI, bool &Redef)
bool JoinVals::
taintExtent(unsigned ValNo, LaneBitmask TaintedLanes, JoinVals &Other,
- SmallVectorImpl<std::pair<SlotIndex, LaneBitmask> > &TaintExtent) {
+ SmallVectorImpl<std::pair<SlotIndex, LaneBitmask>> &TaintExtent) {
VNInfo *VNI = LR.getValNumInfo(ValNo);
MachineBasicBlock *MBB = Indexes->getMBBFromIndex(VNI->def);
SlotIndex MBBEnd = Indexes->getMBBEndIdx(MBB);
bool JoinVals::resolveConflicts(JoinVals &Other) {
for (unsigned i = 0, e = LR.getNumValNums(); i != e; ++i) {
Val &V = Vals[i];
- assert (V.Resolution != CR_Impossible && "Unresolvable conflict");
+ assert(V.Resolution != CR_Impossible && "Unresolvable conflict");
if (V.Resolution != CR_Unresolved)
continue;
DEBUG(dbgs() << "\t\tconflict at " << PrintReg(Reg) << ':' << i
Indexes->getInstructionFromIndex(TaintExtent.front().first);
assert(LastMI && "Range must end at a proper instruction");
unsigned TaintNum = 0;
- for (;;) {
+ while (true) {
assert(MI != MBB->end() && "Bad LastMI");
if (usesLanes(*MI, Other.Reg, Other.SubIdx, TaintedLanes)) {
DEBUG(dbgs() << "\t\ttainted lanes used by: " << *MI);
}
namespace {
+
/// Information concerning MBB coalescing priority.
struct MBBPriorityInfo {
MachineBasicBlock *MBB;
MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit)
: MBB(mbb), Depth(depth), IsSplit(issplit) {}
};
-}
+
+} // end anonymous namespace
/// C-style comparator that sorts first based on the loop depth of the basic
/// block (the unsigned), and then on the MBB number.
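The definition is elided here; a hedged reconstruction of the ordering the comment describes (the real comparator also breaks ties on split edges before falling back to block numbers):

    static int compareMBBPriority(const MBBPriorityInfo *LHS,
                                  const MBBPriorityInfo *RHS) {
      // Deeper loops sort first so inner-loop copies are coalesced before
      // surrounding code constrains them.
      if (LHS->Depth != RHS->Depth)
        return LHS->Depth > RHS->Depth ? -1 : 1;
      // Deterministic tiebreak on the block number.
      return LHS->MBB->getNumber() - RHS->MBB->getNumber();
    }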
array_pod_sort(MBBs.begin(), MBBs.end(), compareMBBPriority);
// Coalesce intervals in MBB priority order.
- unsigned CurrDepth = UINT_MAX;
+ unsigned CurrDepth = std::numeric_limits<unsigned>::max();
for (unsigned i = 0, e = MBBs.size(); i != e; ++i) {
// Try coalescing the collected local copies for deeper loops.
if (JoinGlobalCopies && MBBs[i].Depth < CurrDepth) {
bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
MF = &fn;
MRI = &fn.getRegInfo();
- TM = &fn.getTarget();
const TargetSubtargetInfo &STI = fn.getSubtarget();
TRI = STI.getRegisterInfo();
TII = STI.getInstrInfo();
-//===-- RegisterCoalescer.h - Register Coalescing Interface -----*- C++ -*-===//
+//===- RegisterCoalescer.h - Register Coalescing Interface ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
namespace llvm {
- class MachineInstr;
- class TargetRegisterInfo;
- class TargetRegisterClass;
- class TargetInstrInfo;
+class MachineInstr;
+class TargetRegisterClass;
+class TargetRegisterInfo;
/// A helper class for register coalescers. When deciding if
/// two registers can be coalesced, CoalescerPair can determine if a copy
/// The register that will be left after coalescing. It can be a
/// virtual or physical register.
- unsigned DstReg;
+ unsigned DstReg = 0;
/// The virtual register that will be coalesced into dstReg.
- unsigned SrcReg;
+ unsigned SrcReg = 0;
/// The sub-register index of the old DstReg in the new coalesced register.
- unsigned DstIdx;
+ unsigned DstIdx = 0;
/// The sub-register index of the old SrcReg in the new coalesced register.
- unsigned SrcIdx;
+ unsigned SrcIdx = 0;
/// True when the original copy was a partial subregister copy.
- bool Partial;
+ bool Partial = false;
/// True when both regs are virtual and newRC is constrained.
- bool CrossClass;
+ bool CrossClass = false;
/// True when DstReg and SrcReg are reversed from the original
/// copy instruction.
- bool Flipped;
+ bool Flipped = false;
/// The register class of the coalesced register, or NULL if DstReg
/// is a physreg. This register class may be a super-register of both
/// SrcReg and DstReg.
- const TargetRegisterClass *NewRC;
+ const TargetRegisterClass *NewRC = nullptr;
public:
- CoalescerPair(const TargetRegisterInfo &tri)
- : TRI(tri), DstReg(0), SrcReg(0), DstIdx(0), SrcIdx(0),
- Partial(false), CrossClass(false), Flipped(false), NewRC(nullptr) {}
+ CoalescerPair(const TargetRegisterInfo &tri) : TRI(tri) {}
/// Create a CoalescerPair representing a virtreg-to-physreg copy.
/// No need to call setRegisters().
CoalescerPair(unsigned VirtReg, unsigned PhysReg,
const TargetRegisterInfo &tri)
- : TRI(tri), DstReg(PhysReg), SrcReg(VirtReg), DstIdx(0), SrcIdx(0),
- Partial(false), CrossClass(false), Flipped(false), NewRC(nullptr) {}
+ : TRI(tri), DstReg(PhysReg), SrcReg(VirtReg) {}
/// Set registers to match the copy instruction MI. Return
/// false if MI is not a coalescable copy instruction.
/// Return the register class of the coalesced register.
const TargetRegisterClass *getNewRC() const { return NewRC; }
};
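A short sketch of how the coalescer consumes this class (condensed from the joinCopy path in RegisterCoalescer.cpp; error handling omitted):

    // setRegisters() decides whether CopyMI is a coalescable copy and, if
    // so, fills in DstReg/SrcReg, the sub-register indices, and NewRC.
    CoalescerPair CP(*TRI);
    if (!CP.setRegisters(CopyMI))
      return false; // not a coalescable copy
    if (CP.getSrcReg() == CP.getDstReg())
      return true; // the registers are already joined
    // ... merge SrcReg's live interval into DstReg under CP's indices ...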
-} // End llvm namespace
-#endif
+} // end namespace llvm
+
+#endif // LLVM_LIB_CODEGEN_REGISTERCOALESCER_H