///
/// \return True if the lowering succeeds, false otherwise.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<unsigned> VRegs,
- unsigned SwiftErrorVReg) const {
+ ArrayRef<Register> VRegs,
+ Register SwiftErrorVReg) const {
if (!supportSwiftError()) {
assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
return lowerReturn(MIRBuilder, Val, VRegs);
/// This hook behaves as the extended lowerReturn function, but for targets
/// that do not support swifterror value promotion.
virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
return false;
}
/// \return True if the lowering succeeded, false otherwise.
virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
return false;
}
virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs,
- unsigned SwiftErrorVReg) const {
+ Register SwiftErrorVReg) const {
if (!supportSwiftError()) {
assert(SwiftErrorVReg == 0 && "trying to use unsupported swifterror");
return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs);
///
/// \return true if the lowering succeeded, false otherwise.
bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
- unsigned ResReg, ArrayRef<unsigned> ArgRegs,
- unsigned SwiftErrorVReg,
+ Register ResReg, ArrayRef<Register> ArgRegs,
+ Register SwiftErrorVReg,
std::function<unsigned()> GetCalleeReg) const;
};
public:
ValueToVRegInfo() = default;
- using VRegListT = SmallVector<unsigned, 1>;
+ using VRegListT = SmallVector<Register, 1>;
using OffsetListT = SmallVector<uint64_t, 1>;
using const_vreg_iterator =
/// Non-aggregate types have just one corresponding VReg and the list can be
-/// used as a single "unsigned". Aggregates get flattened. If such VRegs do
+/// used as a single "Register". Aggregates get flattened. If such VRegs do
/// not exist, they are created.
- ArrayRef<unsigned> getOrCreateVRegs(const Value &Val);
+ ArrayRef<Register> getOrCreateVRegs(const Value &Val);
- unsigned getOrCreateVReg(const Value &Val) {
+ Register getOrCreateVReg(const Value &Val) {
auto Regs = getOrCreateVRegs(Val);
if (Regs.empty())
return 0;
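
//===-- Illustrative sketch (not part of the patch) ----------------------===//
// The flattening rule above in isolation: computeValueLLTs() (from
// llvm/CodeGen/Analysis.h, which IRTranslator uses) decomposes an aggregate
// into one LLT per leaf value plus bit offsets, and getOrCreateVRegs()
// returns one Register per leaf. Names below are hypothetical.
void showFlattening(const DataLayout &DL, LLVMContext &Ctx) {
  Type *STy = StructType::get(Type::getInt32Ty(Ctx), Type::getInt64Ty(Ctx));
  SmallVector<LLT, 4> ValueTys;
  SmallVector<uint64_t, 4> Offsets;
  computeValueLLTs(DL, *STy, ValueTys, &Offsets);
  // With a typical 64-bit layout: ValueTys == {s32, s64} and Offsets ==
  // {0, 64} (in bits), so a {i32, i64} value maps to two Registers, and the
  // single-Register getOrCreateVReg() is only meaningful for scalars.
}
//===----------------------------------------------------------------------===//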
return false;
Builder.setInstr(MI);
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
// aext(trunc x) -> aext/copy/trunc x
- unsigned TruncSrc;
+ Register TruncSrc;
if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
}
// aext([asz]ext x) -> [asz]ext x
- unsigned ExtSrc;
+ Register ExtSrc;
MachineInstr *ExtMI;
if (mi_match(SrcReg, MRI,
m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
return false;
Builder.setInstr(MI);
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
// zext(trunc x) -> and (aext/copy/trunc x), mask
- unsigned TruncSrc;
+ Register TruncSrc;
if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLT DstTy = MRI.getType(DstReg);
if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
return false;
Builder.setInstr(MI);
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
// sext(trunc x) -> ashr (shl (aext/copy/trunc x), c), c
- unsigned TruncSrc;
+ Register TruncSrc;
if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLT DstTy = MRI.getType(DstReg);
// Guess on the RHS shift amount type, which should be re-legalized if
if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
MI.getOperand(1).getReg(), MRI)) {
Builder.setInstr(MI);
- unsigned DstReg = MI.getOperand(0).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (Opcode == TargetOpcode::G_ANYEXT) {
const unsigned NewNumDefs = NumDefs / NumMergeRegs;
for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
- SmallVector<unsigned, 2> DstRegs;
+ SmallVector<Register, 2> DstRegs;
for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
++j, ++DefIdx)
DstRegs.push_back(MI.getOperand(DefIdx).getReg());
const unsigned NumRegs = NumMergeRegs / NumDefs;
for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
- SmallVector<unsigned, 2> Regs;
+ SmallVector<Register, 2> Regs;
for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
++j, ++Idx)
Regs.push_back(MergeI->getOperand(Idx).getReg());
/// Looks through copy instructions and returns the actual
/// source register.
- unsigned lookThroughCopyInstrs(unsigned Reg) {
- unsigned TmpReg;
+ Register lookThroughCopyInstrs(Register Reg) {
+ Register TmpReg;
while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
if (MRI.getType(TmpReg).isValid())
Reg = TmpReg;
/// Helper function to split a wide generic register into bitwise blocks with
/// the given Type (which implies the number of blocks needed). The generic
/// registers created are appended to Ops, starting at bit 0 of Reg.
- void extractParts(unsigned Reg, LLT Ty, int NumParts,
- SmallVectorImpl<unsigned> &VRegs);
+ void extractParts(Register Reg, LLT Ty, int NumParts,
+ SmallVectorImpl<Register> &VRegs);
/// Version which handles irregular splits.
- bool extractParts(unsigned Reg, LLT RegTy, LLT MainTy,
+ bool extractParts(Register Reg, LLT RegTy, LLT MainTy,
LLT &LeftoverTy,
- SmallVectorImpl<unsigned> &VRegs,
- SmallVectorImpl<unsigned> &LeftoverVRegs);
+ SmallVectorImpl<Register> &VRegs,
+ SmallVectorImpl<Register> &LeftoverVRegs);
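
//===-- Illustrative sketch (not part of the patch) ----------------------===//
// What the even-split path of extractParts() amounts to, per its definition
// later in this patch: fresh NarrowTy registers filled by a single
// G_UNMERGE_VALUES; insertParts() is the inverse and emits G_MERGE_VALUES.
// Hypothetical helper, assuming the usual GlobalISel headers:
static Register splitAndRejoinS64(MachineIRBuilder &B,
                                  MachineRegisterInfo &MRI, Register Src64) {
  SmallVector<Register, 2> Parts;
  for (int i = 0; i < 2; ++i)
    Parts.push_back(MRI.createGenericVirtualRegister(LLT::scalar(32)));
  B.buildUnmerge(Parts, Src64);                          // s64 -> s32, s32
  return B.buildMerge(LLT::scalar(64), Parts).getReg(0); // s32, s32 -> s64
}
//===----------------------------------------------------------------------===//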
/// Helper function to build a wide generic register \p DstReg of type \p
/// RegTy from smaller parts. This will produce a G_MERGE_VALUES,
///
/// If \p ResultTy does not evenly break into \p PartTy sized pieces, the
/// remainder must be specified with \p LeftoverRegs of type \p LeftoverTy.
- void insertParts(unsigned DstReg, LLT ResultTy,
- LLT PartTy, ArrayRef<unsigned> PartRegs,
- LLT LeftoverTy = LLT(), ArrayRef<unsigned> LeftoverRegs = {});
+ void insertParts(Register DstReg, LLT ResultTy,
+ LLT PartTy, ArrayRef<Register> PartRegs,
+ LLT LeftoverTy = LLT(), ArrayRef<Register> LeftoverRegs = {});
/// Perform generic multiplication of values held in multiple registers.
/// Generated instructions use only types NarrowTy and i1.
/// Destination can be same or two times size of the source.
- void multiplyRegisters(SmallVectorImpl<unsigned> &DstRegs,
- ArrayRef<unsigned> Src1Regs,
- ArrayRef<unsigned> Src2Regs, LLT NarrowTy);
+ void multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
+ ArrayRef<Register> Src1Regs,
+ ArrayRef<Register> Src2Regs, LLT NarrowTy);
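
//===-- Illustrative sketch (not part of the patch) ----------------------===//
// The limb algebra behind multiplyRegisters(), checked standalone. With
// B = 2^32 and two 32-bit limbs per operand:
//   (A1*B + A0) * (B1*B + B0) = A1*B1*B^2 + (A1*B0 + A0*B1)*B + A0*B0
// A same-size (64 -> 64) multiply needs only the terms below B^2 plus carry
// propagation; the double-size result keeps the high products as well.
#include <cstdint>
static uint64_t mulViaLimbs(uint64_t A, uint64_t B) {
  uint32_t A0 = (uint32_t)A, A1 = (uint32_t)(A >> 32);
  uint32_t B0 = (uint32_t)B, B1 = (uint32_t)(B >> 32);
  uint64_t Low = (uint64_t)A0 * B0;
  // Unsigned wrap-around in Mid is fine: only its low 32 bits survive the
  // shift below.
  uint64_t Mid = (uint64_t)A1 * B0 + (uint64_t)A0 * B1;
  return Low + (Mid << 32); // equal to A * B modulo 2^64
}
//===----------------------------------------------------------------------===//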
LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
unsigned TypeIdx, LLT NarrowTy);
}
};
-inline bind_ty<unsigned> m_Reg(unsigned &R) { return R; }
+inline bind_ty<Register> m_Reg(Register &R) { return R; }
inline bind_ty<MachineInstr *> m_MInstr(MachineInstr *&MI) { return MI; }
inline bind_ty<LLT> m_Type(LLT &Ty) { return Ty; }
public:
enum class DstType { Ty_LLT, Ty_Reg, Ty_RC };
DstOp(unsigned R) : Reg(R), Ty(DstType::Ty_Reg) {}
+ DstOp(Register R) : Reg(R), Ty(DstType::Ty_Reg) {}
DstOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(DstType::Ty_Reg) {}
DstOp(const LLT &T) : LLTTy(T), Ty(DstType::Ty_LLT) {}
DstOp(const TargetRegisterClass *TRC) : RC(TRC), Ty(DstType::Ty_RC) {}
public:
enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate };
SrcOp(unsigned R) : Reg(R), Ty(SrcType::Ty_Reg) {}
+ SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {}
SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {}
SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
SrcOp(const CmpInst::Predicate P) : Pred(P), Ty(SrcType::Ty_Predicate) {}
/// type as \p Op0 or \p Op0 itself.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- Optional<MachineInstrBuilder> materializeGEP(unsigned &Res, unsigned Op0,
+ Optional<MachineInstrBuilder> materializeGEP(Register &Res, Register Op0,
const LLT &ValueTy,
uint64_t Value);
/// \pre The bits defined by each Op (derived from index and scalar size) must
/// not overlap.
/// \pre \p Indices must be in ascending order of bit position.
- void buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
+ void buildSequence(Register Res, ArrayRef<Register> Ops,
ArrayRef<uint64_t> Indices);
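
//===-- Illustrative sketch (not part of the patch) ----------------------===//
// Hypothetical use of buildSequence(): packing two s32 values into an s64 at
// bit positions 0 and 32 (indices ascending and non-overlapping, as the
// preconditions above require). Assumes the usual GlobalISel headers.
static Register packPair(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                         Register Lo32, Register Hi32) {
  Register Res = MRI.createGenericVirtualRegister(LLT::scalar(64));
  B.buildSequence(Res, {Lo32, Hi32}, {0, 32});
  return Res;
}
//===----------------------------------------------------------------------===//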
/// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
/// \pre The type of all \p Ops registers must be identical.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<unsigned> Ops);
+ MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<Register> Ops);
/// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
///
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildUnmerge(ArrayRef<LLT> Res, const SrcOp &Op);
- MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, const SrcOp &Op);
+ MachineInstrBuilder buildUnmerge(ArrayRef<Register> Res, const SrcOp &Op);
/// Build and insert an unmerge of \p Res sized pieces to cover \p Op
MachineInstrBuilder buildUnmerge(LLT Res, const SrcOp &Op);
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildBuildVector(const DstOp &Res,
- ArrayRef<unsigned> Ops);
+ ArrayRef<Register> Ops);
/// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill
/// the number of elements
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
- ArrayRef<unsigned> Ops);
+ ArrayRef<Register> Ops);
/// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
///
///
/// \return a MachineInstrBuilder for the newly created instruction.
MachineInstrBuilder buildConcatVectors(const DstOp &Res,
- ArrayRef<unsigned> Ops);
+ ArrayRef<Register> Ops);
- MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
- unsigned Op, unsigned Index);
+ MachineInstrBuilder buildInsert(Register Res, Register Src,
+ Register Op, unsigned Index);
/// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
/// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
/// \pre setBasicBlock or setMI must have been called.
///
/// \return a MachineInstrBuilder for the newly created instruction.
- MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<unsigned> Res,
+ MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<Register> Res,
bool HasSideEffects);
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<DstOp> Res,
bool HasSideEffects);
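
//===-- Illustrative sketch (not part of the patch) ----------------------===//
// The Register-based overload in use: one result register, no side effects,
// with the intrinsic's argument appended by the caller, mirroring how
// IRTranslator emits known intrinsics. Hypothetical helper:
static MachineInstrBuilder emitUnaryIntrinsic(MachineIRBuilder &B,
                                              Intrinsic::ID ID, Register Dst,
                                              Register Src) {
  Register Results[1] = {Dst};
  return B.buildIntrinsic(ID, Results, /*HasSideEffects=*/false).addUse(Src);
}
//===----------------------------------------------------------------------===//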
#define LLVM_CODEGEN_MACHINEOPERAND_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/Register.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/LowLevelTypeImpl.h"
//===--------------------------------------------------------------------===//
/// getReg - Returns the register number.
- unsigned getReg() const {
+ Register getReg() const {
assert(isReg() && "This is not a register operand!");
- return SmallContents.RegNo;
+ return Register(SmallContents.RegNo);
}
unsigned getSubReg() const {
/// createVirtualRegister - Create and return a new virtual register in the
/// function with the specified register class.
- unsigned createVirtualRegister(const TargetRegisterClass *RegClass,
+ Register createVirtualRegister(const TargetRegisterClass *RegClass,
StringRef Name = "");
/// Create and return a new virtual register in the function with the same
/// attributes as the given register.
- unsigned cloneVirtualRegister(unsigned VReg, StringRef Name = "");
+ Register cloneVirtualRegister(Register VReg, StringRef Name = "");
/// Get the low-level type of \p Reg or LLT{} if Reg is not a generic
/// (target independent) virtual register.
/// Create and return a new generic virtual register with low-level
/// type \p Ty.
- unsigned createGenericVirtualRegister(LLT Ty, StringRef Name = "");
+ Register createGenericVirtualRegister(LLT Ty, StringRef Name = "");
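
//===-- Illustrative sketch (not part of the patch) ----------------------===//
// All three creation paths now hand back a Register, while unsigned-typed
// call sites keep compiling through Register's implicit conversion.
// Hypothetical helper:
static void createRegisterExamples(MachineRegisterInfo &MRI,
                                   const TargetRegisterClass *RC) {
  Register ByClass = MRI.createVirtualRegister(RC);
  Register Clone = MRI.cloneVirtualRegister(ByClass);
  Register Generic = MRI.createGenericVirtualRegister(LLT::scalar(32));
  unsigned Raw = Generic; // legacy callers still see a plain vreg number
  (void)Clone; (void)Raw;
}
//===----------------------------------------------------------------------===//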
/// Remove all types associated to virtual registers (after instruction
/// selection and constraining of all generic virtual registers).
--- /dev/null
+++ b/llvm/include/llvm/CodeGen/Register.h
+//===-- llvm/CodeGen/Register.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTER_H
+#define LLVM_CODEGEN_REGISTER_H
+
+#include <cassert>
+
+namespace llvm {
+
+/// Wrapper class representing virtual and physical registers. Should be passed
+/// by value.
+class Register {
+ unsigned Reg;
+
+public:
+ Register(unsigned Val = 0): Reg(Val) {}
+
+ /// Return true if the specified register number is in the virtual register
+ /// namespace.
+ bool isVirtual() const {
+ return int(Reg) < 0;
+ }
+
+ /// Return true if the specified register number is in the physical register
+ /// namespace.
+ bool isPhysical() const {
+ return int(Reg) > 0;
+ }
+
+ /// Convert a virtual register number to a 0-based index. The first virtual
+ /// register in a function will get the index 0.
+ unsigned virtRegIndex() const {
+ assert(isVirtual() && "Not a virtual register");
+ return Reg & ~(1u << 31);
+ }
+
+ /// Convert a 0-based index to a virtual register number.
+  /// This is the inverse operation of virtRegIndex().
+ static Register index2VirtReg(unsigned Index) {
+ return Register(Index | (1u << 31));
+ }
+
+ operator unsigned() const {
+ return Reg;
+ }
+
+ bool isValid() const {
+ return Reg != 0;
+ }
+};
+
+} // end namespace llvm
+
+#endif
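
//===-- Illustrative sketch (not part of the patch) ----------------------===//
// Standalone check of the encoding above: virtual registers are tagged with
// the sign bit, physical registers are small positive numbers, and 0 is the
// invalid register. The implicit unsigned conversion is what lets the
// mechanical unsigned -> Register replacements elsewhere in this patch
// compile unchanged.
#include "llvm/CodeGen/Register.h"
#include <cassert>

static void exerciseRegisterEncoding() {
  llvm::Register Invalid;                  // Reg == 0
  assert(!Invalid.isValid());

  llvm::Register Phys(42);                 // physical namespace
  assert(Phys.isPhysical() && !Phys.isVirtual());

  llvm::Register Virt = llvm::Register::index2VirtReg(7);
  assert(Virt.isVirtual());                // sign bit set
  assert(Virt.virtRegIndex() == 7);        // round-trips to the index

  unsigned Raw = Virt;                     // implicit conversion
  assert(Raw == (7u | (1u << 31)));
}
//===----------------------------------------------------------------------===//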
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/Register.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include <functional>
/// A map from swifterror value in a basic block to the virtual register it is
/// currently represented by.
- DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
+ DenseMap<std::pair<const MachineBasicBlock *, const Value *>, Register>
VRegDefMap;
/// A list of upward exposed vreg uses that need to be satisfied by either a
/// copy def or a phi node at the beginning of the basic block representing
/// the predecessor(s) swifterror value.
- DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
+ DenseMap<std::pair<const MachineBasicBlock *, const Value *>, Register>
VRegUpwardsUse;
/// A map from instructions that define/use a swifterror value to the virtual
/// register that represents that def/use.
- llvm::DenseMap<PointerIntPair<const Instruction *, 1, bool>, unsigned>
+ llvm::DenseMap<PointerIntPair<const Instruction *, 1, bool>, Register>
VRegDefUses;
/// The swifterror argument of the current function.
/// Set the swifterror virtual register in the VRegDefMap for this
/// basic block.
- void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, unsigned);
+ void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register);
/// Get or create the swifterror value virtual register for a def of a
/// swifterror by an instruction.
/// getFrameRegister - This method should return the register used as a base
/// for values allocated in the current stack frame.
- virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
+ virtual Register getFrameRegister(const MachineFunction &MF) const = 0;
/// Mark a register and all its aliases as reserved in the given set.
void markSuperRegs(BitVector &RegisterSet, unsigned Reg) const;
/// returns the physical register mapped to the specified
/// virtual register
- unsigned getPhys(unsigned virtReg) const {
- assert(TargetRegisterInfo::isVirtualRegister(virtReg));
+ Register getPhys(Register virtReg) const {
+ assert(virtReg.isVirtual());
return Virt2PhysMap[virtReg];
}
// If @MI is a DBG_VALUE with debug value described by a
- // defined register, returns the number of this register.
- // In the other case, returns 0.
+ // defined register, returns that register.
+ // Otherwise, returns an invalid (null) Register.
-static unsigned isDescribedByReg(const MachineInstr &MI) {
+static Register isDescribedByReg(const MachineInstr &MI) {
assert(MI.isDebugValue());
assert(MI.getNumOperands() == 4);
// If location of variable is described using a register (directly or
// indirectly), this register is always a first operand.
- return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
+ return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : Register();
}
bool DbgValueHistoryMap::startDbgValue(InlinedEntity Var,
void CallLowering::anchor() {}
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
- unsigned ResReg, ArrayRef<unsigned> ArgRegs,
- unsigned SwiftErrorVReg,
+ Register ResReg, ArrayRef<Register> ArgRegs,
+ Register SwiftErrorVReg,
std::function<unsigned()> GetCalleeReg) const {
auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();
if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo)) {
// Try to use the register type if we couldn't assign the VT.
if (!Handler.isArgumentHandler() || !CurVT.isValid())
return false;
CurVT = TLI->getRegisterTypeForCallingConv(
F.getContext(), F.getCallingConv(), EVT(CurVT));
if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))
return *Regs;
}
-ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
+ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
auto VRegsIt = VMap.findVRegs(Val);
if (VRegsIt != VMap.vregs_end())
return *VRegsIt->second;
if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
Ret = nullptr;
- ArrayRef<unsigned> VRegs;
+ ArrayRef<Register> VRegs;
if (Ret)
VRegs = getOrCreateVRegs(*Ret);
- unsigned SwiftErrorVReg = 0;
+ Register SwiftErrorVReg = 0;
if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
&RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
if (DL->getTypeStoreSize(LI.getType()) == 0)
return true;
- ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
+ ArrayRef<Register> Regs = getOrCreateVRegs(LI);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
for (unsigned i = 0; i < Regs.size(); ++i) {
- unsigned Addr = 0;
+ Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
return true;
- ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
+ ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
}
for (unsigned i = 0; i < Vals.size(); ++i) {
- unsigned Addr = 0;
+ Register Addr;
MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
MachineIRBuilder &MIRBuilder) {
const Value *Src = U.getOperand(0);
uint64_t Offset = getOffsetFromIndices(U, *DL);
- ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
+ ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
auto &DstRegs = allocateVRegs(U);
uint64_t Offset = getOffsetFromIndices(U, *DL);
auto &DstRegs = allocateVRegs(U);
ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
- ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
- ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
+ ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
+ ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
auto InsertedIt = InsertedRegs.begin();
for (unsigned i = 0; i < DstRegs.size(); ++i) {
bool IRTranslator::translateSelect(const User &U,
MachineIRBuilder &MIRBuilder) {
unsigned Tst = getOrCreateVReg(*U.getOperand(0));
- ArrayRef<unsigned> ResRegs = getOrCreateVRegs(U);
- ArrayRef<unsigned> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
- ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
+ ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
+ ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
+ ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
const SelectInst &SI = cast<SelectInst>(U);
uint16_t Flags = 0;
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
MachineIRBuilder &MIRBuilder) {
- ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
+ ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
MIRBuilder.buildInstr(Op)
.addDef(ResRegs[0])
.addDef(ResRegs[1])
unsigned IRTranslator::packRegs(const Value &V,
MachineIRBuilder &MIRBuilder) {
- ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
+ ArrayRef<Register> Regs = getOrCreateVRegs(V);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
LLT BigTy = getLLTForType(*V.getType(), *DL);
void IRTranslator::unpackRegs(const Value &V, unsigned Src,
MachineIRBuilder &MIRBuilder) {
- ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
+ ArrayRef<Register> Regs = getOrCreateVRegs(V);
ArrayRef<uint64_t> Offsets = *VMap.getOffsets(V);
for (unsigned i = 0; i < Regs.size(); ++i)
if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
bool IsSplitType = valueIsSplit(CI);
- unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
+ Register Res = IsSplitType ? MRI->createGenericVirtualRegister(
getLLTForType(*CI.getType(), *DL))
: getOrCreateVReg(CI);
- SmallVector<unsigned, 8> Args;
- unsigned SwiftErrorVReg = 0;
+ SmallVector<Register, 8> Args;
+ Register SwiftErrorVReg;
for (auto &Arg: CI.arg_operands()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
LLT Ty = getLLTForType(*Arg->getType(), *DL);
if (translateKnownIntrinsic(CI, ID, MIRBuilder))
return true;
- ArrayRef<unsigned> ResultRegs;
+ ArrayRef<Register> ResultRegs;
if (!CI.getType()->isVoidTy())
ResultRegs = getOrCreateVRegs(CI);
unsigned Res = 0;
if (!I.getType()->isVoidTy())
Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
- SmallVector<unsigned, 8> Args;
- unsigned SwiftErrorVReg = 0;
+ SmallVector<Register, 8> Args;
+ Register SwiftErrorVReg;
for (auto &Arg : I.arg_operands()) {
if (CLI->supportSwiftError() && isSwiftError(Arg)) {
LLT Ty = getLLTForType(*Arg->getType(), *DL);
return false;
MBB.addLiveIn(ExceptionReg);
- ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
+ ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
SmallSet<const MachineBasicBlock *, 16> SeenPreds;
for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
auto IRPred = PI->getIncomingBlock(i);
- ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
+ ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
if (SeenPreds.count(Pred))
continue;
// Return the scalar if it is a <1 x Ty> vector.
if (CAZ->getNumElements() == 1)
return translate(*CAZ->getElementValue(0u), Reg);
- SmallVector<unsigned, 4> Ops;
+ SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
Constant &Elt = *CAZ->getElementValue(i);
Ops.push_back(getOrCreateVReg(Elt));
// Return the scalar if it is a <1 x Ty> vector.
if (CV->getNumElements() == 1)
return translate(*CV->getElementAsConstant(0), Reg);
- SmallVector<unsigned, 4> Ops;
+ SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumElements(); ++i) {
Constant &Elt = *CV->getElementAsConstant(i);
Ops.push_back(getOrCreateVReg(Elt));
} else if (auto CV = dyn_cast<ConstantVector>(&C)) {
if (CV->getNumOperands() == 1)
return translate(*CV->getOperand(0), Reg);
- SmallVector<unsigned, 4> Ops;
+ SmallVector<Register, 4> Ops;
for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
}
EntryBB->addSuccessor(&getMBB(F.front()));
// Lower the actual args into this basic block.
- SmallVector<unsigned, 8> VRegArgs;
+ SmallVector<Register, 8> VRegArgs;
for (const Argument &Arg: F.args()) {
if (DL->getTypeStoreSize(Arg.getType()) == 0)
continue; // Don't handle zero sized types.
}
}
-void LegalizerHelper::extractParts(unsigned Reg, LLT Ty, int NumParts,
- SmallVectorImpl<unsigned> &VRegs) {
+void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts,
+ SmallVectorImpl<Register> &VRegs) {
for (int i = 0; i < NumParts; ++i)
VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
MIRBuilder.buildUnmerge(VRegs, Reg);
}
-bool LegalizerHelper::extractParts(unsigned Reg, LLT RegTy,
+bool LegalizerHelper::extractParts(Register Reg, LLT RegTy,
LLT MainTy, LLT &LeftoverTy,
- SmallVectorImpl<unsigned> &VRegs,
- SmallVectorImpl<unsigned> &LeftoverRegs) {
+ SmallVectorImpl<Register> &VRegs,
+ SmallVectorImpl<Register> &LeftoverRegs) {
assert(!LeftoverTy.isValid() && "this is an out argument");
unsigned RegSize = RegTy.getSizeInBits();
// For irregular sizes, extract the individual parts.
for (unsigned I = 0; I != NumParts; ++I) {
- unsigned NewReg = MRI.createGenericVirtualRegister(MainTy);
+ Register NewReg = MRI.createGenericVirtualRegister(MainTy);
VRegs.push_back(NewReg);
MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
}
for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
Offset += LeftoverSize) {
- unsigned NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
+ Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
LeftoverRegs.push_back(NewReg);
MIRBuilder.buildExtract(NewReg, Reg, Offset);
}
return true;
}
-void LegalizerHelper::insertParts(unsigned DstReg,
+void LegalizerHelper::insertParts(Register DstReg,
LLT ResultTy, LLT PartTy,
- ArrayRef<unsigned> PartRegs,
+ ArrayRef<Register> PartRegs,
LLT LeftoverTy,
- ArrayRef<unsigned> LeftoverRegs) {
+ ArrayRef<Register> LeftoverRegs) {
if (!LeftoverTy.isValid()) {
assert(LeftoverRegs.empty());
return UnableToLegalize;
int NumParts = SizeOp0 / NarrowSize;
- SmallVector<unsigned, 2> DstRegs;
+ SmallVector<Register, 2> DstRegs;
for (int i = 0; i < NumParts; ++i)
DstRegs.push_back(
MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
unsigned NarrowSize = NarrowTy.getSizeInBits();
int NumParts = TotalSize / NarrowSize;
- SmallVector<unsigned, 4> PartRegs;
+ SmallVector<Register, 4> PartRegs;
for (int I = 0; I != NumParts; ++I) {
unsigned Offset = I * NarrowSize;
auto K = MIRBuilder.buildConstant(NarrowTy,
LLT LeftoverTy;
unsigned LeftoverBits = TotalSize - NumParts * NarrowSize;
- SmallVector<unsigned, 1> LeftoverRegs;
+ SmallVector<Register, 1> LeftoverRegs;
if (LeftoverBits != 0) {
LeftoverTy = LLT::scalar(LeftoverBits);
auto K = MIRBuilder.buildConstant(
// Expand in terms of carry-setting/consuming G_ADDE instructions.
int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
- SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
+ SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
- SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
+ SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
// Use concat_vectors if the result is a multiple of the number of elements.
if (NumParts * OldElts == NewElts) {
- SmallVector<unsigned, 8> Parts;
+ SmallVector<Register, 8> Parts;
Parts.push_back(MO.getReg());
unsigned ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0);
if (TypeIdx != 1)
return UnableToLegalize;
- unsigned DstReg = MI.getOperand(0).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
if (!DstTy.isScalar())
return UnableToLegalize;
unsigned PartSize = DstTy.getSizeInBits() / NumSrc;
unsigned Src1 = MI.getOperand(1).getReg();
- unsigned ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();
+ Register ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();
for (unsigned I = 2; I != NumOps; ++I) {
const unsigned Offset = (I - 1) * PartSize;
- unsigned SrcReg = MI.getOperand(I).getReg();
+ Register SrcReg = MI.getOperand(I).getReg();
assert(MRI.getType(SrcReg) == LLT::scalar(PartSize));
auto ZextInput = MIRBuilder.buildZExt(DstTy, SrcReg);
- unsigned NextResult = I + 1 == NumOps ? DstReg :
+ Register NextResult = I + 1 == NumOps ? DstReg :
MRI.createGenericVirtualRegister(DstTy);
auto ShiftAmt = MIRBuilder.buildConstant(DstTy, Offset);
return UnableToLegalize;
unsigned NumDst = MI.getNumOperands() - 1;
- unsigned SrcReg = MI.getOperand(NumDst).getReg();
+ Register SrcReg = MI.getOperand(NumDst).getReg();
LLT SrcTy = MRI.getType(SrcReg);
if (!SrcTy.isScalar())
return UnableToLegalize;
- unsigned Dst0Reg = MI.getOperand(0).getReg();
+ Register Dst0Reg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(Dst0Reg);
if (!DstTy.isScalar())
return UnableToLegalize;
LegalizerHelper::LegalizeResult
LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx,
LLT WideTy) {
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned SrcReg = MI.getOperand(1).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
+ Register SrcReg = MI.getOperand(1).getReg();
LLT SrcTy = MRI.getType(SrcReg);
LLT DstTy = MRI.getType(DstReg);
LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) {
- SmallVector<unsigned, 2> DstRegs;
+ SmallVector<Register, 2> DstRegs;
unsigned NarrowSize = NarrowTy.getSizeInBits();
unsigned DstReg = MI.getOperand(0).getReg();
return Legalized;
}
- SmallVector<unsigned, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
+ SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs);
SmallVector<MachineInstrBuilder, 4> NewInsts;
- SmallVector<unsigned, 4> DstRegs, LeftoverDstRegs;
- SmallVector<unsigned, 4> PartRegs, LeftoverRegs;
+ SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
+ SmallVector<Register, 4> PartRegs, LeftoverRegs;
for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
LLT LeftoverTy;
NarrowTy1 = SrcTy.getElementType();
}
- SmallVector<unsigned, 4> SrcRegs, DstRegs;
+ SmallVector<Register, 4> SrcRegs, DstRegs;
extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs);
for (unsigned I = 0; I < NumParts; ++I) {
CmpInst::Predicate Pred
= static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
- SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
+ SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs);
extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs);
LegalizerHelper::LegalizeResult
LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
LLT NarrowTy) {
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned CondReg = MI.getOperand(1).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
+ Register CondReg = MI.getOperand(1).getReg();
unsigned NumParts = 0;
LLT NarrowTy0, NarrowTy1;
}
}
- SmallVector<unsigned, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
+ SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
if (CondTy.isVector())
extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs);
extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs);
for (unsigned i = 0; i < NumParts; ++i) {
- unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
+ Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg,
Src1Regs[i], Src2Regs[i]);
DstRegs.push_back(DstReg);
if (NumParts < 0)
return UnableToLegalize;
- SmallVector<unsigned, 4> DstRegs, LeftoverDstRegs;
+ SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
SmallVector<MachineInstrBuilder, 4> NewInsts;
const int TotalNumParts = NumParts + NumLeftover;
// Insert the new phis in the result block first.
for (int I = 0; I != TotalNumParts; ++I) {
LLT Ty = I < NumParts ? NarrowTy : LeftoverTy;
- unsigned PartDstReg = MRI.createGenericVirtualRegister(Ty);
+ Register PartDstReg = MRI.createGenericVirtualRegister(Ty);
NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI)
.addDef(PartDstReg));
if (I < NumParts)
MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI());
insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs);
- SmallVector<unsigned, 4> PartRegs, LeftoverRegs;
+ SmallVector<Register, 4> PartRegs, LeftoverRegs;
// Insert code to extract the incoming values in each predecessor block.
for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
return UnableToLegalize;
bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
- unsigned ValReg = MI.getOperand(0).getReg();
- unsigned AddrReg = MI.getOperand(1).getReg();
+ Register ValReg = MI.getOperand(0).getReg();
+ Register AddrReg = MI.getOperand(1).getReg();
LLT ValTy = MRI.getType(ValReg);
int NumParts = -1;
int NumLeftover = -1;
LLT LeftoverTy;
- SmallVector<unsigned, 8> NarrowRegs, NarrowLeftoverRegs;
+ SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs;
if (IsLoad) {
std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy);
} else {
// is a load, return the new registers in ValRegs. For a store, each element
// of ValRegs should be PartTy. Returns the next offset that needs to be
// handled.
- auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<unsigned> &ValRegs,
+ auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
unsigned Offset) -> unsigned {
MachineFunction &MF = MIRBuilder.getMF();
unsigned PartSize = PartTy.getSizeInBits();
Offset += PartSize, ++Idx) {
unsigned ByteSize = PartSize / 8;
unsigned ByteOffset = Offset / 8;
- unsigned NewAddrReg = 0;
+ Register NewAddrReg;
MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
if (IsLoad) {
- unsigned Dst = MRI.createGenericVirtualRegister(PartTy);
+ Register Dst = MRI.createGenericVirtualRegister(PartTy);
ValRegs.push_back(Dst);
MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO);
} else {
auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
- unsigned ResultRegs[2];
+ Register ResultRegs[2];
switch (MI.getOpcode()) {
case TargetOpcode::G_SHL: {
// Short: ShAmt < NewBitSize
}
}
-void LegalizerHelper::multiplyRegisters(SmallVectorImpl<unsigned> &DstRegs,
- ArrayRef<unsigned> Src1Regs,
- ArrayRef<unsigned> Src2Regs,
+void LegalizerHelper::multiplyRegisters(SmallVectorImpl<Register> &DstRegs,
+ ArrayRef<Register> Src1Regs,
+ ArrayRef<Register> Src2Regs,
LLT NarrowTy) {
MachineIRBuilder &B = MIRBuilder;
unsigned SrcParts = Src1Regs.size();
DstRegs[DstIdx] = FactorSum;
unsigned CarrySumPrevDstIdx;
- SmallVector<unsigned, 4> Factors;
+ SmallVector<Register, 4> Factors;
for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
// Collect low parts of muls for DstIdx.
LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned Src1 = MI.getOperand(1).getReg();
- unsigned Src2 = MI.getOperand(2).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
+ Register Src1 = MI.getOperand(1).getReg();
+ Register Src2 = MI.getOperand(2).getReg();
LLT Ty = MRI.getType(DstReg);
if (Ty.isVector())
bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
- SmallVector<unsigned, 2> Src1Parts, Src2Parts, DstTmpRegs;
+ SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
DstTmpRegs.resize(DstTmpParts);
multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
// Take only high half of registers if this is high mul.
- ArrayRef<unsigned> DstRegs(
+ ArrayRef<Register> DstRegs(
IsMulHigh ? &DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts);
MIRBuilder.buildMerge(DstReg, DstRegs);
MI.eraseFromParent();
return UnableToLegalize;
int NumParts = SizeOp1 / NarrowSize;
- SmallVector<unsigned, 2> SrcRegs, DstRegs;
+ SmallVector<Register, 2> SrcRegs, DstRegs;
SmallVector<uint64_t, 2> Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
int NumParts = SizeOp0 / NarrowSize;
- SmallVector<unsigned, 2> SrcRegs, DstRegs;
+ SmallVector<Register, 2> SrcRegs, DstRegs;
SmallVector<uint64_t, 2> Indexes;
extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
assert(MI.getNumOperands() == 3 && TypeIdx == 0);
- SmallVector<unsigned, 4> DstRegs, DstLeftoverRegs;
- SmallVector<unsigned, 4> Src0Regs, Src0LeftoverRegs;
- SmallVector<unsigned, 4> Src1Regs, Src1LeftoverRegs;
+ SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
+ SmallVector<Register, 4> Src0Regs, Src0LeftoverRegs;
+ SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
LLT LeftoverTy;
if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy,
Src0Regs, Src0LeftoverRegs))
unsigned DstReg = MI.getOperand(0).getReg();
LLT DstTy = MRI.getType(DstReg);
- SmallVector<unsigned, 4> DstRegs, DstLeftoverRegs;
- SmallVector<unsigned, 4> Src1Regs, Src1LeftoverRegs;
- SmallVector<unsigned, 4> Src2Regs, Src2LeftoverRegs;
+ SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
+ SmallVector<Register, 4> Src1Regs, Src1LeftoverRegs;
+ SmallVector<Register, 4> Src2Regs, Src2LeftoverRegs;
LLT LeftoverTy;
if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy,
Src1Regs, Src1LeftoverRegs))
}
Optional<MachineInstrBuilder>
-MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
+MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
const LLT &ValueTy, uint64_t Value) {
assert(Res == 0 && "Res is a result argument");
assert(ValueTy.isScalar() && "invalid offset type");
return Extract;
}
-void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef<unsigned> Ops,
+void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
assert(Ops.size() == Indices.size() && "incompatible args");
return;
}
- unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy);
+ Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
buildUndef(ResIn);
for (unsigned i = 0; i < Ops.size(); ++i) {
- unsigned ResOut = i + 1 == Ops.size()
+ Register ResOut = i + 1 == Ops.size()
? Res
: getMRI()->createGenericVirtualRegister(ResTy);
buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
}
MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
- ArrayRef<unsigned> Ops) {
+ ArrayRef<Register> Ops) {
- // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
- // we need some temporary storage for the DstOp objects. Here we use a
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+ // we need some temporary storage for the SrcOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
const SrcOp &Op) {
unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
- SmallVector<unsigned, 8> TmpVec;
+ SmallVector<Register, 8> TmpVec;
for (unsigned I = 0; I != NumReg; ++I)
TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
return buildUnmerge(TmpVec, Op);
}
-MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<unsigned> Res,
+MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
const SrcOp &Op) {
- // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<DstOp>,
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
// we need some temporary storage for the DstOp objects. Here we use a
}
MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
- ArrayRef<unsigned> Ops) {
+ ArrayRef<Register> Ops) {
- // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
- // we need some temporary storage for the DstOp objects. Here we use a
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+ // we need some temporary storage for the SrcOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
- ArrayRef<unsigned> Ops) {
+ ArrayRef<Register> Ops) {
- // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
- // we need some temporary storage for the DstOp objects. Here we use a
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+ // we need some temporary storage for the SrcOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
}
MachineInstrBuilder
-MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<unsigned> Ops) {
+MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
- // Unfortunately to convert from ArrayRef<unsigned> to ArrayRef<SrcOp>,
- // we need some temporary storage for the DstOp objects. Here we use a
+ // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
+ // we need some temporary storage for the SrcOp objects. Here we use a
// sufficiently large SmallVector to not go through the heap.
return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}
-MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src,
- unsigned Op, unsigned Index) {
+MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
+ Register Op, unsigned Index) {
assert(Index + getMRI()->getType(Op).getSizeInBits() <=
getMRI()->getType(Res).getSizeInBits() &&
"insertion past the end of a register");
}
MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
- ArrayRef<unsigned> ResultRegs,
+ ArrayRef<Register> ResultRegs,
bool HasSideEffects) {
auto MIB =
buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
// If @MI is a DBG_VALUE with debug value described by a defined
- // register, returns the number of this register. In the other case, returns 0.
+ // register, returns that register. Otherwise, returns an invalid Register.
-static unsigned isDbgValueDescribedByReg(const MachineInstr &MI) {
+static Register isDbgValueDescribedByReg(const MachineInstr &MI) {
assert(MI.isDebugValue() && "expected a DBG_VALUE");
assert(MI.getNumOperands() == 4 && "malformed DBG_VALUE");
// If location of variable is described using a register (directly
// or indirectly), this register is always a first operand.
- return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
+ return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : Register();
}
namespace {
switch (MO.getType()) {
case MachineOperand::MO_Register:
// Register operands don't have target flags.
- return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef());
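+ // getReg() now returns a Register; hash the raw value explicitly since
+ // there is no hash_value(Register) overload for hash_combine to find.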
+ return hash_combine(MO.getType(), (unsigned)MO.getReg(), MO.getSubReg(),
+                     MO.isDef());
case MachineOperand::MO_Immediate:
return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
case MachineOperand::MO_CImmediate:
/// createVirtualRegister - Create and return a new virtual register in the
/// function with the specified register class.
///
-unsigned
+Register
MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass,
StringRef Name) {
assert(RegClass && "Cannot create register without RegClass!");
return Reg;
}
-unsigned MachineRegisterInfo::cloneVirtualRegister(unsigned VReg,
+Register MachineRegisterInfo::cloneVirtualRegister(Register VReg,
StringRef Name) {
unsigned Reg = createIncompleteVirtualRegister(Name);
VRegInfo[Reg].first = VRegInfo[VReg].first;
VRegToType[VReg] = Ty;
}
-unsigned
+Register
MachineRegisterInfo::createGenericVirtualRegister(LLT Ty, StringRef Name) {
// New virtual register number.
unsigned Reg = createIncompleteVirtualRegister(Name);
if (!Instr.isFullCopy())
continue;
// Look for the other end of the copy.
- unsigned OtherReg = Instr.getOperand(0).getReg();
+ Register OtherReg = Instr.getOperand(0).getReg();
if (OtherReg == Reg) {
OtherReg = Instr.getOperand(1).getReg();
if (OtherReg == Reg)
continue;
}
// Get the current assignment.
- unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
+ Register OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
? OtherReg
: VRM->getPhys(OtherReg);
// Push the collected information.
for (; NumRegs; --NumRegs, ++I) {
assert(I != RC->end() && "Ran out of registers to allocate!");
- auto R = (AssignedReg) ? *I : RegInfo.createVirtualRegister(RC);
+ Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
Regs.push_back(R);
}
for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
bool hasFI = MI->getOperand(0).isFI();
- unsigned Reg =
+ Register Reg =
hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(Reg))
EntryMBB->insert(EntryMBB->begin(), MI);
}
void SwiftErrorValueTracking::setCurrentVReg(const MachineBasicBlock *MBB,
- const Value *Val, unsigned VReg) {
+ const Value *Val, Register VReg) {
VRegDefMap[std::make_pair(MBB, Val)] = VReg;
}
auto UUseIt = VRegUpwardsUse.find(Key);
auto VRegDefIt = VRegDefMap.find(Key);
bool UpwardsUse = UUseIt != VRegUpwardsUse.end();
- unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
+ Register UUseVReg = UpwardsUse ? UUseIt->second : Register();
bool DownwardDef = VRegDefIt != VRegDefMap.end();
assert(!(UpwardsUse && !DownwardDef) &&
"We can't have an upwards use but no downwards def");
// destination virtual register number, otherwise we generate a new one.
auto &DL = MF->getDataLayout();
auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
- unsigned PHIVReg =
+ Register PHIVReg =
UpwardsUse ? UUseVReg : MF->getRegInfo().createVirtualRegister(RC);
MachineInstrBuilder PHI =
BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,
assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
"This only knows how to commute register operands so far");
- unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
- unsigned Reg1 = MI.getOperand(Idx1).getReg();
- unsigned Reg2 = MI.getOperand(Idx2).getReg();
+ Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
+ Register Reg1 = MI.getOperand(Idx1).getReg();
+ Register Reg2 = MI.getOperand(Idx2).getReg();
unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val,
- ArrayRef<unsigned> VRegs,
- unsigned SwiftErrorVReg) const {
+ ArrayRef<Register> VRegs,
+ Register SwiftErrorVReg) const {
auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
"Return value without a vreg");
bool AArch64CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
MachineBasicBlock &MBB = MIRBuilder.getMBB();
MachineRegisterInfo &MRI = MF.getRegInfo();
const MachineOperand &Callee,
const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs,
- unsigned SwiftErrorVReg) const {
+ Register SwiftErrorVReg) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
MachineRegisterInfo &MRI = MF.getRegInfo();
SplitArgs.clear();
SmallVector<uint64_t, 8> RegOffsets;
- SmallVector<unsigned, 8> SplitRegs;
+ SmallVector<Register, 8> SplitRegs;
splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv(),
[&](unsigned Reg, uint64_t Offset) {
RegOffsets.push_back(Offset);
AArch64CallLowering(const AArch64TargetLowering &TLI);
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<unsigned> VRegs,
- unsigned SwiftErrorVReg) const override;
+ ArrayRef<Register> VRegs,
+ Register SwiftErrorVReg) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
- ArrayRef<unsigned> VRegs) const override;
+ ArrayRef<Register> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,
ArrayRef<ArgInfo> OrigArgs,
- unsigned SwiftErrorVReg) const override;
+ Register SwiftErrorVReg) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,
struct LoadInfo {
LoadInfo() = default;
- unsigned DestReg = 0;
- unsigned BaseReg = 0;
+ Register DestReg;
+ Register BaseReg;
int BaseRegIdx = -1;
const MachineOperand *OffsetOpnd = nullptr;
bool IsPrePost = false;
return None;
LoadInfo LI;
- LI.DestReg = DestRegIdx == -1 ? 0 : MI.getOperand(DestRegIdx).getReg();
+ LI.DestReg = DestRegIdx == -1 ? Register()
+                               : MI.getOperand(DestRegIdx).getReg();
LI.BaseReg = BaseReg;
LI.BaseRegIdx = BaseRegIdx;
LI.OffsetOpnd = OffsetIdx == -1 ? nullptr : &MI.getOperand(OffsetIdx);
MovZ->addOperand(MF, MachineOperand::CreateImm(0));
constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);
- auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset,
- unsigned ForceDstReg) {
- unsigned DstReg = ForceDstReg
+ auto BuildMovK = [&](Register SrcReg, unsigned char Flags, unsigned Offset,
+ Register ForceDstReg) {
+ Register DstReg = ForceDstReg
? ForceDstReg
: MRI.createVirtualRegister(&AArch64::GPR64RegClass);
auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
bool IsCopy = MI->isCopy();
bool IsMoveImm = MI->isMoveImmediate();
if (IsCopy || IsMoveImm) {
- MCPhysReg DefReg = MI->getOperand(0).getReg();
- MCPhysReg SrcReg = IsCopy ? MI->getOperand(1).getReg() : 0;
+ Register DefReg = MI->getOperand(0).getReg();
+ Register SrcReg = IsCopy ? MI->getOperand(1).getReg() : Register();
int64_t SrcImm = IsMoveImm ? MI->getOperand(1).getImm() : 0;
if (!MRI->isReserved(DefReg) &&
((IsCopy && (SrcReg == AArch64::XZR || SrcReg == AArch64::WZR)) ||
return false;
}
-unsigned
+Register
AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const AArch64FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
unsigned getBaseRegister() const;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const override;
bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
MachineFunction &MF = MIRBuilder.getMF();
MachineRegisterInfo &MRI = MF.getRegInfo();
return true;
}
- unsigned VReg = VRegs[0];
+ Register VReg = VRegs[0];
const Function &F = MF.getFunction();
auto &DL = F.getParent()->getDataLayout();
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
Type *ParamTy, uint64_t Offset,
unsigned Align,
- unsigned DstReg) const {
+ Register DstReg) const {
MachineFunction &MF = MIRBuilder.getMF();
const Function &F = MF.getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
- unsigned PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);
+ Register PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
bool AMDGPUCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
// AMDGPU_GS and AMDGPU_HS are not supported yet.
if (F.getCallingConv() == CallingConv::AMDGPU_GS ||
F.getCallingConv() == CallingConv::AMDGPU_HS)
void lowerParameter(MachineIRBuilder &MIRBuilder, Type *ParamTy,
uint64_t Offset, unsigned Align,
- unsigned DstReg) const;
+ Register DstReg) const;
public:
AMDGPUCallLowering(const AMDGPUTargetLowering &TLI);
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<unsigned> VRegs) const override;
+ ArrayRef<Register> VRegs) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
- ArrayRef<unsigned> VRegs) const override;
+ ArrayRef<Register> VRegs) const override;
static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);
};
4,
MinAlign(64, StructOffset));
- unsigned LoadResult = MRI.createGenericVirtualRegister(S32);
- unsigned LoadAddr = AMDGPU::NoRegister;
+ Register LoadResult = MRI.createGenericVirtualRegister(S32);
+ Register LoadAddr;
MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
MIRBuilder.setInstr(MI);
- unsigned Dst = MI.getOperand(0).getReg();
- unsigned Src = MI.getOperand(1).getReg();
+ Register Dst = MI.getOperand(0).getReg();
+ Register Src = MI.getOperand(1).getReg();
LLT DstTy = MRI.getType(Dst);
LLT SrcTy = MRI.getType(Src);
void AMDGPURegisterBankInfo::split64BitValueForMapping(
MachineIRBuilder &B,
- SmallVector<unsigned, 2> &Regs,
+ SmallVector<Register, 2> &Regs,
LLT HalfTy,
unsigned Reg) const {
assert(HalfTy.getSizeInBits() == 32);
}
/// Replace the current type each register in \p Regs has with \p NewTy
-static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef<unsigned> Regs,
+static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef<Register> Regs,
LLT NewTy) {
for (unsigned Reg : Regs) {
assert(MRI.getType(Reg).getSizeInBits() == NewTy.getSizeInBits());
// Use a set to avoid extra readfirstlanes in the case where multiple operands
// are the same register.
- SmallSet<unsigned, 4> SGPROperandRegs;
+ SmallSet<Register, 4> SGPROperandRegs;
for (unsigned Op : OpIndices) {
assert(MI.getOperand(Op).isUse());
unsigned Reg = MI.getOperand(Op).getReg();
return;
MachineIRBuilder B(MI);
- SmallVector<unsigned, 4> ResultRegs;
- SmallVector<unsigned, 4> InitResultRegs;
- SmallVector<unsigned, 4> PhiRegs;
+ SmallVector<Register, 4> ResultRegs;
+ SmallVector<Register, 4> InitResultRegs;
+ SmallVector<Register, 4> PhiRegs;
for (MachineOperand &Def : MI.defs()) {
LLT ResTy = MRI.getType(Def.getReg());
const RegisterBank *DefBank = getRegBank(Def.getReg(), MRI, *TRI);
}
} else {
LLT S32 = LLT::scalar(32);
- SmallVector<unsigned, 8> ReadlanePieces;
+ SmallVector<Register, 8> ReadlanePieces;
// The compares can be done as 64-bit, but the extract needs to be done
// in 32-bit pieces.
LLT HalfTy = getHalfSizedType(DstTy);
- SmallVector<unsigned, 2> DefRegs(OpdMapper.getVRegs(0));
- SmallVector<unsigned, 1> Src0Regs(OpdMapper.getVRegs(1));
- SmallVector<unsigned, 2> Src1Regs(OpdMapper.getVRegs(2));
- SmallVector<unsigned, 2> Src2Regs(OpdMapper.getVRegs(3));
+ SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
+ SmallVector<Register, 1> Src0Regs(OpdMapper.getVRegs(1));
+ SmallVector<Register, 2> Src1Regs(OpdMapper.getVRegs(2));
+ SmallVector<Register, 2> Src2Regs(OpdMapper.getVRegs(3));
// All inputs are SGPRs, nothing special to do.
if (DefRegs.empty()) {
break;
LLT HalfTy = getHalfSizedType(DstTy);
- SmallVector<unsigned, 2> DefRegs(OpdMapper.getVRegs(0));
- SmallVector<unsigned, 2> Src0Regs(OpdMapper.getVRegs(1));
- SmallVector<unsigned, 2> Src1Regs(OpdMapper.getVRegs(2));
+ SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
+ SmallVector<Register, 2> Src0Regs(OpdMapper.getVRegs(1));
+ SmallVector<Register, 2> Src1Regs(OpdMapper.getVRegs(2));
// All inputs are SGPRs, nothing special to do.
if (DefRegs.empty()) {
#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERBANKINFO_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERBANKINFO_H
+#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#define GET_REGBANK_DECLARATIONS
/// Split 64-bit value \p Reg into two 32-bit halves and populate them into \p
/// Regs. This appropriately sets the regbank of the new registers.
void split64BitValueForMapping(MachineIRBuilder &B,
- SmallVector<unsigned, 2> &Regs,
+ SmallVector<Register, 2> &Regs,
LLT HalfTy,
unsigned Reg) const;
}
}
-unsigned SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const SIFrameLowering *TFI =
MF.getSubtarget<GCNSubtarget>().getFrameLowering();
const SIMachineFunctionInfo *FuncInfo = MF.getInfo<SIMachineFunctionInfo>();
// Do MII and MIJ share the same pred_sel?
int OpI = TII->getOperandIdx(MII->getOpcode(), R600::OpName::pred_sel),
OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600::OpName::pred_sel);
- unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0,
- PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0;
+ Register PredI = (OpI > -1) ? MII->getOperand(OpI).getReg() : Register(),
+ PredJ = (OpJ > -1) ? MIJ->getOperand(OpJ).getReg() : Register();
if (PredI != PredJ)
return false;
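A default-constructed Register is the same sentinel the old code spelled as 0, which is why the pred_sel comparison above is unchanged in behavior. A small illustration (values hypothetical):

Register PredNone;              // Value-initialized: holds 0, no register.
assert(PredNone == Register()); // Null registers compare equal.
assert(PredNone == 0u);         // Compares through the unsigned conversion.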
if (SUJ->isSucc(SUI)) {
return &CalleeSavedReg;
}
-unsigned R600RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register R600RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return R600::NoRegister;
}
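The getFrameRegister hunks repeated across targets below all follow from the base declaration in TargetRegisterInfo changing its return type: every override must change in lockstep, because return-type covariance does not apply between unsigned and Register. A reduced sketch (class names hypothetical):

class MachineFunction;

struct BaseRegisterInfo {
  virtual Register getFrameRegister(const MachineFunction &MF) const = 0;
};

struct MyRegisterInfo : BaseRegisterInfo {
  // Keeping `unsigned` here would no longer compile under `override`.
  Register getFrameRegister(const MachineFunction &MF) const override;
};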
BitVector getReservedRegs(const MachineFunction &MF) const override;
const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
/// Get the HW encoding for a register's channel.
unsigned getHWRegChan(unsigned reg) const;
assert(SaveExec.getSubReg() == AMDGPU::NoSubRegister &&
Cond.getSubReg() == AMDGPU::NoSubRegister);
- unsigned SaveExecReg = SaveExec.getReg();
+ Register SaveExecReg = SaveExec.getReg();
MachineOperand &ImpDefSCC = MI.getOperand(4);
assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());
// Add an implicit def of exec to discourage scheduling VALU after this which
// will interfere with trying to form s_and_saveexec_b64 later.
- unsigned CopyReg = SimpleIf ? SaveExecReg
+ Register CopyReg = SimpleIf ? SaveExecReg
: MRI->createVirtualRegister(BoolRC);
MachineInstr *CopyExec =
BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
MachineBasicBlock &MBB = *MI.getParent();
const DebugLoc &DL = MI.getDebugLoc();
- unsigned DstReg = MI.getOperand(0).getReg();
+ Register DstReg = MI.getOperand(0).getReg();
assert(MI.getOperand(0).getSubReg() == AMDGPU::NoSubRegister);
bool ExecModified = MI.getOperand(3).getImm() != 0;
// We are running before TwoAddressInstructions, and si_else's operands are
// tied. In order to correctly tie the registers, split this into a copy of
// the src as that pass would.
- unsigned CopyReg = MRI->createVirtualRegister(BoolRC);
+ Register CopyReg = MRI->createVirtualRegister(BoolRC);
MachineInstr *CopyExec =
BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
.add(MI.getOperand(1)); // Saved EXEC
// This must be inserted before phis and any spill code inserted before the
// else.
- unsigned SaveReg = ExecModified ?
+ Register SaveReg = ExecModified ?
MRI->createVirtualRegister(BoolRC) : DstReg;
MachineInstr *OrSaveExec =
BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg)
if (SpillToSMEM && OnlyToVGPR)
return false;
- unsigned FrameReg = getFrameRegister(*MF);
+ Register FrameReg = getFrameRegister(*MF);
assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
SuperReg != MFI->getFrameOffsetReg() &&
unsigned EltSize = 4;
unsigned ScalarLoadOp;
- unsigned FrameReg = getFrameRegister(*MF);
+ Register FrameReg = getFrameRegister(*MF);
const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
if (SpillToSMEM && isSGPRClass(RC)) {
MachineOperand &FIOp = MI->getOperand(FIOperandNum);
int Index = MI->getOperand(FIOperandNum).getIndex();
- unsigned FrameReg = getFrameRegister(*MF);
+ Register FrameReg = getFrameRegister(*MF);
switch (MI->getOpcode()) {
// SGPR register spill
= MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
- unsigned ResultReg = IsCopy ?
+ Register ResultReg = IsCopy ?
MI->getOperand(0).getReg() :
MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
return 100;
}
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
bool canRealignStack(const MachineFunction &MF) const override;
bool requiresRegisterScavenging(const MachineFunction &Fn) const override;
bool IsStore = Ldst->mayStore();
bool IsLoad = Ldst->mayLoad();
- unsigned ValReg = IsLoad ? Ldst->getOperand(0).getReg() : 0;
+ Register ValReg = IsLoad ? Ldst->getOperand(0).getReg() : Register();
for (; MI != ME && MI != End; ++MI) {
if (MI->isDebugValue())
continue;
// Special handling of DBG_VALUE instructions.
if (MI.isDebugValue()) {
- unsigned FrameReg = getFrameRegister(MF);
+ Register FrameReg = getFrameRegister(MF);
MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
return;
ObjSize, RS, SPAdj);
}
-unsigned ARCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register ARCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const ARCFrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? ARC::FP : ARC::SP;
}
CallingConv::ID CC) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
//! Return whether to emit frame moves
static bool needsFrameMoves(const MachineFunction &MF);
|| needsStackRealignment(MF);
}
-unsigned
+Register
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
const ARMFrameLowering *TFI = getFrameLowering(MF);
int PIdx = MI.findFirstPredOperandIdx();
ARMCC::CondCodes Pred = (PIdx == -1)
? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
- unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
+ Register PredReg =
+     (PIdx == -1) ? Register() : MI.getOperand(PIdx + 1).getReg();
if (Offset == 0)
// Must be addrmode4/6.
MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
bool cannotEliminateFrame(const MachineFunction &MF) const;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
unsigned getBaseRegister() const { return BasePtr; }
bool isLowRegister(unsigned Reg) const;
assert(VA.isRegLoc() && "Value should be in reg");
assert(NextVA.isRegLoc() && "Value should be in reg");
- unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
+ Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
MRI.createGenericVirtualRegister(LLT::scalar(32))};
MIRBuilder.buildUnmerge(NewRegs, Arg.Reg);
/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
- const Value *Val, ArrayRef<unsigned> VRegs,
+ const Value *Val, ArrayRef<Register> VRegs,
MachineInstrBuilder &Ret) const {
if (!Val)
// Nothing to do here.
ArgInfo CurArgInfo(VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx));
setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
- SmallVector<unsigned, 4> Regs;
+ SmallVector<Register, 4> Regs;
splitToValueTypes(CurArgInfo, SplitVTs, MF,
- [&](unsigned Reg) { Regs.push_back(Reg); });
+ [&](Register Reg) { Regs.push_back(Reg); });
if (Regs.size() > 1)
MIRBuilder.buildUnmerge(Regs, VRegs[i]);
}
bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
assert(!Val == VRegs.empty() && "Return value without a vreg");
auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
assert(VA.isRegLoc() && "Value should be in reg");
assert(NextVA.isRegLoc() && "Value should be in reg");
- unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
+ Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
MRI.createGenericVirtualRegister(LLT::scalar(32))};
assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
auto &TLI = *getTLI<ARMTargetLowering>();
auto Subtarget = TLI.getSubtarget();
AssignFn);
SmallVector<ArgInfo, 8> ArgInfos;
- SmallVector<unsigned, 4> SplitRegs;
+ SmallVector<Register, 4> SplitRegs;
unsigned Idx = 0;
for (auto &Arg : F.args()) {
ArgInfo AInfo(VRegs[Idx], Arg.getType());
SplitRegs.clear();
splitToValueTypes(AInfo, ArgInfos, MF,
- [&](unsigned Reg) { SplitRegs.push_back(Reg); });
+ [&](Register Reg) { SplitRegs.push_back(Reg); });
if (!SplitRegs.empty())
MIRBuilder.buildMerge(VRegs[Idx], SplitRegs);
if (Arg.Flags.isByVal())
return false;
- SmallVector<unsigned, 8> Regs;
+ SmallVector<Register, 8> Regs;
splitToValueTypes(Arg, ArgInfos, MF,
- [&](unsigned Reg) { Regs.push_back(Reg); });
+ [&](Register Reg) { Regs.push_back(Reg); });
return false;
ArgInfos.clear();
- SmallVector<unsigned, 8> SplitRegs;
+ SmallVector<Register, 8> SplitRegs;
splitToValueTypes(OrigRet, ArgInfos, MF,
- [&](unsigned Reg) { SplitRegs.push_back(Reg); });
+ [&](Register Reg) { SplitRegs.push_back(Reg); });
auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, IsVarArg);
CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
ARMCallLowering(const ARMTargetLowering &TLI);
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<unsigned> VRegs) const override;
+ ArrayRef<Register> VRegs) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
- ArrayRef<unsigned> VRegs) const override;
+ ArrayRef<Register> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,
private:
bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<unsigned> VRegs,
+ ArrayRef<Register> VRegs,
MachineInstrBuilder &Ret) const;
- using SplitArgTy = std::function<void(unsigned Reg)>;
+ using SplitArgTy = std::function<void(Register Reg)>;
}
}
-unsigned BPFRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register BPFRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return BPF::R10;
}
unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
};
}
MI.isMetaInstruction();
}
-static unsigned UseReg(const MachineOperand& MO) {
- return MO.isReg() ? MO.getReg() : 0;
+static Register UseReg(const MachineOperand& MO) {
+ return MO.isReg() ? MO.getReg() : Register();
}
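Callers of a helper like UseReg need no adjustment: a null Register converts to 0, so truthiness checks and unsigned-keyed containers keep working. Sketch (the operand and the Uses set are hypothetical):

const MachineOperand &MO = MI.getOperand(0);
Register R = UseReg(MO);
if (R)            // False for non-register operands, as with the old 0.
  Uses.insert(R); // e.g. a SmallSet<Register, 4>.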
/// isSafeToMoveTogether - Returns true if it is safe to move I1 next to I2 such
std::advance(It2, MaxX);
MachineInstr &Def1 = *It1, &Def2 = *It2;
MachineOperand *Src1 = &Def1.getOperand(2), *Src2 = &Def2.getOperand(2);
- unsigned SR1 = Src1->isReg() ? Src1->getReg() : 0;
- unsigned SR2 = Src2->isReg() ? Src2->getReg() : 0;
+ Register SR1 = Src1->isReg() ? Src1->getReg() : Register();
+ Register SR2 = Src2->isReg() ? Src2->getReg() : Register();
bool Failure = false, CanUp = true, CanDown = true;
for (unsigned X = MinX+1; X < MaxX; X++) {
const DefUseInfo &DU = DUM.lookup(X);
RegisterSubReg(unsigned r = 0, unsigned s = 0) : R(r), S(s) {}
RegisterSubReg(const MachineOperand &MO) : R(MO.getReg()), S(MO.getSubReg()) {}
+ RegisterSubReg(Register Reg) : R(Reg), S(0) {}
bool operator== (const RegisterSubReg &Reg) const {
return R == Reg.R && S == Reg.S;
}
-unsigned HexagonRegisterInfo::getFrameRegister(const MachineFunction
+Register HexagonRegisterInfo::getFrameRegister(const MachineFunction
&MF) const {
const HexagonFrameLowering *TFI = getFrameLowering(MF);
if (TFI->hasFP(MF))
// Debug information queries.
unsigned getRARegister() const;
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
unsigned getFrameRegister() const;
unsigned getStackRegister() const;
unsigned LanaiRegisterInfo::getRARegister() const { return Lanai::RCA; }
-unsigned
+Register
LanaiRegisterInfo::getFrameRegister(const MachineFunction & /*MF*/) const {
return Lanai::FP;
}
-unsigned LanaiRegisterInfo::getBaseRegister() const { return Lanai::R14; }
+Register LanaiRegisterInfo::getBaseRegister() const { return Lanai::R14; }
const uint32_t *
LanaiRegisterInfo::getCallPreservedMask(const MachineFunction & /*MF*/,
// Debug information queries.
unsigned getRARegister() const;
- unsigned getFrameRegister(const MachineFunction &MF) const override;
- unsigned getBaseRegister() const;
+ Register getFrameRegister(const MachineFunction &MF) const override;
+ Register getBaseRegister() const;
bool hasBasePointer(const MachineFunction &MF) const;
int getDwarfRegNum(unsigned RegNum, bool IsEH) const;
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
-unsigned MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const MSP430FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? MSP430::FP : MSP430::SP;
}
RegScavenger *RS = nullptr) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
};
} // end namespace llvm
MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
: CallLowering(&TLI) {}
-bool MipsCallLowering::MipsHandler::assign(unsigned VReg, const CCValAssign &VA,
+bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
const EVT &VT) {
if (VA.isRegLoc()) {
assignValueToReg(VReg, VA, VT);
return true;
}
-bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
+bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
ArrayRef<CCValAssign> ArgLocs,
unsigned ArgLocsStartIndex,
const EVT &VT) {
}
void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
- SmallVectorImpl<unsigned> &VRegs) {
+ SmallVectorImpl<Register> &VRegs) {
if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
std::reverse(VRegs.begin(), VRegs.end());
}
bool MipsCallLowering::MipsHandler::handle(
ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) {
- SmallVector<unsigned, 4> VRegs;
+ SmallVector<Register, 4> VRegs;
unsigned SplitLength;
const Function &F = MIRBuilder.getMF().getFunction();
const DataLayout &DL = F.getParent()->getDataLayout();
: MipsHandler(MIRBuilder, MRI) {}
private:
- void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
+ void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) override;
unsigned getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) override;
- void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;
+ void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
- bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
+ bool handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
- unsigned ArgsReg, const EVT &VT) override;
+ Register ArgsReg, const EVT &VT) override;
virtual void markPhysRegUsed(unsigned PhysReg) {
MIRBuilder.getMBB().addLiveIn(PhysReg);
} // end anonymous namespace
-void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
+void IncomingValueHandler::assignValueToReg(Register ValVReg,
const CCValAssign &VA,
const EVT &VT) {
const MipsSubtarget &STI =
return AddrReg;
}
-void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
+void IncomingValueHandler::assignValueToAddress(Register ValVReg,
const CCValAssign &VA) {
if (VA.getLocInfo() == CCValAssign::SExt ||
VA.getLocInfo() == CCValAssign::ZExt ||
VA.getLocInfo() == CCValAssign::AExt) {
- unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
+ Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
buildLoad(LoadReg, VA);
MIRBuilder.buildTrunc(ValVReg, LoadReg);
} else
buildLoad(ValVReg, VA);
}
-bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
+bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs,
unsigned ArgLocsStartIndex,
- unsigned ArgsReg, const EVT &VT) {
+ Register ArgsReg, const EVT &VT) {
if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
return false;
setLeastSignificantFirst(VRegs);
: MipsHandler(MIRBuilder, MRI), MIB(MIB) {}
private:
- void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
+ void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) override;
unsigned getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) override;
- void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;
+ void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
- bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
+ bool handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex,
- unsigned ArgsReg, const EVT &VT) override;
+ Register ArgsReg, const EVT &VT) override;
- unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);
+ unsigned extendRegister(Register ValReg, const CCValAssign &VA);
MachineInstrBuilder &MIB;
};
} // end anonymous namespace
-void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
+void OutgoingValueHandler::assignValueToReg(Register ValVReg,
const CCValAssign &VA,
const EVT &VT) {
- unsigned PhysReg = VA.getLocReg();
+ Register PhysReg = VA.getLocReg();
const MipsSubtarget &STI =
static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
LLT p0 = LLT::pointer(0, 32);
LLT s32 = LLT::scalar(32);
- unsigned SPReg = MRI.createGenericVirtualRegister(p0);
+ Register SPReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildCopy(SPReg, Mips::SP);
- unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
+ Register OffsetReg = MRI.createGenericVirtualRegister(s32);
unsigned Offset = VA.getLocMemOffset();
MIRBuilder.buildConstant(OffsetReg, Offset);
- unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
+ Register AddrReg = MRI.createGenericVirtualRegister(p0);
MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
MachinePointerInfo MPO =
return AddrReg;
}
-void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
+void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
const CCValAssign &VA) {
MachineMemOperand *MMO;
- unsigned Addr = getStackAddress(VA, MMO);
+ Register Addr = getStackAddress(VA, MMO);
unsigned ExtReg = extendRegister(ValVReg, VA);
MIRBuilder.buildStore(ExtReg, Addr, *MMO);
}
-unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
+unsigned OutgoingValueHandler::extendRegister(Register ValReg,
const CCValAssign &VA) {
LLT LocTy{VA.getLocVT()};
switch (VA.getLocInfo()) {
case CCValAssign::SExt: {
- unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
+ Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
MIRBuilder.buildSExt(ExtReg, ValReg);
return ExtReg;
}
case CCValAssign::ZExt: {
- unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
+ Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
MIRBuilder.buildZExt(ExtReg, ValReg);
return ExtReg;
}
case CCValAssign::AExt: {
- unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
+ Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
MIRBuilder.buildAnyExt(ExtReg, ValReg);
return ExtReg;
}
llvm_unreachable("unable to extend register");
}
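Note that extendRegister keeps its unsigned return type while creating Register values; each return goes through the implicit Register-to-unsigned conversion, which is what lets this kind of partial migration compile. Reduced sketch (names hypothetical):

unsigned widenToLocTy(MachineRegisterInfo &MRI, Register ValReg, LLT LocTy) {
  Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
  // ... emit the extension from ValReg into ExtReg here ...
  return ExtReg; // Register narrows back to unsigned implicitly.
}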
-bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
+bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs,
unsigned ArgLocsStartIndex,
- unsigned ArgsReg, const EVT &VT) {
+ Register ArgsReg, const EVT &VT) {
MIRBuilder.buildUnmerge(VRegs, ArgsReg);
setLeastSignificantFirst(VRegs);
if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
const Value *Val,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);
bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
// Quick exit if there aren't any args.
if (F.arg_empty())
ArrayRef<CallLowering::ArgInfo> Args);
protected:
- bool assignVRegs(ArrayRef<unsigned> VRegs, ArrayRef<CCValAssign> ArgLocs,
+ bool assignVRegs(ArrayRef<Register> VRegs, ArrayRef<CCValAssign> ArgLocs,
unsigned ArgLocsStartIndex, const EVT &VT);
- void setLeastSignificantFirst(SmallVectorImpl<unsigned> &VRegs);
+ void setLeastSignificantFirst(SmallVectorImpl<Register> &VRegs);
MachineIRBuilder &MIRBuilder;
MachineRegisterInfo &MRI;
private:
- bool assign(unsigned VReg, const CCValAssign &VA, const EVT &VT);
+ bool assign(Register VReg, const CCValAssign &VA, const EVT &VT);
virtual unsigned getStackAddress(const CCValAssign &VA,
MachineMemOperand *&MMO) = 0;
- virtual void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
+ virtual void assignValueToReg(Register ValVReg, const CCValAssign &VA,
const EVT &VT) = 0;
- virtual void assignValueToAddress(unsigned ValVReg,
+ virtual void assignValueToAddress(Register ValVReg,
const CCValAssign &VA) = 0;
- virtual bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
+ virtual bool handleSplit(SmallVectorImpl<Register> &VRegs,
ArrayRef<CCValAssign> ArgLocs,
- unsigned ArgLocsStartIndex, unsigned ArgsReg,
+ unsigned ArgLocsStartIndex, Register ArgsReg,
const EVT &VT) = 0;
};
MipsCallLowering(const MipsTargetLowering &TLI);
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<unsigned> VRegs) const override;
+ ArrayRef<Register> VRegs) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
- ArrayRef<unsigned> VRegs) const override;
+ ArrayRef<Register> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,
eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset);
}
-unsigned MipsRegisterInfo::
+Register MipsRegisterInfo::
getFrameRegister(const MachineFunction &MF) const {
const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
bool canRealignStack(const MachineFunction &MF) const override;
/// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
/// Return GPR register class.
virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
- unsigned Fd = MI.getOperand(0).getReg();
- unsigned Ws = MI.getOperand(1).getReg();
+ Register Fd = MI.getOperand(0).getReg();
+ Register Ws = MI.getOperand(1).getReg();
MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
const TargetRegisterClass *GPRRC =
unsigned MTC1Opc = IsFGR64onMips64
? Mips::DMTC1
: (IsFGR64onMips32 ? Mips::MTC1_D64 : Mips::MTC1);
unsigned COPYOpc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W;
- unsigned Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
- unsigned WPHI = Wtemp;
+ Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ Register WPHI = Wtemp;
BuildMI(*BB, MI, DL, TII->get(Mips::FEXUPR_W), Wtemp).addReg(Ws);
if (IsFGR64) {
}
// Perform the safety regclass copy mentioned above.
- unsigned Rtemp = RegInfo.createVirtualRegister(GPRRC);
- unsigned FPRPHI = IsFGR64onMips32
+ Register Rtemp = RegInfo.createVirtualRegister(GPRRC);
+ Register FPRPHI = IsFGR64onMips32
? RegInfo.createVirtualRegister(&Mips::FGR64RegClass)
: Fd;
BuildMI(*BB, MI, DL, TII->get(COPYOpc), Rtemp).addReg(WPHI).addImm(0);
BuildMI(*BB, MI, DL, TII->get(MTC1Opc), FPRPHI).addReg(Rtemp);
if (IsFGR64onMips32) {
- unsigned Rtemp2 = RegInfo.createVirtualRegister(GPRRC);
+ Register Rtemp2 = RegInfo.createVirtualRegister(GPRRC);
BuildMI(*BB, MI, DL, TII->get(Mips::COPY_S_W), Rtemp2)
.addReg(WPHI)
.addImm(1);
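Pre-migration, opcodes and registers were both bare unsigneds, which is exactly the confusion Register is meant to surface: opcode values such as Mips::COPY_S_W stay unsigned, while values feeding register operands become Register, as in the hunk above:

unsigned Opc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W; // opcode
Register Rtemp = RegInfo.createVirtualRegister(GPRRC);            // register
BuildMI(*BB, MI, DL, TII->get(Opc), Rtemp).addReg(WPHI).addImm(0);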
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
-unsigned NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return NVPTX::VRFrame;
}
unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
ManagedStringPool *getStrPool() const {
return const_cast<ManagedStringPool *>(&ManagedStrPool);
/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
// If there are any other uses other than scalar to vector, then we should
// keep it as a scalar load -> direct move pattern to prevent multiple
// loads.
// We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
// no way to mark dependencies as implicit here.
// We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
if (!isPatchPoint)
Ops.push_back(DAG.getRegister(isPPC64 ? PPC::X2
: PPC::R2, PtrVT));
}
// undefined):
// < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
// < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
//
// The same operation in little-endian ordering will be:
// <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
// <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
BifID = Intrinsic::ppc_altivec_vmaxsh;
else if (VT == MVT::v16i8)
BifID = Intrinsic::ppc_altivec_vmaxsb;
return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
}
MachineFunction *F = BB->getParent();
MachineFunction::iterator It = ++BB->getIterator();
- unsigned dest = MI.getOperand(0).getReg();
- unsigned ptrA = MI.getOperand(1).getReg();
- unsigned ptrB = MI.getOperand(2).getReg();
- unsigned incr = MI.getOperand(3).getReg();
+ Register dest = MI.getOperand(0).getReg();
+ Register ptrA = MI.getOperand(1).getReg();
+ Register ptrB = MI.getOperand(2).getReg();
+ Register incr = MI.getOperand(3).getReg();
DebugLoc dl = MI.getDebugLoc();
MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
exitMBB->transferSuccessorsAndUpdatePHIs(BB);
MachineRegisterInfo &RegInfo = F->getRegInfo();
- unsigned TmpReg = (!BinOpcode) ? incr :
+ Register TmpReg = (!BinOpcode) ? incr :
RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
: &PPC::GPRCRegClass);
is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
- unsigned PtrReg = RegInfo.createVirtualRegister(RC);
- unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned ShiftReg =
+ Register PtrReg = RegInfo.createVirtualRegister(RC);
+ Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
+ Register ShiftReg =
isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
- unsigned Incr2Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned MaskReg = RegInfo.createVirtualRegister(GPRC);
- unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC);
- unsigned Ptr1Reg;
- unsigned TmpReg =
+ Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
+ Register MaskReg = RegInfo.createVirtualRegister(GPRC);
+ Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
+ Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
+ Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
+ Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
+ Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
+ Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
+ Register Ptr1Reg;
+ Register TmpReg =
(!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
// thisMBB:
is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
- unsigned PtrReg = RegInfo.createVirtualRegister(RC);
- unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned ShiftReg =
+ Register PtrReg = RegInfo.createVirtualRegister(RC);
+ Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
+ Register ShiftReg =
isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
- unsigned NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned MaskReg = RegInfo.createVirtualRegister(GPRC);
- unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
- unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC);
- unsigned Ptr1Reg;
- unsigned TmpReg = RegInfo.createVirtualRegister(GPRC);
- unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
+ Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
+ Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
+ Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
+ Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
+ Register MaskReg = RegInfo.createVirtualRegister(GPRC);
+ Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
+ Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
+ Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
+ Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
+ Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
+ Register Ptr1Reg;
+ Register TmpReg = RegInfo.createVirtualRegister(GPRC);
+ Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
// thisMBB:
// ...
// fallthrough --> loopMBB
// Save FPSCR value.
BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
// The floating-point rounding mode is in bits 62:63 of the FPSCR, and has
// the following settings:
// 00 Round to nearest
// 01 Round to 0
// Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
// or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
// If the target doesn't have DirectMove, we should use the stack to do the
// conversion, because the target doesn't have instructions like mtvsrd
// or mfvsrd to do this conversion directly.
auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
MFI.getObjectAlignment(FrameIdx));
// Load from the stack where SrcReg is stored, and save to DestReg,
// so we have done the RegClass conversion from RegClass::SrcReg to
// RegClass::DestReg.
BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
.addImm(0)
};
unsigned OldFPSCRReg = MI.getOperand(0).getReg();
// Save FPSCR value.
BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
// When the operand is a gprc register, use the two least significant bits
// of the register and the mtfsf instruction to set bits 62:63 of the FPSCR.
//
// copy OldFPSCRTmpReg, OldFPSCRReg
// (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
// rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
// copy NewFPSCRReg, NewFPSCRTmpReg
unsigned OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
unsigned ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
unsigned ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
}
case ISD::BUILD_VECTOR:
return DAGCombineBuildVector(N, DCI);
case ISD::ABS:
return combineABS(N, DCI);
case ISD::VSELECT:
return combineVSelect(N, DCI);
}
if (!DisableInnermostLoopAlign32) {
// If the nested loop is an innermost loop, prefer a 32-byte alignment,
// so that we can decrease cache misses and branch-prediction misses.
// Actual alignment of the loop will depend on the hotness check and other
// logic in alignBlocks.
if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
return 5;
}
if (CModel == CodeModel::Small || CModel == CodeModel::Large)
return true;
// JumpTable and BlockAddress are accessed as got-indirect.
if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
return true;
// Swap op1/op2
assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
"Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo.");
- unsigned Reg0 = MI.getOperand(0).getReg();
- unsigned Reg1 = MI.getOperand(1).getReg();
- unsigned Reg2 = MI.getOperand(2).getReg();
+ Register Reg0 = MI.getOperand(0).getReg();
+ Register Reg1 = MI.getOperand(1).getReg();
+ Register Reg2 = MI.getOperand(2).getReg();
unsigned SubReg1 = MI.getOperand(1).getSubReg();
unsigned SubReg2 = MI.getOperand(2).getSubReg();
bool Reg1IsKill = MI.getOperand(1).isKill();
if (NewMI) {
// Create a new instruction.
- unsigned Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
+ Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
bool Reg0IsDead = MI.getOperand(0).isDead();
return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
.addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
return &*It;
}
break;
} else if (It->readsRegister(Reg, &getRegisterInfo()))
// If we see another use of this reg between the def and the MI,
// we want to flag it so the def isn't deleted.
SeenIntermediateUse = true;
}
}
// Check if the 'MI' that has the index OpNoForForwarding
// meets the requirement described in the ImmInstrInfo.
bool PPCInstrInfo::isUseMIElgibleForForwarding(MachineInstr &MI,
const ImmInstrInfo &III,
MachineOperand *&RegMO) const {
unsigned Opc = DefMI.getOpcode();
if (Opc != PPC::ADDItocL && Opc != PPC::ADDI && Opc != PPC::ADDI8)
return false;
assert(DefMI.getNumOperands() >= 3 &&
"Add inst must have at least three operands");
// Otherwise, it is a Constant Pool Index (CPI) or a Global, which is in
// fact a relocation. We need to replace the special zero register with
// ImmMO.
// Before that, we need to fix up the target flags for the immediate.
// For some reason, we fail to set the flag for the ImmMO if it is a CPI.
if (DefMI.getOpcode() == PPC::ADDItocL)
ImmMO->setTargetFlags(PPCII::MO_TOC_LO);
MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
}
-unsigned PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const PPCFrameLowering *TFI = getFrameLowering(MF);
if (!TM.isPPC64())
return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;
}
-unsigned PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
+Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
if (!hasBasePointer(MF))
return getFrameRegister(MF);
int64_t Offset) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
// Base pointer (stack realignment) support.
- unsigned getBaseRegister(const MachineFunction &MF) const;
+ Register getBaseRegister(const MachineFunction &MF) const;
bool hasBasePointer(const MachineFunction &MF) const;
/// stripRegisterPrefix - This method strips the character prefix from a
MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
-unsigned RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const TargetFrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}
unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
bool requiresRegisterScavenging(const MachineFunction &MF) const override {
return true;
}
-unsigned SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
return SP::I6;
}
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = nullptr) const override;
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
bool canRealignStack(const MachineFunction &MF) const override;
// SrcReg2 is the register if the source operand is a register,
// 0 if the source operand is immediate, and the base register
// if the source operand is memory (index is not supported).
- unsigned SrcReg = Compare.getOperand(0).getReg();
- unsigned SrcReg2 =
- Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : 0;
+ Register SrcReg = Compare.getOperand(0).getReg();
+ Register SrcReg2 =
+ Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : Register();
MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
for (++MBBI; MBBI != MBBE; ++MBBI)
if (MBBI->modifiesRegister(SrcReg, TRI) ||
}
// Force base value Base into a register before MI. Return the register.
-static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
+static Register forceReg(MachineInstr &MI, MachineOperand &Base,
const SystemZInstrInfo *TII) {
if (Base.isReg())
return Base.getReg();
MachineFunction &MF = *MBB->getParent();
MachineRegisterInfo &MRI = MF.getRegInfo();
- unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
+ Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
.add(Base)
.addImm(0)
MachineOperand Base = earlyUseOperand(MI.getOperand(1));
int64_t Disp = MI.getOperand(2).getImm();
MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
- unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
- unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
+ Register BitShift = IsSubWord ? MI.getOperand(4).getReg() : Register();
+ Register NegBitShift = IsSubWord ? MI.getOperand(5).getReg() : Register();
DebugLoc DL = MI.getDebugLoc();
if (IsSubWord)
BitSize = MI.getOperand(6).getImm();
assert(LOpcode && CSOpcode && "Displacement out of range");
// Create virtual registers for temporary results.
- unsigned OrigVal = MRI.createVirtualRegister(RC);
- unsigned OldVal = MRI.createVirtualRegister(RC);
- unsigned NewVal = (BinOpcode || IsSubWord ?
+ Register OrigVal = MRI.createVirtualRegister(RC);
+ Register OldVal = MRI.createVirtualRegister(RC);
+ Register NewVal = (BinOpcode || IsSubWord ?
MRI.createVirtualRegister(RC) : Src2.getReg());
- unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
- unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
+ Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
+ Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
// Insert a basic block for the main loop.
MachineBasicBlock *StartMBB = MBB;
unsigned Dest = MI.getOperand(0).getReg();
MachineOperand Base = earlyUseOperand(MI.getOperand(1));
int64_t Disp = MI.getOperand(2).getImm();
- unsigned Src2 = MI.getOperand(3).getReg();
- unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
- unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
+ Register Src2 = MI.getOperand(3).getReg();
+ Register BitShift = (IsSubWord ? MI.getOperand(4).getReg() : Register());
+ Register NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : Register());
DebugLoc DL = MI.getDebugLoc();
if (IsSubWord)
BitSize = MI.getOperand(6).getImm();
assert(LOpcode && CSOpcode && "Displacement out of range");
// Create virtual registers for temporary results.
- unsigned OrigVal = MRI.createVirtualRegister(RC);
- unsigned OldVal = MRI.createVirtualRegister(RC);
- unsigned NewVal = MRI.createVirtualRegister(RC);
- unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
- unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
- unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
+ Register OrigVal = MRI.createVirtualRegister(RC);
+ Register OldVal = MRI.createVirtualRegister(RC);
+ Register NewVal = MRI.createVirtualRegister(RC);
+ Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
+ Register RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
+ Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
// Insert 3 basic blocks for the loop.
MachineBasicBlock *StartMBB = MBB;
if (MI.getNumExplicitOperands() > 5) {
bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
- uint64_t StartCountReg = MI.getOperand(5).getReg();
- uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
- uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
+ Register StartCountReg = MI.getOperand(5).getReg();
+ Register StartSrcReg = forceReg(MI, SrcBase, TII);
+ Register StartDestReg = (HaveSingleBase ? StartSrcReg :
forceReg(MI, DestBase, TII));
const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
- uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
- uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
+ Register ThisSrcReg = MRI.createVirtualRegister(RC);
+ Register ThisDestReg = (HaveSingleBase ? ThisSrcReg :
MRI.createVirtualRegister(RC));
- uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
- uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
+ Register NextSrcReg = MRI.createVirtualRegister(RC);
+ Register NextDestReg = (HaveSingleBase ? NextSrcReg :
MRI.createVirtualRegister(RC));
RC = &SystemZ::GR64BitRegClass;
- uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
- uint64_t NextCountReg = MRI.createVirtualRegister(RC);
+ Register ThisCountReg = MRI.createVirtualRegister(RC);
+ Register NextCountReg = MRI.createVirtualRegister(RC);
MachineBasicBlock *StartMBB = MBB;
MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
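The hunk above is more than a rename: these loop registers were previously held in uint64_t, which only worked because the register number widened silently. Register keeps the value typed while still interoperating with unsigned-based APIs. Illustration:

uint64_t OldStyle = MI.getOperand(5).getReg(); // silently widened register
Register NewStyle = MI.getOperand(5).getReg(); // same value, typed intent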
MemOpcode = -1;
else {
assert(NumOps == 3 && "Expected two source registers.");
- unsigned DstReg = MI.getOperand(0).getReg();
- unsigned DstPhys =
+ Register DstReg = MI.getOperand(0).getReg();
+ Register DstPhys =
(TRI->isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg);
- unsigned SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
+ Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
: ((OpNum == 1 && MI.isCommutable())
? MI.getOperand(2).getReg()
- : 0));
+ : Register()));
if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
TRI->isVirtualRegister(SrcReg) && DstPhys == VRM->getPhys(SrcReg))
NeedsCommute = (OpNum == 1);
continue;
auto tryAddHint = [&](const MachineOperand *MO) -> void {
- unsigned Reg = MO->getReg();
- unsigned PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
+ Register Reg = MO->getReg();
+ Register PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
if (PhysReg) {
if (MO->getSubReg())
PhysReg = getSubReg(PhysReg, MO->getSubReg());
return true;
}
-unsigned
+Register
SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const SystemZFrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
const TargetRegisterClass *NewRC,
LiveIntervals &LIS) const override;
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
};
} // end namespace llvm
assert(MFI.getObjectSize(FrameIndex) != 0 &&
"We assume that variable-sized objects have already been lowered, "
"and don't use FrameIndex operands.");
- unsigned FrameRegister = getFrameRegister(MF);
+ Register FrameRegister = getFrameRegister(MF);
// If this is the address operand of a load or store, make it relative to SP
// and fold the frame offset directly in.
MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*IsDef=*/false);
}
-unsigned
+Register
WebAssemblyRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
static const unsigned Regs[2][2] = {
/* !isArch64Bit isArch64Bit */
RegScavenger *RS = nullptr) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
const TargetRegisterClass *
getPointerRegClass(const MachineFunction &MF,
return true;
}
- SmallVector<unsigned, 8> SplitRegs;
+ SmallVector<Register, 8> SplitRegs;
EVT PartVT = TLI.getRegisterType(Context, VT);
Type *PartTy = PartVT.getTypeForEVT(Context);
bool X86CallLowering::lowerReturn(
MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
"Return value without a vreg");
auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0);
ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
if (!splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI,
- [&](ArrayRef<unsigned> Regs) {
+ [&](ArrayRef<Register> Regs) {
MIRBuilder.buildUnmerge(Regs, VRegs[i]);
}))
return false;
bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
const Function &F,
- ArrayRef<unsigned> VRegs) const {
+ ArrayRef<Register> VRegs) const {
if (F.arg_empty())
return true;
ArgInfo OrigArg(VRegs[Idx], Arg.getType());
setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);
if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
- [&](ArrayRef<unsigned> Regs) {
+ [&](ArrayRef<Register> Regs) {
MIRBuilder.buildMerge(VRegs[Idx], Regs);
}))
return false;
return false;
if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
- [&](ArrayRef<unsigned> Regs) {
+ [&](ArrayRef<Register> Regs) {
MIRBuilder.buildUnmerge(Regs, OrigArg.Reg);
}))
return false;
if (OrigRet.Reg) {
SplitArgs.clear();
- SmallVector<unsigned, 8> NewRegs;
+ SmallVector<Register, 8> NewRegs;
if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
- [&](ArrayRef<unsigned> Regs) {
+ [&](ArrayRef<Register> Regs) {
NewRegs.assign(Regs.begin(), Regs.end());
}))
return false;
X86CallLowering(const X86TargetLowering &TLI);
bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
- ArrayRef<unsigned> VRegs) const override;
+ ArrayRef<Register> VRegs) const override;
bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
- ArrayRef<unsigned> VRegs) const override;
+ ArrayRef<Register> VRegs) const override;
bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
const MachineOperand &Callee, const ArgInfo &OrigRet,
private:
/// A function of this type is used to perform value split action.
- using SplitArgTy = std::function<void(ArrayRef<unsigned>)>;
+ using SplitArgTy = std::function<void(ArrayRef<Register>)>;
bool splitToValueTypes(const ArgInfo &OrigArgInfo,
SmallVectorImpl<ArgInfo> &SplitArgs,
// registers. For the prolog expansion we use RAX, RCX and RDX.
MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetRegisterClass *RegClass = &X86::GR64RegClass;
- const unsigned SizeReg = InProlog ? (unsigned)X86::RAX
+ const Register SizeReg = InProlog ? X86::RAX
: MRI.createVirtualRegister(RegClass),
- ZeroReg = InProlog ? (unsigned)X86::RCX
+ ZeroReg = InProlog ? X86::RCX
: MRI.createVirtualRegister(RegClass),
- CopyReg = InProlog ? (unsigned)X86::RDX
+ CopyReg = InProlog ? X86::RDX
: MRI.createVirtualRegister(RegClass),
- TestReg = InProlog ? (unsigned)X86::RDX
+ TestReg = InProlog ? X86::RDX
: MRI.createVirtualRegister(RegClass),
- FinalReg = InProlog ? (unsigned)X86::RDX
+ FinalReg = InProlog ? X86::RDX
: MRI.createVirtualRegister(RegClass),
- RoundedReg = InProlog ? (unsigned)X86::RDX
+ RoundedReg = InProlog ? X86::RDX
: MRI.createVirtualRegister(RegClass),
- LimitReg = InProlog ? (unsigned)X86::RCX
+ LimitReg = InProlog ? X86::RCX
: MRI.createVirtualRegister(RegClass),
- JoinReg = InProlog ? (unsigned)X86::RCX
+ JoinReg = InProlog ? X86::RCX
: MRI.createVirtualRegister(RegClass),
- ProbeReg = InProlog ? (unsigned)X86::RCX
+ ProbeReg = InProlog ? X86::RCX
: MRI.createVirtualRegister(RegClass);
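The (unsigned) casts dropped in this hunk existed to give the conditional operator a common type when one arm is an enumerator such as X86::RAX; Register's implicit conversions now absorb that mismatch. Reduced illustration (locals hypothetical):

// Before: both arms had to be forced to unsigned explicitly.
unsigned OldReg = InProlog ? (unsigned)X86::RAX
                           : MRI.createVirtualRegister(RegClass);
// After: no cast needed; the result converts to (or already is) Register.
Register NewReg = InProlog ? X86::RAX
                           : MRI.createVirtualRegister(RegClass);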
// SP-relative offsets where we can save RCX and RDX.
bool X86FrameLowering::has128ByteRedZone(const MachineFunction& MF) const {
// x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
// clobbered by any interrupt handler.
assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
"MF used frame lowering for wrong subtarget");
const Function &Fn = MF.getFunction();
unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
bool HasDef = MI.getDesc().getNumDefs();
- unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
- unsigned Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
- unsigned Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
+ Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
+ Register Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
+ Register Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
bool Tied1 =
0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
bool Tied2 =
}
}
-unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const X86FrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? FramePtr : StackPtr;
}
RegScavenger *RS = nullptr) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
unsigned getPtrSizedFrameRegister(const MachineFunction &MF) const;
unsigned getPtrSizedStackRegister(const MachineFunction &MF) const;
unsigned getStackRegister() const { return StackPtr; }
Offset += StackSize;
- unsigned FrameReg = getFrameRegister(MF);
+ Register FrameReg = getFrameRegister(MF);
// Special handling of DBG_VALUE instructions.
if (MI.isDebugValue()) {
}
-unsigned XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+Register XCoreRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
const XCoreFrameLowering *TFI = getFrameLowering(MF);
return TFI->hasFP(MF) ? XCore::R10 : XCore::SP;
RegScavenger *RS = nullptr) const override;
// Debug information queries.
- unsigned getFrameRegister(const MachineFunction &MF) const override;
+ Register getFrameRegister(const MachineFunction &MF) const override;
//! Return whether to emit frame moves
static bool needsFrameMoves(const MachineFunction &MF);
return MF;
}
-static void collectCopies(SmallVectorImpl<unsigned> &Copies,
+static void collectCopies(SmallVectorImpl<Register> &Copies,
MachineFunction *MF) {
for (auto &MBB : *MF)
for (MachineInstr &MI : MBB) {
MachineFunction *MF;
std::pair<std::unique_ptr<Module>, std::unique_ptr<MachineModuleInfo>>
ModuleMMIPair;
- SmallVector<unsigned, 4> Copies;
+ SmallVector<Register, 4> Copies;
MachineBasicBlock *EntryMBB;
MachineIRBuilder B;
MachineRegisterInfo *MRI;
if (!TM)
return;
- SmallVector<unsigned, 4> Copies;
+ SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
LLT s64 = LLT::scalar(64);
if (!TM)
return;
- SmallVector<unsigned, 4> Copies;
+ SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
B.buildUnmerge(LLT::scalar(32), Copies[0]);
B.buildUnmerge(LLT::scalar(16), Copies[1]);
if (!TM)
return;
- SmallVector<unsigned, 4> Copies;
+ SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
LLT S64 = LLT::scalar(64);
return;
LLT S64 = LLT::scalar(64);
- SmallVector<unsigned, 4> Copies;
+ SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
// Make sure DstOp version works. sqrt is just a placeholder intrinsic.
.addUse(Copies[0]);
// Make sure register version works
- SmallVector<unsigned, 1> Results;
+ SmallVector<Register, 1> Results;
Results.push_back(MRI->createGenericVirtualRegister(S64));
B.buildIntrinsic(Intrinsic::sqrt, Results, false)
.addUse(Copies[1]);
LLT S64 = LLT::scalar(64);
LLT S128 = LLT::scalar(128);
- SmallVector<unsigned, 4> Copies;
+ SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
B.buildXor(S64, Copies[0], Copies[1]);
B.buildNot(S64, Copies[0]);
return;
LLT S32 = LLT::scalar(32);
- SmallVector<unsigned, 4> Copies;
+ SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
B.buildCTPOP(S32, Copies[0]);
return;
LLT S32 = LLT::scalar(32);
- SmallVector<unsigned, 4> Copies;
+ SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
B.buildUITOFP(S32, Copies[0]);
return;
LLT S64 = LLT::scalar(64);
- SmallVector<unsigned, 4> Copies;
+ SmallVector<Register, 4> Copies;
collectCopies(Copies, MF);
B.buildSMin(S64, Copies[0], Copies[1]);
bool match =
mi_match(MIBAdd->getOperand(0).getReg(), MRI, m_GAdd(m_Reg(), m_Reg()));
EXPECT_TRUE(match);
- unsigned Src0, Src1, Src2;
+ Register Src0, Src1, Src2;
match = mi_match(MIBAdd->getOperand(0).getReg(), MRI,
m_GAdd(m_Reg(Src0), m_Reg(Src1)));
EXPECT_TRUE(match);
bool match = mi_match(MIBFabs->getOperand(0).getReg(), MRI, m_GFabs(m_Reg()));
EXPECT_TRUE(match);
- unsigned Src;
+ Register Src;
auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
match = mi_match(MIBFNeg->getOperand(0).getReg(), MRI, m_GFNeg(m_Reg(Src)));
EXPECT_TRUE(match);
auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
auto MIBZExt = B.buildZExt(s64, MIBTrunc);
auto MIBSExt = B.buildSExt(s64, MIBTrunc);
- unsigned Src0;
+ Register Src0;
bool match =
mi_match(MIBTrunc->getOperand(0).getReg(), MRI, m_GTrunc(m_Reg(Src0)));
EXPECT_TRUE(match);
LLT PtrTy = LLT::pointer(0, 64);
auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
- unsigned Src0;
+ Register Src0;
// match the ptrtoint(inttoptr reg)
bool match = mi_match(MIBPtrToInt->getOperand(0).getReg(), MRI,
LLT s64 = LLT::scalar(64);
LLT s32 = LLT::scalar(32);
auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
- unsigned Src0, Src1;
+ Register Src0, Src1;
bool match =
mi_match(MIBAdd->getOperand(0).getReg(), MRI,
m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));