From: Diana Picus
Date: Fri, 13 Jan 2017 09:58:52 +0000 (+0000)
Subject: [CodeGen] Rename MachineInstrBuilder::addOperand. NFC
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=8a47810cd6bf8c0803b2c0076b292f042a7b6d8f;p=llvm

[CodeGen] Rename MachineInstrBuilder::addOperand. NFC

Rename from addOperand to just add, to match the other method that has
been added to MachineInstrBuilder for adding more than just one operand.

See https://reviews.llvm.org/D28057 for the whole discussion.

Differential Revision: https://reviews.llvm.org/D28556

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@291891 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/CodeGen/MachineInstrBuilder.h b/include/llvm/CodeGen/MachineInstrBuilder.h
index 53e1fb13a2d..233a467e17e 100644
--- a/include/llvm/CodeGen/MachineInstrBuilder.h
+++ b/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -187,7 +187,7 @@ public:
     return *this;
   }
 
-  const MachineInstrBuilder &addOperand(const MachineOperand &MO) const {
+  const MachineInstrBuilder &add(const MachineOperand &MO) const {
     MI->addOperand(*MF, MO);
     return *this;
   }
diff --git a/lib/CodeGen/ImplicitNullChecks.cpp b/lib/CodeGen/ImplicitNullChecks.cpp
index 9588dfb7205..04405553428 100644
--- a/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/lib/CodeGen/ImplicitNullChecks.cpp
@@ -522,7 +522,7 @@ ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
                  .addImm(LoadMI->getOpcode());
 
   for (auto &MO : LoadMI->uses())
-    MIB.addOperand(MO);
+    MIB.add(MO);
 
   MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());
 
diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp
index 0934d8cfeaa..b6b2b614d66 100644
--- a/lib/CodeGen/LiveDebugVariables.cpp
+++ b/lib/CodeGen/LiveDebugVariables.cpp
@@ -944,7 +944,7 @@ void UserValue::insertDebugValue(MachineBasicBlock *MBB, SlotIndex Idx,
             IsIndirect, Loc.getReg(), offset, Variable, Expression);
   else
     BuildMI(*MBB, I, getDebugLoc(), TII.get(TargetOpcode::DBG_VALUE))
-        .addOperand(Loc)
+        .add(Loc)
         .addImm(offset)
         .addMetadata(Variable)
         .addMetadata(Expression);
diff --git a/lib/CodeGen/PatchableFunction.cpp b/lib/CodeGen/PatchableFunction.cpp
index ad9166f1ed2..00e72971a01 100644
--- a/lib/CodeGen/PatchableFunction.cpp
+++ b/lib/CodeGen/PatchableFunction.cpp
@@ -75,7 +75,7 @@ bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) {
                  .addImm(FirstActualI->getOpcode());
 
   for (auto &MO : FirstActualI->operands())
-    MIB.addOperand(MO);
+    MIB.add(MO);
 
   FirstActualI->eraseFromParent();
   MF.ensureAlignment(4);
diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp
index e2f33bb433b..9517d64447b 100644
--- a/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -646,7 +646,7 @@ bool FastISel::selectStackmap(const CallInst *I) {
   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                     TII.get(TargetOpcode::STACKMAP));
   for (auto const &MO : Ops)
-    MIB.addOperand(MO);
+    MIB.add(MO);
 
   // Issue CALLSEQ_END
   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
@@ -826,7 +826,7 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
                                     TII.get(TargetOpcode::PATCHPOINT));
 
   for (auto &MO : Ops)
-    MIB.addOperand(MO);
+    MIB.add(MO);
 
   MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);
 
@@ -1149,7 +1149,7 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
     } else
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
               TII.get(TargetOpcode::DBG_VALUE))
-          .addOperand(*Op)
+          .add(*Op)
           .addImm(0)
           .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 9ca646534e2..4e5ad2fad01 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4752,7 +4752,7 @@ bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
   else
     FuncInfo.ArgDbgValues.push_back(
         BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE))
-            .addOperand(*Op)
+            .add(*Op)
            .addImm(Offset)
            .addMetadata(Variable)
            .addMetadata(Expr));
diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp
index 01f91b96b58..ef766db756e 100644
--- a/lib/CodeGen/TargetInstrInfo.cpp
+++ b/lib/CodeGen/TargetInstrInfo.cpp
@@ -470,7 +470,7 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
 
   // No need to fold return, the meta data, and function arguments
   for (unsigned i = 0; i < StartIdx; ++i)
-    MIB.addOperand(MI.getOperand(i));
+    MIB.add(MI.getOperand(i));
 
   for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
     MachineOperand &MO = MI.getOperand(i);
@@ -490,7 +490,7 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
       MIB.addImm(SpillOffset);
     } else
-      MIB.addOperand(MO);
+      MIB.add(MO);
   }
   return NewMI;
 }
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 003311b157f..76b7852bcbc 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -1227,7 +1227,7 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
   // Copy operands before the frame-index.
   for (unsigned i = 0; i < OperIdx; ++i)
-    MIB.addOperand(MI->getOperand(i));
+    MIB.add(MI->getOperand(i));
   // Add frame index operands recognized by stackmaps.cpp
   if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
     // indirect-mem-ref tag, size, #FI, offset.
@@ -1237,18 +1237,18 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
     assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
     MIB.addImm(StackMaps::IndirectMemRefOp);
     MIB.addImm(MFI.getObjectSize(FI));
-    MIB.addOperand(MI->getOperand(OperIdx));
+    MIB.add(MI->getOperand(OperIdx));
     MIB.addImm(0);
   } else {
     // direct-mem-ref tag, #FI, offset.
     // Used by patchpoint, and direct alloca arguments to statepoints
     MIB.addImm(StackMaps::DirectMemRefOp);
-    MIB.addOperand(MI->getOperand(OperIdx));
+    MIB.add(MI->getOperand(OperIdx));
     MIB.addImm(0);
   }
   // Copy the operands after the frame index.
   for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
-    MIB.addOperand(MI->getOperand(i));
+    MIB.add(MI->getOperand(i));
 
   // Inherit previous memory operands.
   MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
diff --git a/lib/CodeGen/TwoAddressInstructionPass.cpp b/lib/CodeGen/TwoAddressInstructionPass.cpp
index 0f1b2ed994b..a0c1a4f70b3 100644
--- a/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -1785,7 +1785,7 @@ eliminateRegSequence(MachineBasicBlock::iterator &MBBI) {
     MachineInstr *CopyMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                    TII->get(TargetOpcode::COPY))
                                .addReg(DstReg, RegState::Define, SubIdx)
-                               .addOperand(UseMO);
+                               .add(UseMO);
 
     // The first def needs an <undef> flag because there is no live register
     // before it.
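
For readers skimming the per-target hunks that follow: every call site changes the same mechanical way. A minimal sketch of the pattern (illustrative only, not code from this patch; cloneOpsOntoCopy and its use of COPY are hypothetical):

    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    // Build a new COPY in front of MI and transfer every operand onto it.
    static void cloneOpsOntoCopy(MachineBasicBlock &MBB, MachineInstr &MI,
                                 const TargetInstrInfo &TII) {
      MachineInstrBuilder MIB =
          BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(TargetOpcode::COPY));
      for (MachineOperand &MO : MI.operands())
        MIB.add(MO); // was: MIB.addOperand(MO);
    }

The one-word name also reads consistently next to the overload discussed in D28057 for appending a whole list of operands in a single call.
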
diff --git a/lib/CodeGen/XRayInstrumentation.cpp b/lib/CodeGen/XRayInstrumentation.cpp
index 63bd762eeb2..760683bc3bf 100644
--- a/lib/CodeGen/XRayInstrumentation.cpp
+++ b/lib/CodeGen/XRayInstrumentation.cpp
@@ -81,7 +81,7 @@ void XRayInstrumentation::replaceRetWithPatchableRet(MachineFunction &MF,
         auto MIB = BuildMI(MBB, T, T.getDebugLoc(), TII->get(Opc))
                        .addImm(T.getOpcode());
         for (auto &MO : T.operands())
-          MIB.addOperand(MO);
+          MIB.add(MO);
         Terminators.push_back(&T);
       }
     }
diff --git a/lib/Target/AArch64/AArch64CallLowering.cpp b/lib/Target/AArch64/AArch64CallLowering.cpp
index a4950af3209..75153cca444 100644
--- a/lib/Target/AArch64/AArch64CallLowering.cpp
+++ b/lib/Target/AArch64/AArch64CallLowering.cpp
@@ -264,7 +264,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   // uses of arg registers.
   auto MIB = MIRBuilder.buildInstrNoInsert(Callee.isReg() ? AArch64::BLR
                                                           : AArch64::BL);
-  MIB.addOperand(Callee);
+  MIB.add(Callee);
 
   // Tell the call which registers are clobbered.
   auto TRI = MF.getSubtarget().getRegisterInfo();
diff --git a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
index 8b186328d12..2dfcd2d1c39 100644
--- a/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64ConditionOptimizer.cpp
@@ -265,10 +265,10 @@ void AArch64ConditionOptimizer::modifyCmp(MachineInstr *CmpMI,
 
   // Change immediate in comparison instruction (ADDS or SUBS).
   BuildMI(*MBB, CmpMI, CmpMI->getDebugLoc(), TII->get(Opc))
-      .addOperand(CmpMI->getOperand(0))
-      .addOperand(CmpMI->getOperand(1))
+      .add(CmpMI->getOperand(0))
+      .add(CmpMI->getOperand(1))
       .addImm(Imm)
-      .addOperand(CmpMI->getOperand(3));
+      .add(CmpMI->getOperand(3));
   CmpMI->eraseFromParent();
 
   // The fact that this comparison was picked ensures that it's related to the
@@ -278,7 +278,7 @@ void AArch64ConditionOptimizer::modifyCmp(MachineInstr *CmpMI,
   // Change condition in branch instruction.
   BuildMI(*MBB, BrMI, BrMI.getDebugLoc(), TII->get(AArch64::Bcc))
       .addImm(Cmp)
-      .addOperand(BrMI.getOperand(1));
+      .add(BrMI.getOperand(1));
   BrMI.eraseFromParent();
 
   MBB->updateTerminator();
diff --git a/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index da09b36cac9..00a0111f2bd 100644
--- a/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -594,7 +594,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
     // Insert a SUBS Rn, #0 instruction instead of the cbz / cbnz.
     BuildMI(*Head, Head->end(), TermDL, MCID)
         .addReg(DestReg, RegState::Define | RegState::Dead)
-        .addOperand(HeadCond[2])
+        .add(HeadCond[2])
         .addImm(0)
         .addImm(0);
   // SUBS uses the GPR*sp register classes.
@@ -650,13 +650,12 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
   if (CmpMI->getOperand(FirstOp + 1).isReg())
     MRI->constrainRegClass(CmpMI->getOperand(FirstOp + 1).getReg(),
                            TII->getRegClass(MCID, 1, TRI, *MF));
-  MachineInstrBuilder MIB =
-      BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), MCID)
-          .addOperand(CmpMI->getOperand(FirstOp)); // Register Rn
+  MachineInstrBuilder MIB = BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), MCID)
+                                .add(CmpMI->getOperand(FirstOp)); // Register Rn
   if (isZBranch)
     MIB.addImm(0); // cbz/cbnz Rn -> ccmp Rn, #0
   else
-    MIB.addOperand(CmpMI->getOperand(FirstOp + 1)); // Register Rm / Immediate
+    MIB.add(CmpMI->getOperand(FirstOp + 1)); // Register Rm / Immediate
   MIB.addImm(NZCV).addImm(HeadCmpBBCC);
 
   // If CmpMI was a terminator, we need a new conditional branch to replace it.
@@ -666,7 +665,7 @@ void SSACCmpConv::convert(SmallVectorImpl<MachineBasicBlock *> &RemovedBlocks) {
                  CmpMI->getOpcode() == AArch64::CBNZX;
     BuildMI(*Head, CmpMI, CmpMI->getDebugLoc(), TII->get(AArch64::Bcc))
         .addImm(isNZ ? AArch64CC::NE : AArch64CC::EQ)
-        .addOperand(CmpMI->getOperand(1)); // Branch target.
+        .add(CmpMI->getOperand(1)); // Branch target.
   }
   CmpMI->eraseFromParent();
   Head->updateTerminator();
diff --git a/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index fe1c0beee0e..6bfbce401e8 100644
--- a/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -70,9 +70,9 @@ static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,
     const MachineOperand &MO = OldMI.getOperand(i);
     assert(MO.isReg() && MO.getReg());
     if (MO.isUse())
-      UseMI.addOperand(MO);
+      UseMI.add(MO);
     else
-      DefMI.addOperand(MO);
+      DefMI.add(MO);
   }
 }
 
@@ -112,7 +112,7 @@ static bool tryOrrMovk(uint64_t UImm, uint64_t OrrImm, MachineInstr &MI,
   // Create the ORR-immediate instruction.
   MachineInstrBuilder MIB =
       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
-          .addOperand(MI.getOperand(0))
+          .add(MI.getOperand(0))
           .addReg(AArch64::XZR)
           .addImm(Encoding);
 
@@ -179,7 +179,7 @@ static bool tryToreplicateChunks(uint64_t UImm, MachineInstr &MI,
   // Create the ORR-immediate instruction.
   MachineInstrBuilder MIB =
       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
-          .addOperand(MI.getOperand(0))
+          .add(MI.getOperand(0))
           .addReg(AArch64::XZR)
           .addImm(Encoding);
 
@@ -362,7 +362,7 @@ static bool trySequenceOfOnes(uint64_t UImm, MachineInstr &MI,
   AArch64_AM::processLogicalImmediate(OrrImm, 64, Encoding);
   MachineInstrBuilder MIB =
       BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ORRXri))
-          .addOperand(MI.getOperand(0))
+          .add(MI.getOperand(0))
           .addReg(AArch64::XZR)
           .addImm(Encoding);
 
@@ -425,7 +425,7 @@ bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
     unsigned Opc = (BitSize == 32 ? AArch64::ORRWri : AArch64::ORRXri);
     MachineInstrBuilder MIB =
         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc))
-            .addOperand(MI.getOperand(0))
+            .add(MI.getOperand(0))
             .addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
             .addImm(Encoding);
     transferImpOps(MI, MIB, MIB);
@@ -627,7 +627,7 @@ bool AArch64ExpandPseudo::expandCMP_SWAP(
       .addReg(Addr.getReg());
   BuildMI(LoadCmpBB, DL, TII->get(CmpOp), ZeroReg)
       .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
-      .addOperand(Desired)
+      .add(Desired)
       .addImm(ExtendImm);
   BuildMI(LoadCmpBB, DL, TII->get(AArch64::Bcc))
       .addImm(AArch64CC::NE)
@@ -643,9 +643,7 @@ bool AArch64ExpandPseudo::expandCMP_SWAP(
   StoreBB->addLiveIn(New.getReg());
   addPostLoopLiveIns(StoreBB, LiveRegs);
 
-  BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg)
-      .addOperand(New)
-      .addOperand(Addr);
+  BuildMI(StoreBB, DL, TII->get(StlrOp), StatusReg).add(New).add(Addr);
   BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
       .addReg(StatusReg, RegState::Kill)
       .addMBB(LoadCmpBB);
@@ -710,7 +708,7 @@ bool AArch64ExpandPseudo::expandCMP_SWAP_128(
       .addReg(Addr.getReg());
   BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
       .addReg(DestLo.getReg(), getKillRegState(DestLo.isDead()))
-      .addOperand(DesiredLo)
+      .add(DesiredLo)
       .addImm(0);
   BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
       .addUse(AArch64::WZR)
@@ -718,7 +716,7 @@ bool AArch64ExpandPseudo::expandCMP_SWAP_128(
       .addImm(AArch64CC::EQ);
   BuildMI(LoadCmpBB, DL, TII->get(AArch64::SUBSXrs), AArch64::XZR)
       .addReg(DestHi.getReg(), getKillRegState(DestHi.isDead()))
-      .addOperand(DesiredHi)
+      .add(DesiredHi)
       .addImm(0);
   BuildMI(LoadCmpBB, DL, TII->get(AArch64::CSINCWr), StatusReg)
       .addUse(StatusReg, RegState::Kill)
@@ -738,9 +736,9 @@ bool AArch64ExpandPseudo::expandCMP_SWAP_128(
   StoreBB->addLiveIn(NewHi.getReg());
   addPostLoopLiveIns(StoreBB, LiveRegs);
   BuildMI(StoreBB, DL, TII->get(AArch64::STLXPX), StatusReg)
-      .addOperand(NewLo)
-      .addOperand(NewHi)
-      .addOperand(Addr);
+      .add(NewLo)
+      .add(NewHi)
+      .add(Addr);
   BuildMI(StoreBB, DL, TII->get(AArch64::CBNZW))
       .addReg(StatusReg, RegState::Kill)
       .addMBB(LoadCmpBB);
@@ -825,8 +823,8 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
     MachineInstrBuilder MIB1 =
         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opcode),
                 MI.getOperand(0).getReg())
-            .addOperand(MI.getOperand(1))
-            .addOperand(MI.getOperand(2))
+            .add(MI.getOperand(1))
+            .add(MI.getOperand(2))
             .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
     transferImpOps(MI, MIB1, MIB1);
     MI.eraseFromParent();
@@ -842,7 +840,7 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg);
     MachineInstrBuilder MIB2 =
         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::LDRXui))
-            .addOperand(MI.getOperand(0))
+            .add(MI.getOperand(0))
             .addReg(DstReg);
 
     if (MO1.isGlobal()) {
@@ -878,13 +876,13 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
     unsigned DstReg = MI.getOperand(0).getReg();
     MachineInstrBuilder MIB1 =
         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADRP), DstReg)
-            .addOperand(MI.getOperand(1));
+            .add(MI.getOperand(1));
     MachineInstrBuilder MIB2 =
         BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(AArch64::ADDXri))
-            .addOperand(MI.getOperand(0))
+            .add(MI.getOperand(0))
             .addReg(DstReg)
-            .addOperand(MI.getOperand(2))
+            .add(MI.getOperand(2))
             .addImm(0);
 
     transferImpOps(MI, MIB1, MIB2);
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index f5b8c35375f..65e94699ca3 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -362,7 +362,7 @@ static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
   unsigned OpndIdx = 0;
   for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
        ++OpndIdx)
-    MIB.addOperand(MBBI->getOperand(OpndIdx));
+    MIB.add(MBBI->getOperand(OpndIdx));
 
   assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
          "Unexpected immediate offset in first/last callee-save save/restore "
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 5c8acba26aa..f5a2ee03c57 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -369,7 +369,7 @@ void AArch64InstrInfo::instantiateCondBranch(
     // Folded compare-and-branch
     // Note that we use addOperand instead of addReg to keep the flags.
     const MachineInstrBuilder MIB =
-        BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
+        BuildMI(&MBB, DL, get(Cond[1].getImm())).add(Cond[2]);
     if (Cond.size() > 3)
       MIB.addImm(Cond[3].getImm());
     MIB.addMBB(TBB);
@@ -3793,7 +3793,7 @@ void AArch64InstrInfo::genAlternativeCodeSequence(
       MachineInstrBuilder MIB1 =
           BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
               .addReg(ZeroReg)
-              .addOperand(Root.getOperand(2));
+              .add(Root.getOperand(2));
       InsInstrs.push_back(MIB1);
       InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
       MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 8a76c42b589..337b9cedca3 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -595,7 +595,7 @@ AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
   MachineInstrBuilder MIB;
   MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
             .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
-            .addOperand(BaseRegOp)
+            .add(BaseRegOp)
             .addImm(OffsetImm)
             .setMemRefs(I->mergeMemRefsWith(*MergeMI));
   (void)MIB;
@@ -688,9 +688,9 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
   DebugLoc DL = I->getDebugLoc();
   MachineBasicBlock *MBB = I->getParent();
   MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
-            .addOperand(getLdStRegOp(*RtMI))
-            .addOperand(getLdStRegOp(*Rt2MI))
-            .addOperand(BaseRegOp)
+            .add(getLdStRegOp(*RtMI))
+            .add(getLdStRegOp(*Rt2MI))
+            .add(BaseRegOp)
             .addImm(OffsetImm)
             .setMemRefs(I->mergeMemRefsWith(*Paired));
 
@@ -1210,19 +1210,19 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
   if (!isPairedLdSt(*I)) {
     // Non-paired instruction.
     MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
-              .addOperand(getLdStRegOp(*Update))
-              .addOperand(getLdStRegOp(*I))
-              .addOperand(getLdStBaseOp(*I))
+              .add(getLdStRegOp(*Update))
+              .add(getLdStRegOp(*I))
+              .add(getLdStBaseOp(*I))
               .addImm(Value)
              .setMemRefs(I->memoperands_begin(), I->memoperands_end());
   } else {
     // Paired instruction.
    int Scale = getMemScale(*I);
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
-              .addOperand(getLdStRegOp(*Update))
-              .addOperand(getLdStRegOp(*I, 0))
-              .addOperand(getLdStRegOp(*I, 1))
-              .addOperand(getLdStBaseOp(*I))
+              .add(getLdStRegOp(*Update))
+              .add(getLdStRegOp(*I, 0))
+              .add(getLdStRegOp(*I, 1))
+              .add(getLdStBaseOp(*I))
               .addImm(Value / Scale)
               .setMemRefs(I->memoperands_begin(), I->memoperands_end());
   }
diff --git a/lib/Target/AMDGPU/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp
index de7ce5cb9e4..13a29d9a56f 100644
--- a/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -266,7 +266,7 @@ R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
       NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
                       TII->get(AMDGPU::getLDSNoRetOp(MI.getOpcode())));
       for (unsigned i = 1, e = MI.getNumOperands(); i < e; ++i) {
-        NewMI.addOperand(MI.getOperand(i));
+        NewMI.add(MI.getOperand(i));
       }
     } else {
       return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
@@ -339,34 +339,34 @@ R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
   case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
   case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
     BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
-        .addOperand(MI.getOperand(0))
-        .addOperand(MI.getOperand(1))
+        .add(MI.getOperand(0))
+        .add(MI.getOperand(1))
         .addImm(isEOP(I)); // Set End of program bit
     break;
   case AMDGPU::RAT_STORE_TYPED_eg:
     BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
-        .addOperand(MI.getOperand(0))
-        .addOperand(MI.getOperand(1))
-        .addOperand(MI.getOperand(2))
+        .add(MI.getOperand(0))
+        .add(MI.getOperand(1))
+        .add(MI.getOperand(2))
         .addImm(isEOP(I)); // Set End of program bit
     break;
   case AMDGPU::BRANCH:
     BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
-        .addOperand(MI.getOperand(0));
+        .add(MI.getOperand(0));
     break;
   case AMDGPU::BRANCH_COND_f32: {
     MachineInstr *NewMI =
         BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
                 AMDGPU::PREDICATE_BIT)
-            .addOperand(MI.getOperand(1))
+            .add(MI.getOperand(1))
             .addImm(AMDGPU::PRED_SETNE)
             .addImm(0); // Flags
     TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
     BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
-        .addOperand(MI.getOperand(0))
+        .add(MI.getOperand(0))
         .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
     break;
   }
@@ -375,12 +375,12 @@ R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     MachineInstr *NewMI =
         BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
                 AMDGPU::PREDICATE_BIT)
-            .addOperand(MI.getOperand(1))
+            .add(MI.getOperand(1))
             .addImm(AMDGPU::PRED_SETNE_INT)
             .addImm(0); // Flags
     TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
     BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
-        .addOperand(MI.getOperand(0))
+        .add(MI.getOperand(0))
         .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
     break;
   }
@@ -408,13 +408,13 @@ R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
       return BB;
     unsigned CfInst = (MI.getOpcode() == AMDGPU::EG_ExportSwz) ? 84 : 40;
     BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
-        .addOperand(MI.getOperand(0))
-        .addOperand(MI.getOperand(1))
-        .addOperand(MI.getOperand(2))
-        .addOperand(MI.getOperand(3))
-        .addOperand(MI.getOperand(4))
-        .addOperand(MI.getOperand(5))
-        .addOperand(MI.getOperand(6))
+        .add(MI.getOperand(0))
+        .add(MI.getOperand(1))
+        .add(MI.getOperand(2))
+        .add(MI.getOperand(3))
+        .add(MI.getOperand(4))
+        .add(MI.getOperand(5))
+        .add(MI.getOperand(6))
         .addImm(CfInst)
         .addImm(EOP);
     break;
diff --git a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
index 6a422e70fe1..43cb15f502c 100644
--- a/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
+++ b/lib/Target/AMDGPU/SIFixSGPRCopies.cpp
@@ -234,8 +234,9 @@ static bool foldVGPRCopyIntoRegSequence(MachineInstr &MI,
 
     unsigned TmpReg = MRI.createVirtualRegister(NewSrcRC);
 
-    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY), TmpReg)
-        .addOperand(MI.getOperand(I));
+    BuildMI(*MI.getParent(), &MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY),
+            TmpReg)
+        .add(MI.getOperand(I));
 
     MI.getOperand(I).setReg(TmpReg);
   }
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 9140fe6cd14..4c0c04cb253 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1464,16 +1464,16 @@ static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
                    VGPRIndexMode::SRC0_ENABLE : VGPRIndexMode::DST_ENABLE;
     if (Offset == 0) {
       MachineInstr *SetOn =
-          BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
-          .addOperand(*Idx)
-          .addImm(IdxMode);
+          BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
+              .add(*Idx)
+              .addImm(IdxMode);
 
       SetOn->getOperand(3).setIsUndef();
     } else {
       unsigned Tmp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
       BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), Tmp)
-          .addOperand(*Idx)
-          .addImm(Offset);
+          .add(*Idx)
+          .addImm(Offset);
       MachineInstr *SetOn =
           BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_ON))
              .addReg(Tmp, RegState::Kill)
             .addImm(IdxMode);
@@ -1486,12 +1486,11 @@ static bool setM0ToIndexFromSGPR(const SIInstrInfo *TII,
   }
 
   if (Offset == 0) {
-    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
-      .addOperand(*Idx);
+    BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0).add(*Idx);
   } else {
     BuildMI(*MBB, I, DL, TII->get(AMDGPU::S_ADD_I32), AMDGPU::M0)
-        .addOperand(*Idx)
-        .addImm(Offset);
+        .add(*Idx)
+        .addImm(Offset);
   }
 
   return true;
@@ -1628,9 +1627,9 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
     assert(Offset == 0);
 
     BuildMI(MBB, I, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dst)
-        .addOperand(*SrcVec)
-        .addOperand(*Val)
-        .addImm(SubReg);
+        .add(*SrcVec)
+        .add(*Val)
+        .addImm(SubReg);
 
     MI.eraseFromParent();
     return &MBB;
@@ -1642,11 +1641,11 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
     if (UseGPRIdxMode) {
       BuildMI(MBB, I, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
-        .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
-        .addOperand(*Val)
-        .addReg(Dst, RegState::ImplicitDefine)
-        .addReg(SrcVec->getReg(), RegState::Implicit)
-        .addReg(AMDGPU::M0, RegState::Implicit);
+          .addReg(SrcVec->getReg(), RegState::Undef, SubReg) // vdst
+          .add(*Val)
+          .addReg(Dst, RegState::ImplicitDefine)
+          .addReg(SrcVec->getReg(), RegState::Implicit)
+          .addReg(AMDGPU::M0, RegState::Implicit);
 
       BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
     } else {
@@ -1655,7 +1654,7 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
       BuildMI(MBB, I, DL, MovRelDesc)
          .addReg(Dst, RegState::Define)
          .addReg(SrcVec->getReg())
-         .addOperand(*Val)
+         .add(*Val)
          .addImm(SubReg - AMDGPU::sub0);
   }
 
@@ -1688,18 +1687,18 @@ static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
 
   if (UseGPRIdxMode) {
     BuildMI(*LoopBB, InsPt, DL, TII->get(AMDGPU::V_MOV_B32_indirect))
-      .addReg(PhiReg, RegState::Undef, SubReg) // vdst
-      .addOperand(*Val) // src0
-      .addReg(Dst, RegState::ImplicitDefine)
-      .addReg(PhiReg, RegState::Implicit)
-      .addReg(AMDGPU::M0, RegState::Implicit);
+        .addReg(PhiReg, RegState::Undef, SubReg) // vdst
+        .add(*Val) // src0
+        .addReg(Dst, RegState::ImplicitDefine)
+        .addReg(PhiReg, RegState::Implicit)
+        .addReg(AMDGPU::M0, RegState::Implicit);
   } else {
     const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC));
 
     BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
         .addReg(Dst, RegState::Define)
         .addReg(PhiReg)
-        .addOperand(*Val)
+        .add(*Val)
         .addImm(SubReg - AMDGPU::sub0);
   }
 
@@ -1738,15 +1737,15 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
   case AMDGPU::SI_INIT_M0: {
     BuildMI(*BB, MI.getIterator(), MI.getDebugLoc(),
             TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
-        .addOperand(MI.getOperand(0));
+        .add(MI.getOperand(0));
     MI.eraseFromParent();
     return BB;
   }
   case AMDGPU::GET_GROUPSTATICSIZE: {
     DebugLoc DL = MI.getDebugLoc();
     BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_MOV_B32))
-      .addOperand(MI.getOperand(0))
-      .addImm(MFI->getLDSSize());
+        .add(MI.getOperand(0))
+        .addImm(MFI->getLDSSize());
     MI.eraseFromParent();
     return BB;
   }
@@ -1797,7 +1796,7 @@ MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
     const SIInstrInfo *TII = getSubtarget()->getInstrInfo();
     const DebugLoc &DL = MI.getDebugLoc();
     MachineInstr *Br = BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CBRANCH_SCC1))
-                           .addOperand(MI.getOperand(0));
+                           .add(MI.getOperand(0));
     Br->getOperand(1).setIsUndef(true); // read undef SCC
     MI.eraseFromParent();
     return BB;
diff --git a/lib/Target/AMDGPU/SIInsertSkips.cpp b/lib/Target/AMDGPU/SIInsertSkips.cpp
index 91e4bf755c5..fe1464726af 100644
--- a/lib/Target/AMDGPU/SIInsertSkips.cpp
+++ b/lib/Target/AMDGPU/SIInsertSkips.cpp
@@ -195,8 +195,8 @@ void SIInsertSkips::kill(MachineInstr &MI) {
     }
   } else {
     BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32))
-      .addImm(0)
-      .addOperand(Op);
+        .addImm(0)
+        .add(Op);
   }
 }
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index 26a8d22062a..e9dc6f57ab3 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -870,9 +870,10 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
     MachineInstr *MovRel =
         BuildMI(MBB, MI, DL, MovRelDesc)
             .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
-            .addOperand(MI.getOperand(2))
+            .add(MI.getOperand(2))
             .addReg(VecReg, RegState::ImplicitDefine)
-            .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));
+            .addReg(VecReg,
+                    RegState::Implicit | (IsUndef ? RegState::Undef : 0));
 
     const int ImpDefIdx =
         MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
@@ -897,14 +898,14 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
     // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                       .addReg(RegLo)
-                      .addOperand(MI.getOperand(1)));
+                      .add(MI.getOperand(1)));
 
    MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                                  .addReg(RegHi);
 
    if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
      MIB.addImm(0);
    else
-      MIB.addOperand(MI.getOperand(2));
+      MIB.add(MI.getOperand(2));
 
    Bundler.append(MIB);
    llvm::finalizeBundle(MBB, Bundler.begin());
@@ -1638,13 +1639,13 @@ MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
 
   return BuildMI(*MBB, MI, MI.getDebugLoc(),
                  get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32))
-      .addOperand(*Dst)
+      .add(*Dst)
       .addImm(0) // Src0 mods
-      .addOperand(*Src0)
+      .add(*Src0)
       .addImm(0) // Src1 mods
-      .addOperand(*Src1)
+      .add(*Src1)
       .addImm(0) // Src mods
-      .addOperand(*Src2)
+      .add(*Src2)
       .addImm(0) // clamp
       .addImm(0); // omod
 }
@@ -2238,7 +2239,7 @@ void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
   unsigned Reg = MRI.createVirtualRegister(VRC);
 
   DebugLoc DL = MBB->findDebugLoc(I);
-  BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).addOperand(MO);
+  BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
   MO.ChangeToRegister(Reg, false);
 }
 
@@ -2564,8 +2565,8 @@ void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
     return;
 
   unsigned DstReg = MRI.createVirtualRegister(DstRC);
-  MachineInstr *Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg)
-                           .addOperand(Op);
+  MachineInstr *Copy =
+      BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
 
   Op.setReg(DstReg);
   Op.setSubReg(0);
@@ -2810,13 +2811,13 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
       // Regular buffer load / store.
       MachineInstrBuilder MIB =
           BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
-              .addOperand(*VData)
+              .add(*VData)
               .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
                                           // This will be replaced later
                                           // with the new value of vaddr.
-              .addOperand(*SRsrc)
-              .addOperand(*SOffset)
-              .addOperand(*Offset);
+              .add(*SRsrc)
+              .add(*SOffset)
+              .add(*Offset);
 
       // Atomics do not have this operand.
       if (const MachineOperand *GLC =
@@ -2836,14 +2837,14 @@ void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
     } else {
       // Atomics with return.
       Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
-                   .addOperand(*VData)
-                   .addOperand(*VDataIn)
+                   .add(*VData)
+                   .add(*VDataIn)
                    .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
                                                // This will be replaced later
                                               // with the new value of vaddr.
-                   .addOperand(*SRsrc)
-                   .addOperand(*SOffset)
-                   .addOperand(*Offset)
+                   .add(*SRsrc)
+                   .add(*SOffset)
+                   .add(*Offset)
                    .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
                    .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
     }
@@ -3112,15 +3113,13 @@ void SIInstrInfo::splitScalar64BitUnaryOp(
   const TargetRegisterClass *NewDestSubRC =
       RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
 
   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
-  BuildMI(MBB, MII, DL, InstDesc, DestSub0)
-    .addOperand(SrcReg0Sub0);
+  BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);
 
   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                        AMDGPU::sub1, Src0SubRC);
 
   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
-  BuildMI(MBB, MII, DL, InstDesc, DestSub1)
-    .addOperand(SrcReg0Sub1);
+  BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);
 
   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
@@ -3174,8 +3173,8 @@ void SIInstrInfo::splitScalar64BitBinaryOp(
 
   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
   MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
-                              .addOperand(SrcReg0Sub0)
-                              .addOperand(SrcReg1Sub0);
+                              .add(SrcReg0Sub0)
+                              .add(SrcReg1Sub0);
 
   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                        AMDGPU::sub1, Src0SubRC);
@@ -3184,8 +3183,8 @@ void SIInstrInfo::splitScalar64BitBinaryOp(
 
   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
   MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
-                              .addOperand(SrcReg0Sub1)
-                              .addOperand(SrcReg1Sub1);
+                              .add(SrcReg0Sub1)
+                              .add(SrcReg1Sub1);
 
   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
@@ -3231,13 +3230,9 @@ void SIInstrInfo::splitScalar64BitBCNT(
   MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                       AMDGPU::sub1, SrcSubRC);
 
-  BuildMI(MBB, MII, DL, InstDesc, MidReg)
-    .addOperand(SrcRegSub0)
-    .addImm(0);
+  BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
 
-  BuildMI(MBB, MII, DL, InstDesc, ResultReg)
-    .addOperand(SrcRegSub1)
-    .addReg(MidReg);
+  BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
 
   MRI.replaceRegWith(Dest.getReg(), ResultReg);
diff --git a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
index 99fe96c0be2..ae5aefc2676 100644
--- a/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
+++ b/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -360,25 +360,24 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
   unsigned DestReg = MRI->createVirtualRegister(SuperRC);
 
   DebugLoc DL = I->getDebugLoc();
-  MachineInstrBuilder Read2
-    = BuildMI(*MBB, Paired, DL, Read2Desc, DestReg)
-      .addOperand(*AddrReg) // addr
-      .addImm(NewOffset0) // offset0
-      .addImm(NewOffset1) // offset1
-      .addImm(0) // gds
-      .addMemOperand(*I->memoperands_begin())
-      .addMemOperand(*Paired->memoperands_begin());
+  MachineInstrBuilder Read2 = BuildMI(*MBB, Paired, DL, Read2Desc, DestReg)
+                                  .add(*AddrReg) // addr
+                                  .addImm(NewOffset0) // offset0
+                                  .addImm(NewOffset1) // offset1
+                                  .addImm(0) // gds
+                                  .addMemOperand(*I->memoperands_begin())
+                                  .addMemOperand(*Paired->memoperands_begin());
   (void)Read2;
 
   const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
 
   // Copy to the old destination registers.
   BuildMI(*MBB, Paired, DL, CopyDesc)
-      .addOperand(*Dest0) // Copy to same destination including flags and sub reg.
-      .addReg(DestReg, 0, SubRegIdx0);
+      .add(*Dest0) // Copy to same destination including flags and sub reg.
+      .addReg(DestReg, 0, SubRegIdx0);
   MachineInstr *Copy1 = BuildMI(*MBB, Paired, DL, CopyDesc)
-                            .addOperand(*Dest1)
-                            .addReg(DestReg, RegState::Kill, SubRegIdx1);
+                            .add(*Dest1)
+                            .addReg(DestReg, RegState::Kill, SubRegIdx1);
 
   moveInstsAfter(Copy1, InstsToMove);
 
@@ -436,16 +435,15 @@ MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
   const MCInstrDesc &Write2Desc = TII->get(Opc);
   DebugLoc DL = I->getDebugLoc();
 
-  MachineInstrBuilder Write2
-    = BuildMI(*MBB, Paired, DL, Write2Desc)
-      .addOperand(*Addr) // addr
-      .addOperand(*Data0) // data0
-      .addOperand(*Data1) // data1
-      .addImm(NewOffset0) // offset0
-      .addImm(NewOffset1) // offset1
-      .addImm(0) // gds
-      .addMemOperand(*I->memoperands_begin())
-      .addMemOperand(*Paired->memoperands_begin());
+  MachineInstrBuilder Write2 = BuildMI(*MBB, Paired, DL, Write2Desc)
+                                   .add(*Addr) // addr
+                                   .add(*Data0) // data0
+                                   .add(*Data1) // data1
+                                   .addImm(NewOffset0) // offset0
+                                   .addImm(NewOffset1) // offset1
+                                   .addImm(0) // gds
+                                   .addMemOperand(*I->memoperands_begin())
+                                   .addMemOperand(*Paired->memoperands_begin());
 
   moveInstsAfter(Write2, InstsToMove);
diff --git a/lib/Target/AMDGPU/SILowerControlFlow.cpp b/lib/Target/AMDGPU/SILowerControlFlow.cpp
index 7ed18f27e59..4a7dff44dfc 100644
--- a/lib/Target/AMDGPU/SILowerControlFlow.cpp
+++ b/lib/Target/AMDGPU/SILowerControlFlow.cpp
@@ -175,9 +175,8 @@ void SILowerControlFlow::emitIf(MachineInstr &MI) {
 
   // Insert a pseudo terminator to help keep the verifier happy. This will also
   // be used later when inserting skips.
-  MachineInstr *NewBr =
-      BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
-      .addOperand(MI.getOperand(2));
+  MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::SI_MASK_BRANCH))
+                            .add(MI.getOperand(2));
 
   if (!LIS) {
     MI.eraseFromParent();
@@ -221,7 +220,7 @@ void SILowerControlFlow::emitElse(MachineInstr &MI) {
   // the src like it does.
   unsigned CopyReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
   BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
-      .addOperand(MI.getOperand(1)); // Saved EXEC
+      .add(MI.getOperand(1)); // Saved EXEC
 
   // This must be inserted before phis and any spill code inserted before the
   // else.
@@ -283,10 +282,9 @@ void SILowerControlFlow::emitBreak(MachineInstr &MI) {
   const DebugLoc &DL = MI.getDebugLoc();
   unsigned Dst = MI.getOperand(0).getReg();
 
-  MachineInstr *Or =
-      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
-      .addReg(AMDGPU::EXEC)
-      .addOperand(MI.getOperand(1));
+  MachineInstr *Or = BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
+                         .addReg(AMDGPU::EXEC)
+                         .add(MI.getOperand(1));
 
   if (LIS)
     LIS->ReplaceMachineInstrInMaps(MI, *Or);
@@ -306,13 +304,13 @@ void SILowerControlFlow::emitLoop(MachineInstr &MI) {
   const DebugLoc &DL = MI.getDebugLoc();
 
   MachineInstr *AndN2 =
-      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC)
-      .addReg(AMDGPU::EXEC)
-      .addOperand(MI.getOperand(0));
+      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64_term), AMDGPU::EXEC)
+          .addReg(AMDGPU::EXEC)
+          .add(MI.getOperand(0));
 
   MachineInstr *Branch =
-      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
-      .addOperand(MI.getOperand(1));
+      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
+          .add(MI.getOperand(1));
 
   if (LIS) {
     LIS->ReplaceMachineInstrInMaps(MI, *AndN2);
@@ -328,9 +326,9 @@ void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
   MachineBasicBlock::iterator InsPt = MBB.begin();
   MachineInstr *NewMI =
-      BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
-      .addReg(AMDGPU::EXEC)
-      .addOperand(MI.getOperand(0));
+      BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
+          .addReg(AMDGPU::EXEC)
+          .add(MI.getOperand(0));
 
   if (LIS)
     LIS->ReplaceMachineInstrInMaps(MI, *NewMI);
diff --git a/lib/Target/AMDGPU/SILowerI1Copies.cpp b/lib/Target/AMDGPU/SILowerI1Copies.cpp
index be2e14fd462..3680e02da57 100644
--- a/lib/Target/AMDGPU/SILowerI1Copies.cpp
+++ b/lib/Target/AMDGPU/SILowerI1Copies.cpp
@@ -114,18 +114,18 @@ bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
           assert(Val == 0 || Val == -1);
 
           BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_MOV_B32_e32))
-            .addOperand(Dst)
-            .addImm(Val);
+              .add(Dst)
+              .addImm(Val);
 
           MI.eraseFromParent();
           continue;
         }
       }
 
       BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64))
-        .addOperand(Dst)
-        .addImm(0)
-        .addImm(-1)
-        .addOperand(Src);
+          .add(Dst)
+          .addImm(0)
+          .addImm(-1)
+          .add(Src);
       MI.eraseFromParent();
     } else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) &&
                SrcRC == &AMDGPU::VReg_1RegClass) {
@@ -140,14 +140,14 @@ bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
               MRI.getRegClass(DefInst->getOperand(3).getReg()),
               &AMDGPU::SGPR_64RegClass)) {
         BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_B64))
-          .addOperand(Dst)
-          .addReg(AMDGPU::EXEC)
-          .addOperand(DefInst->getOperand(3));
+            .add(Dst)
+            .addReg(AMDGPU::EXEC)
+            .add(DefInst->getOperand(3));
       } else {
         BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_NE_U32_e64))
-          .addOperand(Dst)
-          .addOperand(Src)
-          .addImm(0);
+            .add(Dst)
+            .add(Src)
+            .addImm(0);
       }
       MI.eraseFromParent();
     }
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 8c4b24a4504..612599b1283 100644
--- a/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -415,14 +415,14 @@ static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
 
   unsigned Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata)->getReg();
   BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
-    .addReg(Reg, getDefRegState(!IsStore))
-    .addOperand(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
-    .addOperand(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
-    .addImm(Offset)
-    .addImm(0) // glc
-    .addImm(0) // slc
-    .addImm(0) // tfe
-    .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+      .addReg(Reg, getDefRegState(!IsStore))
+      .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
+      .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
+      .addImm(Offset)
+      .addImm(0) // glc
+      .addImm(0) // slc
+      .addImm(0) // tfe
+      .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
   return true;
 }
diff --git a/lib/Target/AMDGPU/SIShrinkInstructions.cpp b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
index dd31dc69084..c5f121757e6 100644
--- a/lib/Target/AMDGPU/SIShrinkInstructions.cpp
+++ b/lib/Target/AMDGPU/SIShrinkInstructions.cpp
@@ -497,24 +497,24 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
       int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
       if (Op32DstIdx != -1) {
         // dst
-        Inst32.addOperand(MI.getOperand(0));
+        Inst32.add(MI.getOperand(0));
       } else {
         assert(MI.getOperand(0).getReg() == AMDGPU::VCC &&
                "Unexpected case");
       }
 
-      Inst32.addOperand(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
+      Inst32.add(*TII->getNamedOperand(MI, AMDGPU::OpName::src0));
 
      const MachineOperand *Src1 =
          TII->getNamedOperand(MI, AMDGPU::OpName::src1);
      if (Src1)
-        Inst32.addOperand(*Src1);
+        Inst32.add(*Src1);
 
      if (Src2) {
        int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
        if (Op32Src2Idx != -1) {
-          Inst32.addOperand(*Src2);
+          Inst32.add(*Src2);
        } else {
          // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
          // replaced with an implicit read of vcc. This was already added
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index c8a4132ba1c..ee52b938f72 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -437,14 +437,18 @@ unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
     else
       BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
   } else
-    BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
-      .addImm(Cond[0].getImm()).addOperand(Cond[1]);
+    BuildMI(&MBB, DL, get(BccOpc))
+        .addMBB(TBB)
+        .addImm(Cond[0].getImm())
+        .add(Cond[1]);
   return 1;
 }
 
 // Two-way conditional branch.
-  BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
-    .addImm(Cond[0].getImm()).addOperand(Cond[1]);
+  BuildMI(&MBB, DL, get(BccOpc))
+      .addMBB(TBB)
+      .addImm(Cond[0].getImm())
+      .add(Cond[1]);
   if (isThumb)
     BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0);
   else
@@ -1279,7 +1283,7 @@ void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
     LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA_UPD
                                                  : isThumb1 ? ARM::tLDMIA_UPD
                                                             : ARM::LDMIA_UPD))
-              .addOperand(MI->getOperand(1));
+              .add(MI->getOperand(1));
   } else {
     LDM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
   }
@@ -1288,13 +1292,13 @@ void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {
     STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA_UPD
                                                  : isThumb1 ? ARM::tSTMIA_UPD
                                                             : ARM::STMIA_UPD))
-              .addOperand(MI->getOperand(0));
+              .add(MI->getOperand(0));
   } else {
     STM = BuildMI(*BB, MI, dl, TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
   }
 
-  LDM.addOperand(MI->getOperand(3)).add(predOps(ARMCC::AL));
-  STM.addOperand(MI->getOperand(2)).add(predOps(ARMCC::AL));
+  LDM.add(MI->getOperand(3)).add(predOps(ARMCC::AL));
+  STM.add(MI->getOperand(2)).add(predOps(ARMCC::AL));
 
   // Sort the scratch registers into ascending order.
   const TargetRegisterInfo &TRI = getRegisterInfo();
@@ -1951,14 +1955,14 @@ ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
   const MCInstrDesc &DefDesc = DefMI->getDesc();
   for (unsigned i = 1, e = DefDesc.getNumOperands();
        i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
-    NewMI.addOperand(DefMI->getOperand(i));
+    NewMI.add(DefMI->getOperand(i));
 
   unsigned CondCode = MI.getOperand(3).getImm();
   if (Invert)
     NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
   else
     NewMI.addImm(CondCode);
-  NewMI.addOperand(MI.getOperand(4));
+  NewMI.add(MI.getOperand(4));
 
   // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
   if (NewMI->hasOptionalDef())
@@ -1969,7 +1973,7 @@ ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
   // The tie makes the register allocator ensure the FalseReg is allocated the
   // same register as operand 0.
   FalseReg.setImplicit();
-  NewMI.addOperand(FalseReg);
+  NewMI.add(FalseReg);
   NewMI->tieOperands(0, NewMI->getNumOperands() - 1);
 
   // Update SeenMIs set: register newly created MI and erase removed DefMI.
@@ -2185,7 +2189,7 @@ bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
   // Add the complete list back in.
   MachineInstrBuilder MIB(MF, &*MI);
   for (int i = RegList.size() - 1; i >= 0; --i)
-    MIB.addOperand(RegList[i]);
+    MIB.add(RegList[i]);
 
   return true;
 }
diff --git a/lib/Target/ARM/ARMConstantIslandPass.cpp b/lib/Target/ARM/ARMConstantIslandPass.cpp
index be1a37e3e36..ee468087241 100644
--- a/lib/Target/ARM/ARMConstantIslandPass.cpp
+++ b/lib/Target/ARM/ARMConstantIslandPass.cpp
@@ -1477,7 +1477,9 @@ bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
   // add it to the island.
   U.HighWaterMark = NewIsland;
   U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
-                .addImm(ID).addOperand(CPEMI->getOperand(1)).addImm(Size);
+                .addImm(ID)
+                .add(CPEMI->getOperand(1))
+                .addImm(Size);
   CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
   ++NumCPEs;
 
@@ -1709,8 +1711,8 @@ bool ARMConstantIslands::undoLRSpillRestore() {
         MI->getNumExplicitOperands() == 3) {
       // Create the new insn and copy the predicate from the old.
       BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
-          .addOperand(MI->getOperand(0))
-          .addOperand(MI->getOperand(1));
+          .add(MI->getOperand(0))
+          .add(MI->getOperand(1));
       MI->eraseFromParent();
       MadeChange = true;
     }
diff --git a/lib/Target/ARM/ARMExpandPseudoInsts.cpp b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
index 5043d9cf1e4..cc8fa188e82 100644
--- a/lib/Target/ARM/ARMExpandPseudoInsts.cpp
+++ b/lib/Target/ARM/ARMExpandPseudoInsts.cpp
@@ -97,9 +97,9 @@ void ARMExpandPseudo::TransferImpOps(MachineInstr &OldMI,
     const MachineOperand &MO = OldMI.getOperand(i);
     assert(MO.isReg() && MO.getReg());
     if (MO.isUse())
-      UseMI.addOperand(MO);
+      UseMI.add(MO);
     else
-      DefMI.addOperand(MO);
+      DefMI.add(MO);
   }
 }
 
@@ -415,14 +415,14 @@ void ARMExpandPseudo::ExpandVLD(MachineBasicBlock::iterator &MBBI) {
     MIB.addReg(D3, RegState::Define | getDeadRegState(DstIsDead));
 
   if (TableEntry->isUpdating)
-    MIB.addOperand(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
 
   // Copy the addrmode6 operands.
-  MIB.addOperand(MI.getOperand(OpIdx++));
-  MIB.addOperand(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
   // Copy the am6offset operand.
  if (TableEntry->hasWritebackOperand)
-    MIB.addOperand(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
 
  // For an instruction writing double-spaced subregs, the pseudo instruction
  // has an extra operand that is a use of the super-register.  Record the
    SrcOpIdx = OpIdx++;
 
  // Copy the predicate operands.
-  MIB.addOperand(MI.getOperand(OpIdx++));
-  MIB.addOperand(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
 
  // Copy the super-register source operand used for double-spaced subregs over
  // to the new instruction as an implicit operand.
  if (SrcOpIdx != 0) {
    MachineOperand MO = MI.getOperand(SrcOpIdx);
    MO.setImplicit(true);
-    MIB.addOperand(MO);
+    MIB.add(MO);
  }
  // Add an implicit def for the super-register.
  MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
@@ -467,14 +467,14 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
                                    TII->get(TableEntry->RealOpc));
  unsigned OpIdx = 0;
  if (TableEntry->isUpdating)
-    MIB.addOperand(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
 
  // Copy the addrmode6 operands.
-  MIB.addOperand(MI.getOperand(OpIdx++));
-  MIB.addOperand(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
  // Copy the am6offset operand.
  if (TableEntry->hasWritebackOperand)
-    MIB.addOperand(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
 
  bool SrcIsKill = MI.getOperand(OpIdx).isKill();
  bool SrcIsUndef = MI.getOperand(OpIdx).isUndef();
@@ -490,8 +490,8 @@ void ARMExpandPseudo::ExpandVST(MachineBasicBlock::iterator &MBBI) {
    MIB.addReg(D3, getUndefRegState(SrcIsUndef));
 
  // Copy the predicate operands.
-  MIB.addOperand(MI.getOperand(OpIdx++));
-  MIB.addOperand(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
 
  if (SrcIsKill && !SrcIsUndef) // Add an implicit kill for the super-reg.
    MIB->addRegisterKilled(SrcReg, TRI, true);
@@ -549,14 +549,14 @@ void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
  }
 
  if (TableEntry->isUpdating)
-    MIB.addOperand(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
 
  // Copy the addrmode6 operands.
-  MIB.addOperand(MI.getOperand(OpIdx++));
-  MIB.addOperand(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
  // Copy the am6offset operand.
  if (TableEntry->hasWritebackOperand)
-    MIB.addOperand(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
 
  // Grab the super-register source.
  MachineOperand MO = MI.getOperand(OpIdx++);
@@ -579,12 +579,12 @@ void ARMExpandPseudo::ExpandLaneOp(MachineBasicBlock::iterator &MBBI) {
    OpIdx += 1;
 
  // Copy the predicate operands.
-  MIB.addOperand(MI.getOperand(OpIdx++));
-  MIB.addOperand(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
 
  // Copy the super-register source to be an implicit source.
  MO.setImplicit(true);
-  MIB.addOperand(MO);
+  MIB.add(MO);
  if (TableEntry->IsLoad)
    // Add an implicit def for the super-register.
    MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
@@ -605,9 +605,9 @@ void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI,
  unsigned OpIdx = 0;
 
  // Transfer the destination register operand.
-  MIB.addOperand(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
  if (IsExt)
-    MIB.addOperand(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
 
  bool SrcIsKill = MI.getOperand(OpIdx).isKill();
  unsigned SrcReg = MI.getOperand(OpIdx++).getReg();
@@ -616,11 +616,11 @@ void ARMExpandPseudo::ExpandVTBL(MachineBasicBlock::iterator &MBBI,
  MIB.addReg(D0);
 
  // Copy the other source register operand.
-  MIB.addOperand(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
 
  // Copy the predicate operands.
-  MIB.addOperand(MI.getOperand(OpIdx++));
-  MIB.addOperand(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
+  MIB.add(MI.getOperand(OpIdx++));
 
  // Add an implicit kill and use for the super-reg.
  MIB.addReg(SrcReg, RegState::Implicit | getKillRegState(SrcIsKill));
@@ -819,7 +819,7 @@ bool ARMExpandPseudo::ExpandCMP_SWAP(MachineBasicBlock &MBB,
  unsigned CMPrr = IsThumb ? ARM::tCMPhir : ARM::CMPrr;
  BuildMI(LoadCmpBB, DL, TII->get(CMPrr))
      .addReg(Dest.getReg(), getKillRegState(Dest.isDead()))
-      .addOperand(Desired)
+      .add(Desired)
      .add(predOps(ARMCC::AL));
  unsigned Bcc = IsThumb ? ARM::tBcc : ARM::Bcc;
  BuildMI(LoadCmpBB, DL, TII->get(Bcc))
@@ -839,8 +839,8 @@ bool ARMExpandPseudo::ExpandCMP_SWAP(MachineBasicBlock &MBB,
 
  MIB = BuildMI(StoreBB, DL, TII->get(StrexOp), StatusReg);
-  MIB.addOperand(New);
-  MIB.addOperand(Addr);
+  MIB.add(New);
+  MIB.add(Addr);
  if (StrexOp == ARM::t2STREX)
    MIB.addImm(0); // a 32-bit Thumb strex (only) allows an offset.
  MIB.add(predOps(ARMCC::AL));
@@ -961,7 +961,7 @@ bool ARMExpandPseudo::ExpandCMP_SWAP_64(MachineBasicBlock &MBB,
  unsigned STREXD = IsThumb ? ARM::t2STREXD : ARM::STREXD;
  MIB = BuildMI(StoreBB, DL, TII->get(STREXD), StatusReg);
  addExclusiveRegPair(MIB, New, 0, IsThumb, TRI);
-  MIB.addOperand(Addr).add(predOps(ARMCC::AL));
+  MIB.add(Addr).add(predOps(ARMCC::AL));
 
  unsigned CMPri = IsThumb ? ARM::t2CMPri : ARM::CMPri;
  BuildMI(StoreBB, DL, TII->get(CMPri))
@@ -1049,9 +1049,9 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    unsigned newOpc = Opcode == ARM::VMOVScc ? ARM::VMOVS : ARM::VMOVD;
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(newOpc),
            MI.getOperand(1).getReg())
-        .addOperand(MI.getOperand(2))
-        .addImm(MI.getOperand(3).getImm()) // 'pred'
-        .addOperand(MI.getOperand(4));
+        .add(MI.getOperand(2))
+        .addImm(MI.getOperand(3).getImm()) // 'pred'
+        .add(MI.getOperand(4));
 
    MI.eraseFromParent();
    return true;
@@ -1061,10 +1061,10 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVr : ARM::MOVr;
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
            MI.getOperand(1).getReg())
-        .addOperand(MI.getOperand(2))
-        .addImm(MI.getOperand(3).getImm()) // 'pred'
-        .addOperand(MI.getOperand(4))
-        .addReg(0); // 's' bit
+        .add(MI.getOperand(2))
+        .addImm(MI.getOperand(3).getImm()) // 'pred'
+        .add(MI.getOperand(4))
+        .addReg(0); // 's' bit
 
    MI.eraseFromParent();
    return true;
@@ -1072,11 +1072,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
  case ARM::MOVCCsi: {
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
            (MI.getOperand(1).getReg()))
-        .addOperand(MI.getOperand(2))
-        .addImm(MI.getOperand(3).getImm())
-        .addImm(MI.getOperand(4).getImm()) // 'pred'
-        .addOperand(MI.getOperand(5))
-        .addReg(0); // 's' bit
+        .add(MI.getOperand(2))
+        .addImm(MI.getOperand(3).getImm())
+        .addImm(MI.getOperand(4).getImm()) // 'pred'
+        .add(MI.getOperand(5))
+        .addReg(0); // 's' bit
 
    MI.eraseFromParent();
    return true;
@@ -1084,12 +1084,12 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
  case ARM::MOVCCsr: {
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsr),
            (MI.getOperand(1).getReg()))
-        .addOperand(MI.getOperand(2))
-        .addOperand(MI.getOperand(3))
-        .addImm(MI.getOperand(4).getImm())
-        .addImm(MI.getOperand(5).getImm()) // 'pred'
-        .addOperand(MI.getOperand(6))
-        .addReg(0); // 's' bit
+        .add(MI.getOperand(2))
+        .add(MI.getOperand(3))
+        .addImm(MI.getOperand(4).getImm())
+        .addImm(MI.getOperand(5).getImm()) // 'pred'
+        .add(MI.getOperand(6))
+        .addReg(0); // 's' bit
 
    MI.eraseFromParent();
    return true;
@@ -1099,9 +1099,9 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    unsigned NewOpc = AFI->isThumbFunction() ? ARM::t2MOVi16 : ARM::MOVi16;
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
            MI.getOperand(1).getReg())
-        .addImm(MI.getOperand(2).getImm())
-        .addImm(MI.getOperand(3).getImm()) // 'pred'
-        .addOperand(MI.getOperand(4));
+        .addImm(MI.getOperand(2).getImm())
+        .addImm(MI.getOperand(3).getImm()) // 'pred'
+        .add(MI.getOperand(4));
    MI.eraseFromParent();
    return true;
  }
@@ -1110,10 +1110,10 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    unsigned Opc = AFI->isThumbFunction() ? ARM::t2MOVi : ARM::MOVi;
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
            MI.getOperand(1).getReg())
-        .addImm(MI.getOperand(2).getImm())
-        .addImm(MI.getOperand(3).getImm()) // 'pred'
-        .addOperand(MI.getOperand(4))
-        .addReg(0); // 's' bit
+        .addImm(MI.getOperand(2).getImm())
+        .addImm(MI.getOperand(3).getImm()) // 'pred'
+        .add(MI.getOperand(4))
+        .addReg(0); // 's' bit
 
    MI.eraseFromParent();
    return true;
@@ -1123,10 +1123,10 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    unsigned Opc = AFI->isThumbFunction() ? ARM::t2MVNi : ARM::MVNi;
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(Opc),
            MI.getOperand(1).getReg())
-        .addImm(MI.getOperand(2).getImm())
-        .addImm(MI.getOperand(3).getImm()) // 'pred'
-        .addOperand(MI.getOperand(4))
-        .addReg(0); // 's' bit
+        .addImm(MI.getOperand(2).getImm())
+        .addImm(MI.getOperand(3).getImm()) // 'pred'
+        .add(MI.getOperand(4))
+        .addReg(0); // 's' bit
 
    MI.eraseFromParent();
    return true;
@@ -1145,11 +1145,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    }
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewOpc),
            MI.getOperand(1).getReg())
-        .addOperand(MI.getOperand(2))
-        .addImm(MI.getOperand(3).getImm())
-        .addImm(MI.getOperand(4).getImm()) // 'pred'
-        .addOperand(MI.getOperand(5))
-        .addReg(0); // 's' bit
+        .add(MI.getOperand(2))
+        .addImm(MI.getOperand(3).getImm())
+        .addImm(MI.getOperand(4).getImm()) // 'pred'
+        .add(MI.getOperand(5))
+        .addReg(0); // 's' bit
    MI.eraseFromParent();
    return true;
  }
@@ -1206,7 +1206,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    // These are just fancy MOVs instructions.
    BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
            MI.getOperand(0).getReg())
-        .addOperand(MI.getOperand(1))
+        .add(MI.getOperand(1))
        .addImm(ARM_AM::getSORegOpc(
            (Opcode == ARM::MOVsrl_flag ? ARM_AM::lsr : ARM_AM::asr), 1))
        .add(predOps(ARMCC::AL))
@@ -1219,7 +1219,7 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::MOVsi),
                MI.getOperand(0).getReg())
-            .addOperand(MI.getOperand(1))
+            .add(MI.getOperand(1))
            .addImm(ARM_AM::getSORegOpc(ARM_AM::rrx, 0))
            .add(predOps(ARMCC::AL))
            .addReg(0);
@@ -1253,14 +1253,14 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    bool DstIsDead = MI.getOperand(0).isDead();
    MachineInstrBuilder MIB1 =
        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg)
-            .addOperand(MI.getOperand(1))
+            .add(MI.getOperand(1))
            .add(predOps(ARMCC::AL));
    MIB1->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
-    MachineInstrBuilder MIB2 = BuildMI(MBB, MBBI, MI.getDebugLoc(),
-                                       TII->get(ARM::tPICADD))
-      .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
-      .addReg(DstReg)
-      .addOperand(MI.getOperand(2));
+    MachineInstrBuilder MIB2 =
+        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD))
+            .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
+            .addReg(DstReg)
+            .add(MI.getOperand(2));
    TransferImpOps(MI, MIB1, MIB2);
    MI.eraseFromParent();
    return true;
@@ -1372,9 +1372,9 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
      MachineInstrBuilder MIB =
          BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::SUBri), ARM::PC)
              .addReg(ARM::LR)
-              .addOperand(MI.getOperand(0))
-              .addOperand(MI.getOperand(1))
-              .addOperand(MI.getOperand(2))
+              .add(MI.getOperand(0))
+              .add(MI.getOperand(1))
+              .add(MI.getOperand(2))
              .addReg(ARM::CPSR, RegState::Undef);
      TransferImpOps(MI, MIB, MIB);
      MI.eraseFromParent();
@@ -1391,11 +1391,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
    unsigned DstReg = MI.getOperand(OpIdx++).getReg();
 
    // Copy the source register.
-    MIB.addOperand(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
 
    // Copy the predicate operands.
-    MIB.addOperand(MI.getOperand(OpIdx++));
-    MIB.addOperand(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
+    MIB.add(MI.getOperand(OpIdx++));
 
    // Add the destination operands (D subregs).
unsigned D0 = TRI->getSubReg(DstReg, ARM::dsub_0); @@ -1422,11 +1422,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB, unsigned SrcReg = MI.getOperand(OpIdx++).getReg(); // Copy the destination register. - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Copy the predicate operands. - MIB.addOperand(MI.getOperand(OpIdx++)); - MIB.addOperand(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); + MIB.add(MI.getOperand(OpIdx++)); // Add the source operands (D subregs). unsigned D0 = TRI->getSubReg(SrcReg, ARM::dsub_0); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index b759919aa13..6ded2cceb08 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -8827,11 +8827,11 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, // Thumb1 post-indexed loads are really just single-register LDMs. case ARM::tLDR_postidx: { BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD)) - .addOperand(MI.getOperand(1)) // Rn_wb - .addOperand(MI.getOperand(2)) // Rn - .addOperand(MI.getOperand(3)) // PredImm - .addOperand(MI.getOperand(4)) // PredReg - .addOperand(MI.getOperand(0)); // Rt + .add(MI.getOperand(1)) // Rn_wb + .add(MI.getOperand(2)) // Rn + .add(MI.getOperand(3)) // PredImm + .add(MI.getOperand(4)) // PredReg + .add(MI.getOperand(0)); // Rt MI.eraseFromParent(); return BB; } @@ -8862,12 +8862,12 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, MachineMemOperand *MMO = *MI.memoperands_begin(); BuildMI(*BB, MI, dl, TII->get(NewOpc)) - .addOperand(MI.getOperand(0)) // Rn_wb - .addOperand(MI.getOperand(1)) // Rt - .addOperand(MI.getOperand(2)) // Rn - .addImm(Offset) // offset (skip GPR==zero_reg) - .addOperand(MI.getOperand(5)) // pred - .addOperand(MI.getOperand(6)) + .add(MI.getOperand(0)) // Rn_wb + .add(MI.getOperand(1)) // Rt + .add(MI.getOperand(2)) // Rn + .addImm(Offset) // offset (skip GPR==zero_reg) + .add(MI.getOperand(5)) // pred + .add(MI.getOperand(6)) .addMemOperand(MMO); MI.eraseFromParent(); return BB; @@ -8884,7 +8884,7 @@ ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, } MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc)); for (unsigned i = 0; i < MI.getNumOperands(); ++i) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); MI.eraseFromParent(); return BB; } diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 53199a16301..1bfcb720167 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1259,7 +1259,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) { // Transfer the rest of operands. for (unsigned OpNum = 3, e = MI->getNumOperands(); OpNum != e; ++OpNum) - MIB.addOperand(MI->getOperand(OpNum)); + MIB.add(MI->getOperand(OpNum)); // Transfer memoperands. 
MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); @@ -1462,12 +1462,10 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(MachineInstr &MI) const { DebugLoc DL = MI.getDebugLoc(); MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc)); if (NewOpc == ARM::t2LDRD_PRE || NewOpc == ARM::t2LDRD_POST) { - MIB.addOperand(Reg0Op).addOperand(Reg1Op) - .addReg(BaseOp.getReg(), RegState::Define); + MIB.add(Reg0Op).add(Reg1Op).addReg(BaseOp.getReg(), RegState::Define); } else { assert(NewOpc == ARM::t2STRD_PRE || NewOpc == ARM::t2STRD_POST); - MIB.addReg(BaseOp.getReg(), RegState::Define) - .addOperand(Reg0Op).addOperand(Reg1Op); + MIB.addReg(BaseOp.getReg(), RegState::Define).add(Reg0Op).add(Reg1Op); } MIB.addReg(BaseOp.getReg(), RegState::Kill) .addImm(Offset).addImm(Pred).addReg(PredReg); @@ -1477,7 +1475,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(MachineInstr &MI) const { // Transfer implicit operands. for (const MachineOperand &MO : MI.implicit_operands()) - MIB.addOperand(MO); + MIB.add(MO); MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); MBB.erase(MBBI); diff --git a/lib/Target/ARM/Thumb1FrameLowering.cpp b/lib/Target/ARM/Thumb1FrameLowering.cpp index ee2726894aa..1103532075a 100644 --- a/lib/Target/ARM/Thumb1FrameLowering.cpp +++ b/lib/Target/ARM/Thumb1FrameLowering.cpp @@ -501,7 +501,7 @@ bool Thumb1FrameLowering::emitPopSpecialFixUp(MachineBasicBlock &MBB, // Copy implicit ops and popped registers, if any. for (auto MO: MBBI->operands()) if (MO.isReg() && (MO.isImplicit() || MO.isDef())) - MIB.addOperand(MO); + MIB.add(MO); MIB.addReg(ARM::PC, RegState::Define); // Erase the old instruction (tBX_RET or tPOP). MBB.erase(MBBI); @@ -585,7 +585,7 @@ bool Thumb1FrameLowering::emitPopSpecialFixUp(MachineBasicBlock &MBB, for (auto MO: MBBI->operands()) if (MO.isReg() && (MO.isImplicit() || MO.isDef()) && MO.getReg() != ARM::PC) { - MIB.addOperand(MO); + MIB.add(MO); if (!MO.isImplicit()) Popped = true; } diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp index 881e3cb2577..109c1f55c9b 100644 --- a/lib/Target/ARM/Thumb2SizeReduction.cpp +++ b/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -562,8 +562,8 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI, MIB.addReg(MI->getOperand(0).getReg(), RegState::Define | RegState::Dead); if (!isLdStMul) { - MIB.addOperand(MI->getOperand(0)); - MIB.addOperand(MI->getOperand(1)); + MIB.add(MI->getOperand(0)); + MIB.add(MI->getOperand(1)); if (HasImmOffset) MIB.addImm(OffsetImm / Scale); @@ -577,7 +577,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI, // Transfer the rest of operands. for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum) - MIB.addOperand(MI->getOperand(OpNum)); + MIB.add(MI->getOperand(OpNum)); // Transfer memoperands. MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); @@ -624,8 +624,8 @@ Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI, MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(), TII->get(ARM::tADDrSPi)) - .addOperand(MI->getOperand(0)) - .addOperand(MI->getOperand(1)) + .add(MI->getOperand(0)) + .add(MI->getOperand(1)) .addImm(Imm / 4) // The tADDrSPi has an implied scale by four. .add(predOps(ARMCC::AL)); @@ -786,7 +786,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI, // Add the 16-bit instruction. 
DebugLoc dl = MI->getDebugLoc(); MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); - MIB.addOperand(MI->getOperand(0)); + MIB.add(MI->getOperand(0)); if (NewMCID.hasOptionalDef()) { if (HasCC) AddDefaultT1CC(MIB, CCDead); @@ -801,7 +801,7 @@ Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI, continue; if (SkipPred && MCID.OpInfo[i].isPredicate()) continue; - MIB.addOperand(MI->getOperand(i)); + MIB.add(MI->getOperand(i)); } // Transfer MI flags. @@ -881,7 +881,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI, // Add the 16-bit instruction. DebugLoc dl = MI->getDebugLoc(); MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID); - MIB.addOperand(MI->getOperand(0)); + MIB.add(MI->getOperand(0)); if (NewMCID.hasOptionalDef()) { if (HasCC) AddDefaultT1CC(MIB, CCDead); @@ -910,7 +910,7 @@ Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI, // Skip implicit def of CPSR. Either it's modeled as an optional // def now or it's already an implicit def on the new instruction. continue; - MIB.addOperand(MO); + MIB.add(MO); } if (!MCID.isPredicable() && NewMCID.isPredicable()) MIB.add(predOps(ARMCC::AL)); diff --git a/lib/Target/AVR/AVRExpandPseudoInsts.cpp b/lib/Target/AVR/AVRExpandPseudoInsts.cpp index 1b2f2cec0bc..13080a5d72f 100644 --- a/lib/Target/AVR/AVRExpandPseudoInsts.cpp +++ b/lib/Target/AVR/AVRExpandPseudoInsts.cpp @@ -509,8 +509,8 @@ bool AVRExpandPseudo::expand(Block &MBB, BlockIt MBBI) { const BlockAddress *BA = MI.getOperand(1).getBlockAddress(); unsigned TF = MI.getOperand(1).getTargetFlags(); - MIBLO.addOperand(MachineOperand::CreateBA(BA, TF | AVRII::MO_LO)); - MIBHI.addOperand(MachineOperand::CreateBA(BA, TF | AVRII::MO_HI)); + MIBLO.add(MachineOperand::CreateBA(BA, TF | AVRII::MO_LO)); + MIBHI.add(MachineOperand::CreateBA(BA, TF | AVRII::MO_HI)); break; } case MachineOperand::MO_Immediate: { @@ -785,9 +785,8 @@ bool AVRExpandPseudo::expandAtomicBinaryOp(unsigned Opcode, auto Op1 = MI.getOperand(0); auto Op2 = MI.getOperand(1); - MachineInstr &NewInst = *buildMI(MBB, MBBI, Opcode) - .addOperand(Op1).addOperand(Op2) - .getInstr(); + MachineInstr &NewInst = + *buildMI(MBB, MBBI, Opcode).add(Op1).add(Op2).getInstr(); f(NewInst); }); } @@ -810,15 +809,13 @@ bool AVRExpandPseudo::expandAtomicArithmeticOp(unsigned Width, unsigned StoreOpcode = (Width == 8) ? 
AVR::STPtrRr : AVR::STWPtrRr; // Create the load - buildMI(MBB, MBBI, LoadOpcode).addOperand(Op1).addOperand(Op2); + buildMI(MBB, MBBI, LoadOpcode).add(Op1).add(Op2); // Create the arithmetic op - buildMI(MBB, MBBI, ArithOpcode) - .addOperand(Op1).addOperand(Op1) - .addOperand(Op2); + buildMI(MBB, MBBI, ArithOpcode).add(Op1).add(Op1).add(Op2); // Create the store - buildMI(MBB, MBBI, StoreOpcode).addOperand(Op2).addOperand(Op1); + buildMI(MBB, MBBI, StoreOpcode).add(Op2).add(Op1); }); } diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp index fe7278fde1b..ef0f18b39f9 100644 --- a/lib/Target/Hexagon/HexagonBitSimplify.cpp +++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp @@ -2599,7 +2599,7 @@ void HexagonLoopRescheduling::moveGroup(InstrGroup &G, MachineBasicBlock &LB, for (unsigned j = 0, m = SI->getNumOperands(); j < m; ++j) { const MachineOperand &Op = SI->getOperand(j); if (!Op.isReg()) { - MIB.addOperand(Op); + MIB.add(Op); continue; } if (!Op.isUse()) diff --git a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp index a5351cd08da..d7c726bb36c 100644 --- a/lib/Target/Hexagon/HexagonEarlyIfConv.cpp +++ b/lib/Target/Hexagon/HexagonEarlyIfConv.cpp @@ -680,12 +680,12 @@ void HexagonEarlyIfConversion::predicateInstr(MachineBasicBlock *ToB, MachineInstrBuilder MIB = BuildMI(*ToB, At, DL, HII->get(COpc)); MachineInstr::mop_iterator MOI = MI->operands_begin(); if (HII->isPostIncrement(*MI)) { - MIB.addOperand(*MOI); + MIB.add(*MOI); ++MOI; } MIB.addReg(PredR); for (const MachineOperand &MO : make_range(MOI, MI->operands_end())) - MIB.addOperand(MO); + MIB.add(MO); // Set memory references. MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin(); diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp index 8f070d842b8..624a713a80d 100644 --- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp +++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp @@ -595,9 +595,9 @@ MachineInstr *HexagonExpandCondsets::genCondTfrFor(MachineOperand &SrcOp, .addReg(SrcOp.getReg(), SrcState, SrcOp.getSubReg()); } else { MIB = BuildMI(B, At, DL, HII->get(Opc)) - .addReg(DstR, DstState, DstSR) - .addReg(PredOp.getReg(), PredState, PredOp.getSubReg()) - .addOperand(SrcOp); + .addReg(DstR, DstState, DstSR) + .addReg(PredOp.getReg(), PredState, PredOp.getSubReg()) + .add(SrcOp); } DEBUG(dbgs() << "created an initial copy: " << *MIB); @@ -828,7 +828,7 @@ void HexagonExpandCondsets::predicateAt(const MachineOperand &DefOp, while (Ox < NP) { MachineOperand &MO = MI.getOperand(Ox); if (!MO.isReg() || !MO.isImplicit()) - MB.addOperand(MO); + MB.add(MO); Ox++; } diff --git a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp index dfd1f1d4f88..015d3b840e6 100644 --- a/lib/Target/Hexagon/HexagonFixupHwLoops.cpp +++ b/lib/Target/Hexagon/HexagonFixupHwLoops.cpp @@ -190,5 +190,5 @@ void HexagonFixupHwLoops::useExtLoopInstr(MachineFunction &MF, MIB = BuildMI(*MBB, MII, DL, TII->get(newOp)); for (unsigned i = 0; i < MII->getNumOperands(); ++i) - MIB.addOperand(MII->getOperand(i)); + MIB.add(MII->getOperand(i)); } diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp index a3f6273f9f6..dd21d763359 100644 --- a/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -1473,8 +1473,7 @@ bool HexagonFrameLowering::expandCopy(MachineBasicBlock &B, return false; unsigned TmpR = 
MRI.createVirtualRegister(&Hexagon::IntRegsRegClass); - BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR) - .addOperand(MI->getOperand(1)); + BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1)); BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR) .addReg(TmpR, RegState::Kill); @@ -2221,7 +2220,7 @@ void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF, if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) { const DebugLoc &DL = SI.getDebugLoc(); CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR) - .addOperand(SrcOp); + .add(SrcOp); } ++StartIt; diff --git a/lib/Target/Hexagon/HexagonGenMux.cpp b/lib/Target/Hexagon/HexagonGenMux.cpp index a718df9c70a..85222944c77 100644 --- a/lib/Target/Hexagon/HexagonGenMux.cpp +++ b/lib/Target/Hexagon/HexagonGenMux.cpp @@ -324,9 +324,9 @@ bool HexagonGenMux::genMuxInBlock(MachineBasicBlock &B) { if (!MxOpc) continue; BuildMI(B, MX.At, DL, HII->get(MxOpc), MX.DefR) - .addReg(MX.PredR) - .addOperand(*MX.SrcT) - .addOperand(*MX.SrcF); + .addReg(MX.PredR) + .add(*MX.SrcT) + .add(*MX.SrcF); B.erase(MX.Def1); B.erase(MX.Def2); Changed = true; diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index 0a7dc6b49d0..c2a8b569cde 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -1074,13 +1074,13 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6; MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) .addImm(MI.getOperand(1).getImm()) .addReg(SrcSubLo) .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); MI1New->getOperand(0).setIsKill(false); BuildMI(MBB, MI, DL, get(NewOpc)) - .addOperand(MI.getOperand(0)) + .add(MI.getOperand(0)) // The Vectors are indexed in multiples of vector size. .addImm(MI.getOperand(1).getImm() + Offset) .addReg(SrcSubHi) @@ -1106,15 +1106,13 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { unsigned DstReg = MI.getOperand(0).getReg(); unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6; - MachineInstr *MI1New = - BuildMI(MBB, MI, DL, get(NewOpc), - HRI.getSubReg(DstReg, Hexagon::vsub_lo)) - .addOperand(MI.getOperand(1)) - .addImm(MI.getOperand(2).getImm()); + MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc), + HRI.getSubReg(DstReg, Hexagon::vsub_lo)) + .add(MI.getOperand(1)) + .addImm(MI.getOperand(2).getImm()); MI1New->getOperand(1).setIsKill(false); - BuildMI(MBB, MI, DL, get(NewOpc), - HRI.getSubReg(DstReg, Hexagon::vsub_hi)) - .addOperand(MI.getOperand(1)) + BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi)) + .add(MI.getOperand(1)) // The Vectors are indexed in multiples of vector size. 
.addImm(MI.getOperand(2).getImm() + Offset) .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); @@ -1227,18 +1225,18 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { bool IsDestLive = !LiveAtMI.available(MRI, Op0.getReg()); if (Op0.getReg() != Op2.getReg()) { auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov)) - .addOperand(Op0) - .addOperand(Op1) - .addOperand(Op2); + .add(Op0) + .add(Op1) + .add(Op2); if (IsDestLive) T.addReg(Op0.getReg(), RegState::Implicit); IsDestLive = true; } if (Op0.getReg() != Op3.getReg()) { auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov)) - .addOperand(Op0) - .addOperand(Op1) - .addOperand(Op3); + .add(Op0) + .add(Op1) + .add(Op3); if (IsDestLive) T.addReg(Op0.getReg(), RegState::Implicit); } @@ -1259,10 +1257,10 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_lo); unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::vsub_hi); auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine)) - .addOperand(Op0) - .addOperand(Op1) - .addReg(SrcHi) - .addReg(SrcLo); + .add(Op0) + .add(Op1) + .addReg(SrcHi) + .addReg(SrcLo); if (IsDestLive) T.addReg(Op0.getReg(), RegState::Implicit); IsDestLive = true; @@ -1271,10 +1269,10 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { unsigned SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_lo); unsigned SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::vsub_hi); auto T = BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine)) - .addOperand(Op0) - .addOperand(Op1) - .addReg(SrcHi) - .addReg(SrcLo); + .add(Op0) + .add(Op1) + .addReg(SrcHi) + .addReg(SrcLo); if (IsDestLive) T.addReg(Op0.getReg(), RegState::Implicit); } @@ -1376,7 +1374,7 @@ bool HexagonInstrInfo::PredicateInstruction( MachineOperand &Op = MI.getOperand(NOp); if (!Op.isReg() || !Op.isDef() || Op.isImplicit()) break; - T.addOperand(Op); + T.add(Op); NOp++; } @@ -1386,7 +1384,7 @@ bool HexagonInstrInfo::PredicateInstruction( assert(GotPredReg); T.addReg(PredReg, PredRegFlags); while (NOp < NumOps) - T.addOperand(MI.getOperand(NOp++)); + T.add(MI.getOperand(NOp++)); MI.setDesc(get(PredOpc)); while (unsigned n = MI.getNumOperands()) diff --git a/lib/Target/Hexagon/HexagonOptAddrMode.cpp b/lib/Target/Hexagon/HexagonOptAddrMode.cpp index 89db46799cb..f99a0645507 100644 --- a/lib/Target/Hexagon/HexagonOptAddrMode.cpp +++ b/lib/Target/Hexagon/HexagonOptAddrMode.cpp @@ -333,17 +333,17 @@ bool HexagonOptAddrMode::changeLoad(MachineInstr *OldMI, MachineOperand ImmOp, short NewOpCode = HII->getBaseWithLongOffset(*OldMI); assert(NewOpCode >= 0 && "Invalid New opcode\n"); MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode)); - MIB.addOperand(OldMI->getOperand(0)); - MIB.addOperand(OldMI->getOperand(2)); - MIB.addOperand(OldMI->getOperand(3)); - MIB.addOperand(ImmOp); + MIB.add(OldMI->getOperand(0)); + MIB.add(OldMI->getOperand(2)); + MIB.add(OldMI->getOperand(3)); + MIB.add(ImmOp); OpStart = 4; Changed = true; } else if (HII->getAddrMode(*OldMI) == HexagonII::BaseImmOffset) { short NewOpCode = HII->getAbsoluteForm(*OldMI); assert(NewOpCode >= 0 && "Invalid New opcode\n"); MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode)) - .addOperand(OldMI->getOperand(0)); + .add(OldMI->getOperand(0)); const GlobalValue *GV = ImmOp.getGlobal(); int64_t Offset = ImmOp.getOffset() + OldMI->getOperand(2).getImm(); @@ -359,9 +359,9 @@ bool HexagonOptAddrMode::changeLoad(MachineInstr *OldMI, MachineOperand ImmOp, short NewOpCode = 
    short NewOpCode = HII->xformRegToImmOffset(*OldMI);
    assert(NewOpCode >= 0 && "Invalid New opcode\n");
    MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode));
-    MIB.addOperand(OldMI->getOperand(0));
-    MIB.addOperand(OldMI->getOperand(1));
-    MIB.addOperand(ImmOp);
+    MIB.add(OldMI->getOperand(0));
+    MIB.add(OldMI->getOperand(1));
+    MIB.add(ImmOp);
     OpStart = 4;
     Changed = true;
     DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
@@ -370,7 +370,7 @@ bool HexagonOptAddrMode::changeLoad(MachineInstr *OldMI, MachineOperand ImmOp,

   if (Changed)
     for (unsigned i = OpStart; i < OpEnd; ++i)
-      MIB.addOperand(OldMI->getOperand(i));
+      MIB.add(OldMI->getOperand(i));

   return Changed;
 }
@@ -390,10 +390,10 @@ bool HexagonOptAddrMode::changeStore(MachineInstr *OldMI, MachineOperand ImmOp,
       short NewOpCode = HII->getBaseWithLongOffset(*OldMI);
       assert(NewOpCode >= 0 && "Invalid New opcode\n");
       MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode));
-      MIB.addOperand(OldMI->getOperand(1));
-      MIB.addOperand(OldMI->getOperand(2));
-      MIB.addOperand(ImmOp);
-      MIB.addOperand(OldMI->getOperand(3));
+      MIB.add(OldMI->getOperand(1));
+      MIB.add(OldMI->getOperand(2));
+      MIB.add(ImmOp);
+      MIB.add(OldMI->getOperand(3));
       OpStart = 4;
     } else if (HII->getAddrMode(*OldMI) == HexagonII::BaseImmOffset) {
       short NewOpCode = HII->getAbsoluteForm(*OldMI);
@@ -402,7 +402,7 @@ bool HexagonOptAddrMode::changeStore(MachineInstr *OldMI, MachineOperand ImmOp,
       const GlobalValue *GV = ImmOp.getGlobal();
       int64_t Offset = ImmOp.getOffset() + OldMI->getOperand(1).getImm();
       MIB.addGlobalAddress(GV, Offset, ImmOp.getTargetFlags());
-      MIB.addOperand(OldMI->getOperand(2));
+      MIB.add(OldMI->getOperand(2));
       OpStart = 3;
     }
     Changed = true;
@@ -412,9 +412,9 @@ bool HexagonOptAddrMode::changeStore(MachineInstr *OldMI, MachineOperand ImmOp,
     short NewOpCode = HII->xformRegToImmOffset(*OldMI);
     assert(NewOpCode >= 0 && "Invalid New opcode\n");
     MIB = BuildMI(*BB, InsertPt, OldMI->getDebugLoc(), HII->get(NewOpCode));
-    MIB.addOperand(OldMI->getOperand(0));
-    MIB.addOperand(ImmOp);
-    MIB.addOperand(OldMI->getOperand(1));
+    MIB.add(OldMI->getOperand(0));
+    MIB.add(ImmOp);
+    MIB.add(OldMI->getOperand(1));
     OpStart = 2;
     Changed = true;
     DEBUG(dbgs() << "[Changing]: " << *OldMI << "\n");
   }
@@ -422,7 +422,7 @@ bool HexagonOptAddrMode::changeStore(MachineInstr *OldMI, MachineOperand ImmOp,

   if (Changed)
     for (unsigned i = OpStart; i < OpEnd; ++i)
-      MIB.addOperand(OldMI->getOperand(i));
+      MIB.add(OldMI->getOperand(i));

   return Changed;
 }
@@ -473,26 +473,26 @@ bool HexagonOptAddrMode::changeAddAsl(NodeAddr<UseNode *> AddAslUN,
       BuildMI(*BB, InsertPt, UseMI->getDebugLoc(), HII->get(NewOpCode));
   // change mem(Rs + # ) -> mem(Rt << # + ##)
   if (UseMID.mayLoad()) {
-    MIB.addOperand(UseMI->getOperand(0));
-    MIB.addOperand(AddAslMI->getOperand(2));
-    MIB.addOperand(AddAslMI->getOperand(3));
+    MIB.add(UseMI->getOperand(0));
+    MIB.add(AddAslMI->getOperand(2));
+    MIB.add(AddAslMI->getOperand(3));
     const GlobalValue *GV = ImmOp.getGlobal();
     MIB.addGlobalAddress(GV, UseMI->getOperand(2).getImm(),
                          ImmOp.getTargetFlags());
     OpStart = 3;
   } else if (UseMID.mayStore()) {
-    MIB.addOperand(AddAslMI->getOperand(2));
-    MIB.addOperand(AddAslMI->getOperand(3));
+    MIB.add(AddAslMI->getOperand(2));
+    MIB.add(AddAslMI->getOperand(3));
     const GlobalValue *GV = ImmOp.getGlobal();
     MIB.addGlobalAddress(GV, UseMI->getOperand(1).getImm(),
                          ImmOp.getTargetFlags());
-    MIB.addOperand(UseMI->getOperand(2));
+    MIB.add(UseMI->getOperand(2));
     OpStart = 3;
   } else
     llvm_unreachable("Unhandled instruction");
  for (unsigned i = OpStart; i < OpEnd; ++i)
-    MIB.addOperand(UseMI->getOperand(i));
+    MIB.add(UseMI->getOperand(i));

   Deleted.insert(UseMI);
 }
diff --git a/lib/Target/Lanai/LanaiInstrInfo.cpp b/lib/Target/Lanai/LanaiInstrInfo.cpp
index fcd5da876b1..a7c9a7a7f28 100644
--- a/lib/Target/Lanai/LanaiInstrInfo.cpp
+++ b/lib/Target/Lanai/LanaiInstrInfo.cpp
@@ -518,7 +518,7 @@ LanaiInstrInfo::optimizeSelect(MachineInstr &MI,
   const MCInstrDesc &DefDesc = DefMI->getDesc();
   for (unsigned i = 1, e = DefDesc.getNumOperands();
        i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
-    NewMI.addOperand(DefMI->getOperand(i));
+    NewMI.add(DefMI->getOperand(i));

   unsigned CondCode = MI.getOperand(3).getImm();
   if (Invert)
@@ -531,7 +531,7 @@ LanaiInstrInfo::optimizeSelect(MachineInstr &MI,
   // register operand tied to the first def. The tie makes the register
   // allocator ensure the FalseReg is allocated the same register as operand 0.
   FalseReg.setImplicit();
-  NewMI.addOperand(FalseReg);
+  NewMI.add(FalseReg);
   NewMI->tieOperands(0, NewMI->getNumOperands() - 1);

   // Update SeenMIs set: register newly created MI and erase removed DefMI.
diff --git a/lib/Target/MSP430/MSP430BranchSelector.cpp b/lib/Target/MSP430/MSP430BranchSelector.cpp
index 5fd6b6305f6..424b5ae418f 100644
--- a/lib/Target/MSP430/MSP430BranchSelector.cpp
+++ b/lib/Target/MSP430/MSP430BranchSelector.cpp
@@ -194,8 +194,8 @@ bool MSP430BSel::expandBranches(OffsetVector &BlockOffsets) {
       // Jump over the long branch on the opposite condition
       TII->reverseBranchCondition(Cond);
       MI = BuildMI(*MBB, MI, dl, TII->get(MSP430::JCC))
-          .addMBB(NextMBB)
-          .addOperand(Cond[0]);
+               .addMBB(NextMBB)
+               .add(Cond[0]);
       InstrSizeDiff += TII->getInstSizeInBytes(*MI);
       ++MI;
     }
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index 19af1914c81..86bf1037118 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -482,7 +482,7 @@ MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
     MIB->RemoveOperand(0);

     for (unsigned J = 0, E = I->getDesc().getNumOperands(); J < E; ++J) {
-      MIB.addOperand(I->getOperand(J));
+      MIB.add(I->getOperand(J));
     }

     MIB.addImm(0);
@@ -492,7 +492,7 @@ MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc,
       if (BranchWithZeroOperand && (unsigned)ZeroOperandPosition == J)
         continue;

-      MIB.addOperand(I->getOperand(J));
+      MIB.add(I->getOperand(J));
     }
   }
diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp
index f28e8b36fdb..69162d4f6e8 100644
--- a/lib/Target/Mips/MipsSEISelLowering.cpp
+++ b/lib/Target/Mips/MipsSEISelLowering.cpp
@@ -3628,7 +3628,7 @@ MipsSETargetLowering::emitLD_F16_PSEUDO(MachineInstr &MI,
   MachineInstrBuilder MIB =
       BuildMI(*BB, MI, DL, TII->get(UsingMips32 ? Mips::LH : Mips::LH64), Rt);
   for (unsigned i = 1; i < MI.getNumOperands(); i++)
-    MIB.addOperand(MI.getOperand(i));
+    MIB.add(MI.getOperand(i));

   BuildMI(*BB, MI, DL, TII->get(Mips::FILL_H), Wd).addReg(Rt);
diff --git a/lib/Target/NVPTX/NVPTXPeephole.cpp b/lib/Target/NVPTX/NVPTXPeephole.cpp
index 49e639793ef..e10b046f7c9 100644
--- a/lib/Target/NVPTX/NVPTXPeephole.cpp
+++ b/lib/Target/NVPTX/NVPTXPeephole.cpp
@@ -113,7 +113,7 @@ static void CombineCVTAToLocal(MachineInstr &Root) {
       BuildMI(MF, Root.getDebugLoc(), TII->get(Prev.getOpcode()),
               Root.getOperand(0).getReg())
           .addReg(NVPTX::VRFrameLocal)
-          .addOperand(Prev.getOperand(2));
+          .add(Prev.getOperand(2));

   MBB.insert((MachineBasicBlock::iterator)&Root, MIB);
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index 2e0b9355f82..51c86dd2170 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -662,12 +662,14 @@ unsigned PPCInstrInfo::insertBranch(MachineBasicBlock &MBB,
                           (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                           (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
     else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
-      BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
+      BuildMI(&MBB, DL, get(PPC::BC)).add(Cond[1]).addMBB(TBB);
     else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
-      BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
+      BuildMI(&MBB, DL, get(PPC::BCn)).add(Cond[1]).addMBB(TBB);
     else                // Conditional branch
       BuildMI(&MBB, DL, get(PPC::BCC))
-        .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
+          .addImm(Cond[0].getImm())
+          .add(Cond[1])
+          .addMBB(TBB);
     return 1;
   }
@@ -677,12 +679,14 @@ unsigned PPCInstrInfo::insertBranch(MachineBasicBlock &MBB,
                         (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                         (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
   else if (Cond[0].getImm() == PPC::PRED_BIT_SET)
-    BuildMI(&MBB, DL, get(PPC::BC)).addOperand(Cond[1]).addMBB(TBB);
+    BuildMI(&MBB, DL, get(PPC::BC)).add(Cond[1]).addMBB(TBB);
   else if (Cond[0].getImm() == PPC::PRED_BIT_UNSET)
-    BuildMI(&MBB, DL, get(PPC::BCn)).addOperand(Cond[1]).addMBB(TBB);
+    BuildMI(&MBB, DL, get(PPC::BCn)).add(Cond[1]).addMBB(TBB);
   else
     BuildMI(&MBB, DL, get(PPC::BCC))
-      .addImm(Cond[0].getImm()).addOperand(Cond[1]).addMBB(TBB);
+        .addImm(Cond[0].getImm())
+        .add(Cond[1])
+        .addMBB(TBB);
   BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
   return 2;
 }
diff --git a/lib/Target/PowerPC/PPCMIPeephole.cpp b/lib/Target/PowerPC/PPCMIPeephole.cpp
index 2413af3f704..c6d2c3ebcc0 100644
--- a/lib/Target/PowerPC/PPCMIPeephole.cpp
+++ b/lib/Target/PowerPC/PPCMIPeephole.cpp
@@ -147,9 +147,9 @@ bool PPCMIPeephole::simplifyCode(void) {
                 << "Optimizing load-and-splat/splat "
                    "to load-and-splat/copy: ");
           DEBUG(MI.dump());
-          BuildMI(MBB, &MI, MI.getDebugLoc(),
-                  TII->get(PPC::COPY), MI.getOperand(0).getReg())
-            .addOperand(MI.getOperand(1));
+          BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
+                  MI.getOperand(0).getReg())
+              .add(MI.getOperand(1));
           ToErase = &MI;
           Simplified = true;
         }
@@ -169,9 +169,9 @@ bool PPCMIPeephole::simplifyCode(void) {
                 << "Optimizing splat/swap or splat/splat "
                    "to splat/copy: ");
           DEBUG(MI.dump());
-          BuildMI(MBB, &MI, MI.getDebugLoc(),
-                  TII->get(PPC::COPY), MI.getOperand(0).getReg())
-            .addOperand(MI.getOperand(1));
+          BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
+                  MI.getOperand(0).getReg())
+              .add(MI.getOperand(1));
           ToErase = &MI;
           Simplified = true;
         }
@@ -194,9 +194,9 @@ bool PPCMIPeephole::simplifyCode(void) {
         else if (Immed == 2 && FeedImmed == 2 && FeedReg1 == FeedReg2) {
           DEBUG(dbgs() << "Optimizing swap/swap => copy: ");
           DEBUG(MI.dump());
-          BuildMI(MBB, &MI, MI.getDebugLoc(),
-                  TII->get(PPC::COPY), MI.getOperand(0).getReg())
-            .addOperand(DefMI->getOperand(1));
+          BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
+                  MI.getOperand(0).getReg())
+              .add(DefMI->getOperand(1));
           ToErase = &MI;
           Simplified = true;
         }
@@ -251,7 +251,7 @@ bool PPCMIPeephole::simplifyCode(void) {
           DEBUG(MI.dump());
           BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(PPC::COPY),
                   MI.getOperand(0).getReg())
-            .addOperand(MI.getOperand(OpNo));
+              .add(MI.getOperand(OpNo));
           ToErase = &MI;
           Simplified = true;
         }
diff --git a/lib/Target/PowerPC/PPCVSXCopy.cpp b/lib/Target/PowerPC/PPCVSXCopy.cpp
index 3b5d8f094fd..f3a0290da05 100644
--- a/lib/Target/PowerPC/PPCVSXCopy.cpp
+++ b/lib/Target/PowerPC/PPCVSXCopy.cpp
@@ -112,7 +112,7 @@ protected:
                 TII->get(TargetOpcode::SUBREG_TO_REG), NewVReg)
             .addImm(1) // add 1, not 0, because there is no implicit clearing
                        // of the high bits.
-            .addOperand(SrcMO)
+            .add(SrcMO)
             .addImm(PPC::sub_64);

         // The source of the original copy is now the new virtual register.
@@ -132,7 +132,7 @@ protected:
         unsigned NewVReg = MRI.createVirtualRegister(DstRC);
         BuildMI(MBB, MI, MI.getDebugLoc(), TII->get(TargetOpcode::COPY),
                 NewVReg)
-          .addOperand(SrcMO);
+            .add(SrcMO);

         // Transform the original copy into a subregister extraction copy.
         SrcMO.setReg(NewVReg);
diff --git a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
index 8197285b7b1..bcf0b1a7381 100644
--- a/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
+++ b/lib/Target/PowerPC/PPCVSXSwapRemoval.cpp
@@ -936,9 +936,9 @@ bool PPCVSXSwapRemoval::removeSwaps() {
       Changed = true;
       MachineInstr *MI = SwapVector[EntryIdx].VSEMI;
       MachineBasicBlock *MBB = MI->getParent();
-      BuildMI(*MBB, MI, MI->getDebugLoc(),
-              TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
-        .addOperand(MI->getOperand(1));
+      BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(TargetOpcode::COPY),
+              MI->getOperand(0).getReg())
+          .add(MI->getOperand(1));

       DEBUG(dbgs() << format("Replaced %d with copy: ",
                              SwapVector[EntryIdx].VSEId));
diff --git a/lib/Target/SystemZ/SystemZElimCompare.cpp b/lib/Target/SystemZ/SystemZElimCompare.cpp
index b4c843f658a..3b2077304ae 100644
--- a/lib/Target/SystemZ/SystemZElimCompare.cpp
+++ b/lib/Target/SystemZ/SystemZElimCompare.cpp
@@ -216,9 +216,7 @@ bool SystemZElimCompare::convertToBRCT(
   Branch->RemoveOperand(0);
   Branch->setDesc(TII->get(BRCT));
   MachineInstrBuilder MIB(*Branch->getParent()->getParent(), Branch);
-  MIB.addOperand(MI.getOperand(0))
-     .addOperand(MI.getOperand(1))
-     .addOperand(Target);
+  MIB.add(MI.getOperand(0)).add(MI.getOperand(1)).add(Target);
   // Add a CC def to BRCT(G), since we may have to split them again if the
   // branch displacement overflows.  BRCTH has a 32-bit displacement, so
   // this is not necessary there.
@@ -261,10 +259,10 @@ bool SystemZElimCompare::convertToLoadAndTrap(
   Branch->RemoveOperand(0);
   Branch->setDesc(TII->get(LATOpcode));
   MachineInstrBuilder(*Branch->getParent()->getParent(), Branch)
-    .addOperand(MI.getOperand(0))
-    .addOperand(MI.getOperand(1))
-    .addOperand(MI.getOperand(2))
-    .addOperand(MI.getOperand(3));
+      .add(MI.getOperand(0))
+      .add(MI.getOperand(1))
+      .add(MI.getOperand(2))
+      .add(MI.getOperand(3));
   MI.eraseFromParent();
   return true;
 }
@@ -502,15 +500,15 @@ bool SystemZElimCompare::fuseCompareOperations(
   Branch->setDesc(TII->get(FusedOpcode));
   MachineInstrBuilder MIB(*Branch->getParent()->getParent(), Branch);
   for (unsigned I = 0; I < SrcNOps; I++)
-    MIB.addOperand(Compare.getOperand(I));
-  MIB.addOperand(CCMask);
+    MIB.add(Compare.getOperand(I));
+  MIB.add(CCMask);

   if (Type == SystemZII::CompareAndBranch) {
     // Only conditional branches define CC, as they may be converted back
     // to a non-fused branch because of a long displacement.  Conditional
     // returns don't have that problem.
-    MIB.addOperand(Target)
-       .addReg(SystemZ::CC, RegState::ImplicitDefine | RegState::Dead);
+    MIB.add(Target).addReg(SystemZ::CC,
+                           RegState::ImplicitDefine | RegState::Dead);
   }

   if (Type == SystemZII::CompareAndSibcall)
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 2d0a06af18a..4a6843b5b75 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -5233,7 +5233,7 @@ static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,

   unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
   BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
-    .addOperand(Base)
+      .add(Base)
       .addImm(0)
       .addReg(0);
   return Reg;
@@ -5322,8 +5322,11 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
     if (Invert)
       CCMask ^= CCValid;
     BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
-      .addReg(SrcReg).addOperand(Base).addImm(Disp)
-      .addImm(CCValid).addImm(CCMask);
+        .addReg(SrcReg)
+        .add(Base)
+        .addImm(Disp)
+        .addImm(CCValid)
+        .addImm(CCMask);
     MI.eraseFromParent();
     return MBB;
   }
@@ -5350,7 +5353,10 @@ MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
   //   # fallthrough to JoinMBB
   MBB = FalseMBB;
   BuildMI(MBB, DL, TII->get(StoreOpcode))
-    .addReg(SrcReg).addOperand(Base).addImm(Disp).addReg(IndexReg);
+      .addReg(SrcReg)
+      .add(Base)
+      .addImm(Disp)
+      .addReg(IndexReg);
   MBB->addSuccessor(JoinMBB);

   MI.eraseFromParent();
@@ -5415,8 +5421,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
   //   %OrigVal = L Disp(%Base)
   //   # fall through to LoopMMB
   MBB = StartMBB;
-  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
-    .addOperand(Base).addImm(Disp).addReg(0);
+  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
   MBB->addSuccessor(LoopMBB);

   //  LoopMBB:
@@ -5437,8 +5442,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
   if (Invert) {
     // Perform the operation normally and then invert every bit of the field.
     unsigned Tmp = MRI.createVirtualRegister(RC);
-    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
-      .addReg(RotatedOldVal).addOperand(Src2);
+    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
     if (BitSize <= 32)
       // XILF with the upper BitSize bits set.
       BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
@@ -5454,7 +5458,8 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
   } else if (BinOpcode)
     // A simply binary operation.
     BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
-      .addReg(RotatedOldVal).addOperand(Src2);
+        .addReg(RotatedOldVal)
+        .add(Src2);
   else if (IsSubWord)
     // Use RISBG to rotate Src2 into position and use it to replace the
     // field in RotatedOldVal.
@@ -5465,7 +5470,10 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
   BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
     .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
   BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
-    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
+      .addReg(OldVal)
+      .addReg(NewVal)
+      .add(Base)
+      .addImm(Disp);
   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
     .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
   MBB->addSuccessor(LoopMBB);
@@ -5533,8 +5541,7 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
   //   %OrigVal     = L Disp(%Base)
   //   # fall through to LoopMMB
   MBB = StartMBB;
-  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
-    .addOperand(Base).addImm(Disp).addReg(0);
+  BuildMI(MBB, DL, TII->get(LOpcode), OrigVal).add(Base).addImm(Disp).addReg(0);
   MBB->addSuccessor(LoopMBB);

   //  LoopMBB:
@@ -5581,7 +5588,10 @@ MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
   BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
     .addReg(RotatedNewVal).addReg(NegBitShift).addImm(0);
   BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
-    .addReg(OldVal).addReg(NewVal).addOperand(Base).addImm(Disp);
+      .addReg(OldVal)
+      .addReg(NewVal)
+      .add(Base)
+      .addImm(Disp);
   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
     .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
   MBB->addSuccessor(LoopMBB);
@@ -5642,7 +5652,9 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
   //   # fall through to LoopMMB
   MBB = StartMBB;
   BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
-    .addOperand(Base).addImm(Disp).addReg(0);
+      .add(Base)
+      .addImm(Disp)
+      .addReg(0);
   MBB->addSuccessor(LoopMBB);

   //  LoopMBB:
@@ -5696,7 +5708,10 @@ SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
   BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
     .addReg(RetrySwapVal).addReg(NegBitShift).addImm(-BitSize);
   BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
-    .addReg(OldVal).addReg(StoreVal).addOperand(Base).addImm(Disp);
+      .addReg(OldVal)
+      .addReg(StoreVal)
+      .add(Base)
+      .addImm(Disp);
   BuildMI(MBB, DL, TII->get(SystemZ::BRC))
     .addImm(SystemZ::CCMASK_CS).addImm(SystemZ::CCMASK_CS_NE).addMBB(LoopMBB);
   MBB->addSuccessor(LoopMBB);
@@ -5869,7 +5884,7 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
     if (!isUInt<12>(DestDisp)) {
       unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
       BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
-        .addOperand(DestBase)
+          .add(DestBase)
           .addImm(DestDisp)
           .addReg(0);
       DestBase = MachineOperand::CreateReg(Reg, false);
@@ -5878,15 +5893,18 @@ MachineBasicBlock *SystemZTargetLowering::emitMemMemWrapper(
     if (!isUInt<12>(SrcDisp)) {
       unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
       BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LAY), Reg)
-        .addOperand(SrcBase)
+          .add(SrcBase)
           .addImm(SrcDisp)
           .addReg(0);
       SrcBase = MachineOperand::CreateReg(Reg, false);
       SrcDisp = 0;
     }
     BuildMI(*MBB, MI, DL, TII->get(Opcode))
-      .addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
-      .addOperand(SrcBase).addImm(SrcDisp);
+        .add(DestBase)
+        .addImm(DestDisp)
+        .addImm(ThisLength)
+        .add(SrcBase)
+        .addImm(SrcDisp);
     DestDisp += ThisLength;
     SrcDisp += ThisLength;
     Length -= ThisLength;
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 3565d5f2c49..4019fc45158 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -780,10 +780,11 @@ bool SystemZInstrInfo::PredicateInstruction(
       MI.RemoveOperand(0);
       MI.setDesc(get(SystemZ::CallBRCL));
       MachineInstrBuilder(*MI.getParent()->getParent(), MI)
-        .addImm(CCValid).addImm(CCMask)
-        .addOperand(FirstOp)
-        .addRegMask(RegMask)
-        .addReg(SystemZ::CC, RegState::Implicit);
+          .addImm(CCValid)
+          .addImm(CCMask)
+          .add(FirstOp)
+          .addRegMask(RegMask)
+          .addReg(SystemZ::CC, RegState::Implicit);
       return true;
     }
     if (Opcode == SystemZ::CallBR) {
@@ -976,12 +977,12 @@ MachineInstr *SystemZInstrInfo::convertToThreeAddress(
       MachineInstrBuilder MIB(
           *MF, MF->CreateMachineInstr(get(ThreeOperandOpcode),
                                       MI.getDebugLoc(), /*NoImplicit=*/true));
-      MIB.addOperand(Dest);
+      MIB.add(Dest);
       // Keep the kill state, but drop the tied flag.
       MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
       // Keep the remaining operands as-is.
       for (unsigned I = 2; I < NumOps; ++I)
-        MIB.addOperand(MI.getOperand(I));
+        MIB.add(MI.getOperand(I));
       MBB->insert(MI, MIB);
       return finishConvertToThreeAddress(&MI, MIB, LV);
     }
@@ -1009,7 +1010,7 @@ MachineInstr *SystemZInstrInfo::convertToThreeAddress(
       MachineOperand &Src = MI.getOperand(1);
       MachineInstrBuilder MIB =
           BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
-            .addOperand(Dest)
+              .add(Dest)
               .addReg(0)
               .addReg(Src.getReg(), getKillRegState(Src.isKill()),
                       Src.getSubReg())
@@ -1091,7 +1092,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
       unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
       return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                      get(StoreOpcode))
-          .addOperand(MI.getOperand(1))
+          .add(MI.getOperand(1))
           .addFrameIndex(FrameIndex)
           .addImm(0)
           .addReg(0);
@@ -1132,7 +1133,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
           .addFrameIndex(FrameIndex)
           .addImm(0)
           .addImm(Size)
-          .addOperand(MI.getOperand(1))
+          .add(MI.getOperand(1))
           .addImm(MI.getOperand(2).getImm())
           .addMemOperand(MMO);
     }
@@ -1140,7 +1141,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
     if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
       return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                      get(SystemZ::MVC))
-          .addOperand(MI.getOperand(1))
+          .add(MI.getOperand(1))
           .addImm(MI.getOperand(2).getImm())
           .addImm(Size)
           .addFrameIndex(FrameIndex)
@@ -1164,7 +1165,7 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
       MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
                                         MI.getDebugLoc(), get(MemOpcode));
       for (unsigned I = 0; I < OpNum; ++I)
-        MIB.addOperand(MI.getOperand(I));
+        MIB.add(MI.getOperand(I));
       MIB.addFrameIndex(FrameIndex).addImm(Offset);
       if (MemDesc.TSFlags & SystemZII::HasIndex)
         MIB.addReg(0);
diff --git a/lib/Target/SystemZ/SystemZLongBranch.cpp b/lib/Target/SystemZ/SystemZLongBranch.cpp
index 14ff6afbd4a..c466faba9c8 100644
--- a/lib/Target/SystemZ/SystemZLongBranch.cpp
+++ b/lib/Target/SystemZ/SystemZLongBranch.cpp
@@ -354,13 +354,13 @@ void SystemZLongBranch::splitBranchOnCount(MachineInstr *MI,
   MachineBasicBlock *MBB = MI->getParent();
   DebugLoc DL = MI->getDebugLoc();
   BuildMI(*MBB, MI, DL, TII->get(AddOpcode))
-    .addOperand(MI->getOperand(0))
-    .addOperand(MI->getOperand(1))
-    .addImm(-1);
+      .add(MI->getOperand(0))
+      .add(MI->getOperand(1))
+      .addImm(-1);
   MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL))
-                         .addImm(SystemZ::CCMASK_ICMP)
-                         .addImm(SystemZ::CCMASK_CMP_NE)
-                         .addOperand(MI->getOperand(2));
+                           .addImm(SystemZ::CCMASK_ICMP)
+                           .addImm(SystemZ::CCMASK_CMP_NE)
+                           .add(MI->getOperand(2));
   // The implicit use of CC is a killing use.
   BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
   MI->eraseFromParent();
@@ -373,12 +373,12 @@ void SystemZLongBranch::splitCompareBranch(MachineInstr *MI,
   MachineBasicBlock *MBB = MI->getParent();
   DebugLoc DL = MI->getDebugLoc();
   BuildMI(*MBB, MI, DL, TII->get(CompareOpcode))
-    .addOperand(MI->getOperand(0))
-    .addOperand(MI->getOperand(1));
+      .add(MI->getOperand(0))
+      .add(MI->getOperand(1));
   MachineInstr *BRCL = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BRCL))
-                         .addImm(SystemZ::CCMASK_ICMP)
-                         .addOperand(MI->getOperand(2))
-                         .addOperand(MI->getOperand(3));
+                           .addImm(SystemZ::CCMASK_ICMP)
+                           .add(MI->getOperand(2))
+                           .add(MI->getOperand(3));
   // The implicit use of CC is a killing use.
   BRCL->addRegisterKilled(SystemZ::CC, &TII->getRegisterInfo());
   MI->eraseFromParent();
diff --git a/lib/Target/SystemZ/SystemZShortenInst.cpp b/lib/Target/SystemZ/SystemZShortenInst.cpp
index 83882fc0310..263aff8b7bf 100644
--- a/lib/Target/SystemZ/SystemZShortenInst.cpp
+++ b/lib/Target/SystemZ/SystemZShortenInst.cpp
@@ -167,10 +167,10 @@ bool SystemZShortenInst::shortenFPConv(MachineInstr &MI, unsigned Opcode) {
     MI.RemoveOperand(0);
     MI.setDesc(TII->get(Opcode));
     MachineInstrBuilder(*MI.getParent()->getParent(), &MI)
-      .addOperand(Dest)
-      .addOperand(Mode)
-      .addOperand(Src)
-      .addOperand(Suppress);
+        .add(Dest)
+        .add(Mode)
+        .add(Src)
+        .add(Suppress);
     return true;
   }
   return false;
diff --git a/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp b/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
index 0e2d8bbaf64..8846952e5af 100644
--- a/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyInstrInfo.cpp
@@ -183,11 +183,9 @@ unsigned WebAssemblyInstrInfo::insertBranch(MachineBasicBlock &MBB,
   assert(Cond.size() == 2 && "Expected a flag and a successor block");

   if (Cond[0].getImm()) {
-    BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).addOperand(Cond[1]);
+    BuildMI(&MBB, DL, get(WebAssembly::BR_IF)).addMBB(TBB).add(Cond[1]);
   } else {
-    BuildMI(&MBB, DL, get(WebAssembly::BR_UNLESS))
-        .addMBB(TBB)
-        .addOperand(Cond[1]);
+    BuildMI(&MBB, DL, get(WebAssembly::BR_UNLESS)).addMBB(TBB).add(Cond[1]);
   }
   if (!FBB)
     return 1;
diff --git a/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp b/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
index 7ea5d05a1b2..744a3ed427a 100644
--- a/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
+++ b/lib/Target/WebAssembly/WebAssemblyLowerBrUnless.cpp
@@ -118,7 +118,7 @@ bool WebAssemblyLowerBrUnless::runOnMachineFunction(MachineFunction &MF) {
     // delete the br_unless.
     assert(Inverted);
     BuildMI(MBB, MI, MI->getDebugLoc(), TII.get(WebAssembly::BR_IF))
-        .addOperand(MI->getOperand(0))
+        .add(MI->getOperand(0))
         .addReg(Cond);
     MBB.erase(MI);
   }
diff --git a/lib/Target/X86/X86CallFrameOptimization.cpp b/lib/Target/X86/X86CallFrameOptimization.cpp
index 78bd2add8c3..23606a39d1a 100644
--- a/lib/Target/X86/X86CallFrameOptimization.cpp
+++ b/lib/Target/X86/X86CallFrameOptimization.cpp
@@ -482,8 +482,7 @@ void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
       if (isInt<8>(Val))
        PushOpcode = Is64Bit ? X86::PUSH64i8 : X86::PUSH32i8;
     }
-    Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode))
-        .addOperand(PushOp);
+    Push = BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).add(PushOp);
     break;
   case X86::MOV32mr:
   case X86::MOV64mr:
@@ -496,9 +495,9 @@ void X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
       Reg = MRI->createVirtualRegister(&X86::GR64RegClass);
       BuildMI(MBB, Context.Call, DL, TII->get(X86::IMPLICIT_DEF), UndefReg);
       BuildMI(MBB, Context.Call, DL, TII->get(X86::INSERT_SUBREG), Reg)
-        .addReg(UndefReg)
-        .addOperand(PushOp)
-        .addImm(X86::sub_32bit);
+          .addReg(UndefReg)
+          .add(PushOp)
+          .addImm(X86::sub_32bit);
     }

     // If PUSHrmm is not slow on this target, try to fold the source of the
diff --git a/lib/Target/X86/X86ExpandPseudo.cpp b/lib/Target/X86/X86ExpandPseudo.cpp
index 192b942490e..c4bc29e963e 100644
--- a/lib/Target/X86/X86ExpandPseudo.cpp
+++ b/lib/Target/X86/X86ExpandPseudo.cpp
@@ -151,7 +151,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
               : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
       MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(Op));
       for (unsigned i = 0; i != 5; ++i)
-        MIB.addOperand(MBBI->getOperand(i));
+        MIB.add(MBBI->getOperand(i));
     } else if (Opcode == X86::TCRETURNri64) {
       BuildMI(MBB, MBBI, DL,
               TII->get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
@@ -214,7 +214,7 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
       MIB = BuildMI(MBB, MBBI, DL, TII->get(X86::RETL));
     }
     for (unsigned I = 1, E = MBBI->getNumOperands(); I != E; ++I)
-      MIB.addOperand(MBBI->getOperand(I));
+      MIB.add(MBBI->getOperand(I));
     MBB.erase(MBBI);
     return true;
   }
diff --git a/lib/Target/X86/X86FixupBWInsts.cpp b/lib/Target/X86/X86FixupBWInsts.cpp
index 8bde4bf98d6..3583980ff4f 100644
--- a/lib/Target/X86/X86FixupBWInsts.cpp
+++ b/lib/Target/X86/X86FixupBWInsts.cpp
@@ -226,7 +226,7 @@ MachineInstr *FixupBWInstPass::tryReplaceLoad(unsigned New32BitOpcode,

   unsigned NumArgs = MI->getNumOperands();
   for (unsigned i = 1; i < NumArgs; ++i)
-    MIB.addOperand(MI->getOperand(i));
+    MIB.add(MI->getOperand(i));

   MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
@@ -264,7 +264,7 @@ MachineInstr *FixupBWInstPass::tryReplaceCopy(MachineInstr *MI) const {
   // Drop imp-defs/uses that would be redundant with the new def/use.
   for (auto &Op : MI->implicit_operands())
     if (Op.getReg() != (Op.isDef() ? NewDestReg : NewSrcReg))
-      MIB.addOperand(Op);
+      MIB.add(Op);

   return MIB;
 }
diff --git a/lib/Target/X86/X86FixupLEAs.cpp b/lib/Target/X86/X86FixupLEAs.cpp
index 12095917ca3..2cd4c1a3e7b 100644
--- a/lib/Target/X86/X86FixupLEAs.cpp
+++ b/lib/Target/X86/X86FixupLEAs.cpp
@@ -120,8 +120,8 @@ FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
       BuildMI(*MF, MI.getDebugLoc(),
               TII->get(MI.getOpcode() == X86::MOV32rr ? X86::LEA32r
                                                       : X86::LEA64r))
-          .addOperand(Dest)
-          .addOperand(Src)
+          .add(Dest)
+          .add(Src)
           .addImm(1)
           .addReg(0)
           .addImm(0)
@@ -287,8 +287,8 @@ bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,

     MachineInstr *NewMI =
         BuildMI(*MFI, I, MI.getDebugLoc(), TII->get(NewOpcode))
-            .addOperand(MI.getOperand(0))
-            .addOperand(MI.getOperand(1));
+            .add(MI.getOperand(0))
+            .add(MI.getOperand(1));
     MFI->erase(I);
     I = static_cast<MachineBasicBlock::iterator>(NewMI);
     return true;
@@ -377,9 +377,9 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I,
     const MachineOperand &Src1 = MI.getOperand(SrcR1 == DstR ? 1 : 3);
3 : 1); NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addrr_opcode)) - .addOperand(Dst) - .addOperand(Src1) - .addOperand(Src2); + .add(Dst) + .add(Src1) + .add(Src2); MFI->insert(I, NewMI); DEBUG(NewMI->dump();); } @@ -387,8 +387,8 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I, if (MI.getOperand(4).getImm() != 0) { const MachineOperand &SrcR = MI.getOperand(SrcR1 == DstR ? 1 : 3); NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addri_opcode)) - .addOperand(Dst) - .addOperand(SrcR) + .add(Dst) + .add(SrcR) .addImm(MI.getOperand(4).getImm()); MFI->insert(I, NewMI); DEBUG(NewMI->dump();); diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index e07e148ee95..8a054462e5f 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -24347,7 +24347,7 @@ static MachineBasicBlock *emitPCMPSTRM(MachineInstr &MI, MachineBasicBlock *BB, for (unsigned i = 1; i < NumArgs; ++i) { MachineOperand &Op = MI.getOperand(i); if (!(Op.isReg() && Op.isImplicit())) - MIB.addOperand(Op); + MIB.add(Op); } if (MI.hasOneMemOperand()) MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); @@ -24383,7 +24383,7 @@ static MachineBasicBlock *emitPCMPSTRI(MachineInstr &MI, MachineBasicBlock *BB, for (unsigned i = 1; i < NumArgs; ++i) { MachineOperand &Op = MI.getOperand(i); if (!(Op.isReg() && Op.isImplicit())) - MIB.addOperand(Op); + MIB.add(Op); } if (MI.hasOneMemOperand()) MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); @@ -24443,7 +24443,7 @@ static MachineBasicBlock *emitMonitor(MachineInstr &MI, MachineBasicBlock *BB, unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX; MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); for (int i = 0; i < X86::AddrNumOperands; ++i) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); unsigned ValOps = X86::AddrNumOperands; BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) @@ -24581,12 +24581,12 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, // Load the offset value into a register OffsetReg = MRI.createVirtualRegister(OffsetRegClass); BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, UseFPOffset ? 4 : 0) - .addOperand(Segment) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, UseFPOffset ? 4 : 0) + .add(Segment) + .setMemRefs(MMOBegin, MMOEnd); // Check if there is enough room left to pull this argument. BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) @@ -24606,12 +24606,12 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, // Read the reg_save_area address. unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, 16) - .addOperand(Segment) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, 16) + .add(Segment) + .setMemRefs(MMOBegin, MMOEnd); // Zero-extend the offset unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); @@ -24633,13 +24633,13 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, // Store it back into the va_list. BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, UseFPOffset ? 
4 : 0) - .addOperand(Segment) - .addReg(NextOffsetReg) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, UseFPOffset ? 4 : 0) + .add(Segment) + .addReg(NextOffsetReg) + .setMemRefs(MMOBegin, MMOEnd); // Jump to endMBB BuildMI(offsetMBB, DL, TII->get(X86::JMP_1)) @@ -24653,12 +24653,12 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, // Load the overflow_area address into a register. unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, 8) - .addOperand(Segment) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, 8) + .add(Segment) + .setMemRefs(MMOBegin, MMOEnd); // If we need to align it, do so. Otherwise, just copy the address // to OverflowDestReg. @@ -24689,13 +24689,13 @@ X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, // Store the new overflow address. BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) - .addOperand(Base) - .addOperand(Scale) - .addOperand(Index) - .addDisp(Disp, 8) - .addOperand(Segment) - .addReg(NextAddrReg) - .setMemRefs(MMOBegin, MMOEnd); + .add(Base) + .add(Scale) + .add(Index) + .addDisp(Disp, 8) + .add(Segment) + .addReg(NextAddrReg) + .setMemRefs(MMOBegin, MMOEnd); // If we branched, emit the PHI to the front of endMBB. if (offsetMBB) { @@ -25168,12 +25168,12 @@ X86TargetLowering::EmitLoweredAtomicFP(MachineInstr &MI, // instruction using the same address operands. if (Operand.isReg()) Operand.setIsKill(false); - MIB.addOperand(Operand); + MIB.add(Operand); } MachineInstr *FOpMI = MIB; MIB = BuildMI(*BB, MI, DL, TII->get(MOp)); for (int i = 0; i < X86::AddrNumOperands; ++i) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill); MI.eraseFromParent(); // The pseudo instruction is gone now. 
return BB; @@ -25553,7 +25553,7 @@ X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, if (i == X86::AddrDisp) MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset); else - MIB.addOperand(MI.getOperand(MemOpndSlot + i)); + MIB.add(MI.getOperand(MemOpndSlot + i)); } if (!UseImmLabel) MIB.addReg(LabelReg); @@ -25636,7 +25636,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, // Reload FP MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP); for (unsigned i = 0; i < X86::AddrNumOperands; ++i) - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); MIB.setMemRefs(MMOBegin, MMOEnd); // Reload IP MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp); @@ -25644,7 +25644,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, if (i == X86::AddrDisp) MIB.addDisp(MI.getOperand(i), LabelOffset); else - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); } MIB.setMemRefs(MMOBegin, MMOEnd); // Reload SP @@ -25653,7 +25653,7 @@ X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, if (i == X86::AddrDisp) MIB.addDisp(MI.getOperand(i), SPOffset); else - MIB.addOperand(MI.getOperand(i)); + MIB.add(MI.getOperand(i)); } MIB.setMemRefs(MMOBegin, MMOEnd); // Jump diff --git a/lib/Target/X86/X86InstrBuilder.h b/lib/Target/X86/X86InstrBuilder.h index ba970bc2048..dcce7b9951f 100644 --- a/lib/Target/X86/X86InstrBuilder.h +++ b/lib/Target/X86/X86InstrBuilder.h @@ -147,7 +147,7 @@ addOffset(const MachineInstrBuilder &MIB, int Offset) { static inline const MachineInstrBuilder & addOffset(const MachineInstrBuilder &MIB, const MachineOperand& Offset) { - return MIB.addImm(1).addReg(0).addOperand(Offset).addReg(0); + return MIB.addImm(1).addReg(0).add(Offset).addReg(0); } /// addRegOffset - This function is used to add a memory reference of the form diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 44343f53074..8c5dbd51866 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -3565,7 +3565,7 @@ void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB, const DebugLoc &DL = Orig.getDebugLoc(); BuildMI(MBB, I, DL, get(X86::MOV32ri)) - .addOperand(Orig.getOperand(0)) + .add(Orig.getOperand(0)) .addImm(Value); } else { MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig); @@ -3650,10 +3650,10 @@ bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src, // Virtual register of the wrong class, we have to create a temporary 64-bit // vreg to feed into the LEA. NewSrc = MF.getRegInfo().createVirtualRegister(RC); - MachineInstr *Copy = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), - get(TargetOpcode::COPY)) - .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit) - .addOperand(Src); + MachineInstr *Copy = + BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY)) + .addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit) + .add(Src); // Which is obviously going to be dead after we're done with it. 
   isKill = true;
@@ -3819,10 +3819,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return nullptr;
 
     NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
-                .addOperand(Dest)
+                .add(Dest)
                 .addReg(0)
                 .addImm(1ULL << ShAmt)
-                .addOperand(Src)
+                .add(Src)
                 .addImm(0)
                 .addReg(0);
     break;
@@ -3844,14 +3844,14 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
 
     MachineInstrBuilder MIB =
         BuildMI(MF, MI.getDebugLoc(), get(Opc))
-            .addOperand(Dest)
+            .add(Dest)
             .addReg(0)
             .addImm(1ULL << ShAmt)
             .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
            .addImm(0)
             .addReg(0);
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
 
     NewMI = MIB;
     break;
@@ -3865,10 +3865,10 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return is64Bit ?
          convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV) : nullptr;
 
     NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
-                .addOperand(Dest)
+                .add(Dest)
                 .addReg(0)
                 .addImm(1ULL << ShAmt)
-                .addOperand(Src)
+                .add(Src)
                 .addImm(0)
                 .addReg(0);
     break;
@@ -3887,11 +3887,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
 
     MachineInstrBuilder MIB =
         BuildMI(MF, MI.getDebugLoc(), get(Opc))
-            .addOperand(Dest)
+            .add(Dest)
             .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
 
     NewMI = addOffset(MIB, 1);
     break;
@@ -3901,10 +3901,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return is64Bit ?
          convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV) : nullptr;
     assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
-    NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
-                          .addOperand(Dest)
-                          .addOperand(Src),
-                      1);
+    NewMI = addOffset(
+        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), 1);
     break;
   case X86::DEC64r:
   case X86::DEC32r: {
@@ -3920,11 +3918,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return nullptr;
 
     MachineInstrBuilder MIB =
         BuildMI(MF, MI.getDebugLoc(), get(Opc))
-            .addOperand(Dest)
+            .add(Dest)
             .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
 
     NewMI = addOffset(MIB, -1);
 
@@ -3935,10 +3933,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return is64Bit ?
          convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV) : nullptr;
     assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
-    NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
-                          .addOperand(Dest)
-                          .addOperand(Src),
-                      -1);
+    NewMI = addOffset(
+        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), -1);
     break;
   case X86::ADD64rr:
   case X86::ADD64rr_DB:
@@ -3966,12 +3962,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                         SrcReg2, isKill2, isUndef2, ImplicitOp2, LV))
       return nullptr;
 
-    MachineInstrBuilder MIB =
-        BuildMI(MF, MI.getDebugLoc(), get(Opc)).addOperand(Dest);
+    MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
     if (ImplicitOp2.getReg() != 0)
-      MIB.addOperand(ImplicitOp2);
+      MIB.add(ImplicitOp2);
 
     NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
@@ -3991,9 +3986,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
     unsigned Src2 = MI.getOperand(2).getReg();
     bool isKill2 = MI.getOperand(2).isKill();
-    NewMI = addRegReg(
-        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).addOperand(Dest),
-        Src.getReg(), Src.isKill(), Src2, isKill2);
+    NewMI = addRegReg(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest),
+                      Src.getReg(), Src.isKill(), Src2, isKill2);
 
     // Preserve undefness of the operands.
     bool isUndef = MI.getOperand(1).isUndef();
@@ -4010,10 +4004,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
   case X86::ADD64ri32_DB:
   case X86::ADD64ri8_DB:
     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
-    NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
-                          .addOperand(Dest)
-                          .addOperand(Src),
-                      MI.getOperand(2));
+    NewMI = addOffset(
+        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
+        MI.getOperand(2));
     break;
   case X86::ADD32ri:
   case X86::ADD32ri8:
@@ -4030,11 +4023,11 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
       return nullptr;
 
     MachineInstrBuilder MIB =
         BuildMI(MF, MI.getDebugLoc(), get(Opc))
-            .addOperand(Dest)
+            .add(Dest)
             .addReg(SrcReg, getUndefRegState(isUndef) | getKillRegState(isKill));
     if (ImplicitOp.getReg() != 0)
-      MIB.addOperand(ImplicitOp);
+      MIB.add(ImplicitOp);
 
     NewMI = addOffset(MIB, MI.getOperand(2));
     break;
@@ -4047,10 +4040,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
      return is64Bit ?
          convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV) : nullptr;
     assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
-    NewMI = addOffset(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
-                          .addOperand(Dest)
-                          .addOperand(Src),
-                      MI.getOperand(2));
+    NewMI = addOffset(
+        BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src),
+        MI.getOperand(2));
     break;
   }
 
@@ -6040,7 +6032,7 @@ void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
   DebugLoc DL;
   MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
   for (unsigned i = 0, e = Addr.size(); i != e; ++i)
-    MIB.addOperand(Addr[i]);
+    MIB.add(Addr[i]);
   MIB.addReg(SrcReg, getKillRegState(isKill));
   (*MIB).setMemRefs(MMOBegin, MMOEnd);
   NewMIs.push_back(MIB);
@@ -6075,7 +6067,7 @@ void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
   DebugLoc DL;
   MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
   for (unsigned i = 0, e = Addr.size(); i != e; ++i)
-    MIB.addOperand(Addr[i]);
+    MIB.add(Addr[i]);
   (*MIB).setMemRefs(MMOBegin, MMOEnd);
   NewMIs.push_back(MIB);
 }
@@ -6935,7 +6927,7 @@ static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
   if (NumAddrOps < 4) {
     // FrameIndex only - add an immediate offset (whether its zero or not).
     for (unsigned i = 0; i != NumAddrOps; ++i)
-      MIB.addOperand(MOs[i]);
+      MIB.add(MOs[i]);
     addOffset(MIB, PtrOffset);
   } else {
     // General Memory Addressing - we need to add any offset to an existing
@@ -6946,7 +6938,7 @@ static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
       if (i == 3 && PtrOffset != 0) {
         MIB.addDisp(MO, PtrOffset);
       } else {
-        MIB.addOperand(MO);
+        MIB.add(MO);
       }
     }
   }
@@ -6968,11 +6960,11 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
   unsigned NumOps = MI.getDesc().getNumOperands() - 2;
   for (unsigned i = 0; i != NumOps; ++i) {
     MachineOperand &MO = MI.getOperand(i + 2);
-    MIB.addOperand(MO);
+    MIB.add(MO);
   }
   for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
     MachineOperand &MO = MI.getOperand(i);
-    MIB.addOperand(MO);
+    MIB.add(MO);
   }
 
   MachineBasicBlock *MBB = InsertPt->getParent();
@@ -6997,7 +6989,7 @@ static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
       assert(MO.isReg() && "Expected to fold into reg operand!");
       addOperands(MIB, MOs, PtrOffset);
     } else {
-      MIB.addOperand(MO);
+      MIB.add(MO);
     }
   }
 
@@ -7875,11 +7867,11 @@ bool X86InstrInfo::unfoldMemoryOperand(
   if (FoldedStore)
     MIB.addReg(Reg, RegState::Define);
   for (MachineOperand &BeforeOp : BeforeOps)
-    MIB.addOperand(BeforeOp);
+    MIB.add(BeforeOp);
   if (FoldedLoad)
     MIB.addReg(Reg);
   for (MachineOperand &AfterOp : AfterOps)
-    MIB.addOperand(AfterOp);
+    MIB.add(AfterOp);
   for (MachineOperand &ImpOp : ImpOps) {
     MIB.addReg(ImpOp.getReg(), getDefRegState(ImpOp.isDef()) |