if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
const LLT &DstTy = MRI.getType(DstReg);
if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
- auto CstVal = SrcMI->getOperand(1);
- APInt Val = CstVal.isImm()
- ? APInt(DstTy.getSizeInBits(), CstVal.getImm())
- : CstVal.getCImm()->getValue();
- Val = Val.sext(DstTy.getSizeInBits());
- Builder.buildConstant(DstReg, Val);
+ auto &CstVal = SrcMI->getOperand(1);
+ Builder.buildConstant(
+ DstReg, CstVal.getCImm()->getValue().sext(DstTy.getSizeInBits()));
markInstAndDefDead(MI, *SrcMI, DeadInsts);
return true;
}
markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
return true;
}
+
+ // Try to fold zext(g_constant) when the larger constant type is legal.
+ // Can't use MIPattern because we don't have a specific constant in mind.
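+ // E.g. "%1:_(s8) = G_CONSTANT i8 46; %2:_(s32) = G_ZEXT %1(s8)" collapses
+ // to a single "%2:_(s32) = G_CONSTANT i32 46", as the updated tests below
+ // expect.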
+ auto *SrcMI = MRI.getVRegDef(SrcReg);
+ if (SrcMI->getOpcode() == TargetOpcode::G_CONSTANT) {
+ const LLT &DstTy = MRI.getType(DstReg);
+ if (isInstLegal({TargetOpcode::G_CONSTANT, {DstTy}})) {
+ auto &CstVal = SrcMI->getOperand(1);
+ Builder.buildConstant(
+ DstReg, CstVal.getCImm()->getValue().zext(DstTy.getSizeInBits()));
+ markInstAndDefDead(MI, *SrcMI, DeadInsts);
+ return true;
+ }
+ }
return tryFoldImplicitDef(MI, DeadInsts);
}
// 47: FPExt vector: 64 to 128. <-- This must match FPExt64To128Idx.
{&AArch64GenRegisterBankInfo::PartMappings[PMI_FPR128 - PMI_Min], 1},
{&AArch64GenRegisterBankInfo::PartMappings[PMI_FPR64 - PMI_Min], 1},
+ // 49: Shift scalar with 64 bit shift imm
+ {&AArch64GenRegisterBankInfo::PartMappings[PMI_GPR32 - PMI_Min], 1},
+ {&AArch64GenRegisterBankInfo::PartMappings[PMI_GPR32 - PMI_Min], 1},
+ {&AArch64GenRegisterBankInfo::PartMappings[PMI_GPR64 - PMI_Min], 1},
};
bool AArch64GenRegisterBankInfo::checkPartialMap(unsigned Idx,
/// the patterns that don't require complex C++.
bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
+ // A lowering phase that runs before any selection attempts.
+ void preISelLower(MachineInstr &I) const;
+
+ // An early selection function that runs before the selectImpl() call.
+ bool earlySelect(MachineInstr &I) const;
+
+ bool earlySelectSHL(MachineInstr &I, MachineRegisterInfo &MRI) const;
+
bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
MachineRegisterInfo &MRI) const;
bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
MachineInstr *emitCSetForICMP(Register DefReg, unsigned Pred,
MachineIRBuilder &MIRBuilder) const;
+ // Equivalent to the i32shift_a and friends from AArch64InstrInfo.td.
+ // We use these manually instead of using the importer since it doesn't
+ // support SDNodeXForm.
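+ // For example, "lsl w0, w0, #8" is an alias of "ubfm w0, w0, #24, #23":
+ // selectShiftA_32 returns (32 - 8) & 0x1f = 24 and selectShiftB_32 returns
+ // 31 - 8 = 23.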
+ ComplexRendererFns selectShiftA_32(const MachineOperand &Root) const;
+ ComplexRendererFns selectShiftB_32(const MachineOperand &Root) const;
+ ComplexRendererFns selectShiftA_64(const MachineOperand &Root) const;
+ ComplexRendererFns selectShiftB_64(const MachineOperand &Root) const;
+
ComplexRendererFns selectArithImmed(MachineOperand &Root) const;
ComplexRendererFns selectAddrModeUnscaled(MachineOperand &Root,
return;
}
+void AArch64InstructionSelector::preISelLower(MachineInstr &I) const {
+ MachineBasicBlock &MBB = *I.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ switch (I.getOpcode()) {
+ case TargetOpcode::G_SHL:
+ case TargetOpcode::G_ASHR:
+ case TargetOpcode::G_LSHR: {
+ // These shifts are legalized to have 64 bit shift amounts because we want
+ // to take advantage of the existing imported selection patterns that assume
+ // the immediates are s64s. However, if the shifted type is 32 bits and for
+ // some reason we receive input GMIR that has an s64 shift amount that's not
+ // a G_CONSTANT, insert a truncate so that we can still select the s32
+ // register-register variant.
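+ // E.g. in the lshr_32_notimm64 test below, an s64 shift amount produced by
+ // a G_AND is rewritten through a sub_32 COPY so the imported LSRVWr pattern
+ // can still match.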
+ unsigned SrcReg = I.getOperand(1).getReg();
+ unsigned ShiftReg = I.getOperand(2).getReg();
+ const LLT ShiftTy = MRI.getType(ShiftReg);
+ const LLT SrcTy = MRI.getType(SrcReg);
+ if (SrcTy.isVector())
+ return;
+ assert(!ShiftTy.isVector() && "unexpected vector shift ty");
+ if (SrcTy.getSizeInBits() != 32 || ShiftTy.getSizeInBits() != 64)
+ return;
+ auto *AmtMI = MRI.getVRegDef(ShiftReg);
+ assert(AmtMI && "could not find a vreg definition for shift amount");
+ if (AmtMI->getOpcode() != TargetOpcode::G_CONSTANT) {
+ // Insert a subregister copy to implement a 64->32 trunc
+ MachineIRBuilder MIB(I);
+ auto Trunc = MIB.buildInstr(TargetOpcode::COPY, {SrcTy}, {})
+ .addReg(ShiftReg, 0, AArch64::sub_32);
+ MRI.setRegBank(Trunc.getReg(0), RBI.getRegBank(AArch64::GPRRegBankID));
+ I.getOperand(2).setReg(Trunc.getReg(0));
+ }
+ return;
+ }
+ default:
+ return;
+ }
+}
+
+bool AArch64InstructionSelector::earlySelectSHL(
+ MachineInstr &I, MachineRegisterInfo &MRI) const {
+ // We try to match the immediate variant of LSL, which is actually an alias
+ // for a special case of UBFM. Otherwise, we fall back to the imported
+ // selector which will match the register variant.
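+ // E.g. "%2:gpr(s32) = G_SHL %0, %1" with "%1:gpr(s32) = G_CONSTANT i32 8"
+ // selects to "UBFMWri %0, 24, 23" rather than a LSLVWr, as in the
+ // shl_cimm_32 test below.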
+ assert(I.getOpcode() == TargetOpcode::G_SHL && "unexpected op");
+ const auto &MO = I.getOperand(2);
+ auto VRegAndVal = getConstantVRegVal(MO.getReg(), MRI);
+ if (!VRegAndVal)
+ return false;
+
+ const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
+ if (DstTy.isVector())
+ return false;
+ bool Is64Bit = DstTy.getSizeInBits() == 64;
+ auto Imm1Fn = Is64Bit ? selectShiftA_64(MO) : selectShiftA_32(MO);
+ auto Imm2Fn = Is64Bit ? selectShiftB_64(MO) : selectShiftB_32(MO);
+ MachineIRBuilder MIB(I);
+
+ if (!Imm1Fn || !Imm2Fn)
+ return false;
+
+ auto NewI =
+ MIB.buildInstr(Is64Bit ? AArch64::UBFMXri : AArch64::UBFMWri,
+ {I.getOperand(0).getReg()}, {I.getOperand(1).getReg()});
+
+ for (auto &RenderFn : *Imm1Fn)
+ RenderFn(NewI);
+ for (auto &RenderFn : *Imm2Fn)
+ RenderFn(NewI);
+
+ I.eraseFromParent();
+ return constrainSelectedInstRegOperands(*NewI, TII, TRI, RBI);
+}
+
+bool AArch64InstructionSelector::earlySelect(MachineInstr &I) const {
+ assert(I.getParent() && "Instruction should be in a basic block!");
+ assert(I.getParent()->getParent() && "Instruction should be in a function!");
+
+ MachineBasicBlock &MBB = *I.getParent();
+ MachineFunction &MF = *MBB.getParent();
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ switch (I.getOpcode()) {
+ case TargetOpcode::G_SHL:
+ return earlySelectSHL(I, MRI);
+ default:
+ return false;
+ }
+}
+
bool AArch64InstructionSelector::select(MachineInstr &I,
CodeGenCoverage &CoverageInfo) const {
assert(I.getParent() && "Instruction should be in a basic block!");
return false;
}
+ // Try to do some lowering before we start instruction selecting. These
+ // lowerings are purely transformations on the input G_MIR and so selection
+ // must continue after any modification of the instruction.
+ preISelLower(I);
+
+ // There may be patterns that the importer can't handle optimally: it still
+ // selects them, but to a suboptimal sequence, so our custom C++ selection
+ // code later never gets a chance to work on them. Therefore, we attempt an
+ // early selection here to give priority to certain selection routines over
+ // the imported ones.
+ if (earlySelect(I))
+ return true;
+
if (selectImpl(I, CoverageInfo))
return true;
return false;
}
-/// SelectArithImmed - Select an immediate value that can be represented as
-/// a 12-bit value shifted left by either 0 or 12. If so, return true with
-/// Val set to the 12-bit value and Shift set to the shifter operand.
-InstructionSelector::ComplexRendererFns
-AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
- MachineInstr &MI = *Root.getParent();
- MachineBasicBlock &MBB = *MI.getParent();
- MachineFunction &MF = *MBB.getParent();
- MachineRegisterInfo &MRI = MF.getRegInfo();
-
- // This function is called from the addsub_shifted_imm ComplexPattern,
- // which lists [imm] as the list of opcode it's interested in, however
- // we still need to check whether the operand is actually an immediate
- // here because the ComplexPattern opcode list is only used in
- // root-level opcode matching.
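+/// If \p Root is an immediate operand or traces back to a G_CONSTANT def,
+/// return the immediate value; otherwise return None.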
+static Optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
+ auto &MI = *Root.getParent();
+ auto &MBB = *MI.getParent();
+ auto &MF = *MBB.getParent();
+ auto &MRI = MF.getRegInfo();
uint64_t Immed;
if (Root.isImm())
Immed = Root.getImm();
Immed = Op1.getCImm()->getZExtValue();
} else
return None;
+ return Immed;
+}
+
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectShiftA_32(const MachineOperand &Root) const {
+ auto MaybeImmed = getImmedFromMO(Root);
+ if (MaybeImmed == None || *MaybeImmed > 31)
+ return None;
+ uint64_t Enc = (32 - *MaybeImmed) & 0x1f;
+ return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
+}
+
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectShiftB_32(const MachineOperand &Root) const {
+ auto MaybeImmed = getImmedFromMO(Root);
+ if (MaybeImmed == None || *MaybeImmed > 31)
+ return None;
+ uint64_t Enc = 31 - *MaybeImmed;
+ return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
+}
+
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectShiftA_64(const MachineOperand &Root) const {
+ auto MaybeImmed = getImmedFromMO(Root);
+ if (MaybeImmed == None || *MaybeImmed > 63)
+ return None;
+ uint64_t Enc = (64 - *MaybeImmed) & 0x3f;
+ return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
+}
+
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectShiftB_64(const MachineOperand &Root) const {
+ auto MaybeImmed = getImmedFromMO(Root);
+ if (MaybeImmed == None || *MaybeImmed > 63)
+ return None;
+ uint64_t Enc = 63 - *MaybeImmed;
+ return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(Enc); }}};
+}
+
+/// SelectArithImmed - Select an immediate value that can be represented as
+/// a 12-bit value shifted left by either 0 or 12. If so, return the encoded
+/// value and the shift amount as renderer functions.
+InstructionSelector::ComplexRendererFns
+AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
+ // This function is called from the addsub_shifted_imm ComplexPattern,
+ // which lists [imm] as the list of opcodes it's interested in. However,
+ // we still need to check whether the operand is actually an immediate
+ // here, because the ComplexPattern opcode list is only used in
+ // root-level opcode matching.
+ auto MaybeImmed = getImmedFromMO(Root);
+ if (MaybeImmed == None)
+ return None;
+ uint64_t Immed = *MaybeImmed;
unsigned ShiftAmt;
if (Immed >> 12 == 0) {
.scalarize(0);
getActionDefinitionsBuilder({G_LSHR, G_ASHR})
- .legalFor({{s32, s32}, {s64, s64}, {v2s32, v2s32}, {v4s32, v4s32}})
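+ // Custom-legalize 32-bit shifts with 32-bit amounts: legalizeShlAshrLshr
+ // below promotes constant amounts to s64 so the imported immediate
+ // patterns can apply.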
+ .customIf([=](const LegalityQuery &Query) {
+ const auto &SrcTy = Query.Types[0];
+ const auto &AmtTy = Query.Types[1];
+ return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
+ AmtTy.getSizeInBits() == 32;
+ })
+ .legalFor(
+ {{s32, s32}, {s32, s64}, {s64, s64}, {v2s32, v2s32}, {v4s32, v4s32}})
.clampScalar(1, s32, s64)
.clampScalar(0, s32, s64)
.minScalarSameAs(1, 0);
case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE:
return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
+ case TargetOpcode::G_SHL:
+ case TargetOpcode::G_ASHR:
+ case TargetOpcode::G_LSHR:
+ return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer);
}
llvm_unreachable("expected switch to return");
}
+bool AArch64LegalizerInfo::legalizeShlAshrLshr(
+ MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
+ GISelChangeObserver &Observer) const {
+ assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
+ MI.getOpcode() == TargetOpcode::G_LSHR ||
+ MI.getOpcode() == TargetOpcode::G_SHL);
+ // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
+ // imported patterns can select it later. Either way, it will be legal.
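+ // E.g. "%1:_(s32) = G_CONSTANT i32 8" feeding an s32 G_LSHR becomes a zext
+ // to s64, which the artifact combiner then folds into a plain
+ // "G_CONSTANT i64 8".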
+ Register AmtReg = MI.getOperand(2).getReg();
+ auto *CstMI = MRI.getVRegDef(AmtReg);
+ assert(CstMI && "expected to find a vreg def");
+ if (CstMI->getOpcode() != TargetOpcode::G_CONSTANT)
+ return true;
+ // Check the shift amount is in range for an immediate form.
+ unsigned Amount = CstMI->getOperand(1).getCImm()->getZExtValue();
+ if (Amount > 31)
+ return true; // This will have to remain a register variant.
+ assert(MRI.getType(AmtReg).getSizeInBits() == 32);
+ MIRBuilder.setInstr(MI);
+ auto ExtCst = MIRBuilder.buildZExt(LLT::scalar(64), AmtReg);
+ MI.getOperand(2).setReg(ExtCst.getReg(0));
+ return true;
+}
+
bool AArch64LegalizerInfo::legalizeLoadStore(
MachineInstr &MI, MachineRegisterInfo &MRI, MachineIRBuilder &MIRBuilder,
GISelChangeObserver &Observer) const {
bool legalizeLoadStore(MachineInstr &MI, MachineRegisterInfo &MRI,
MachineIRBuilder &MIRBuilder,
GISelChangeObserver &Observer) const;
+ bool legalizeShlAshrLshr(MachineInstr &MI, MachineRegisterInfo &MRI,
+ MachineIRBuilder &MIRBuilder,
+ GISelChangeObserver &Observer) const;
};
} // End llvm namespace.
#endif
case TargetOpcode::G_AND:
case TargetOpcode::G_OR:
case TargetOpcode::G_XOR:
- // Shifts.
- case TargetOpcode::G_SHL:
- case TargetOpcode::G_LSHR:
- case TargetOpcode::G_ASHR:
// Floating point ops.
case TargetOpcode::G_FADD:
case TargetOpcode::G_FSUB:
DefaultMappingID, /*Cost*/ 1,
getFPExtMapping(DstTy.getSizeInBits(), SrcTy.getSizeInBits()),
/*NumOperands*/ 2);
+ }
+ // Shifts.
+ case TargetOpcode::G_SHL:
+ case TargetOpcode::G_LSHR:
+ case TargetOpcode::G_ASHR: {
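+ // A 32-bit shift with a 64-bit shift amount needs the mixed
+ // GPR32/GPR32/GPR64 mapping added above (Shift64Imm); all other shifts use
+ // operands of a single common size.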
+ LLT ShiftAmtTy = MRI.getType(MI.getOperand(2).getReg());
+ LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
+ if (ShiftAmtTy.getSizeInBits() == 64 && SrcTy.getSizeInBits() == 32)
+ return getInstructionMapping(DefaultMappingID, 1,
+ &ValMappings[Shift64Imm], 3);
+ return getSameKindOfOperandsMapping(MI);
}
case TargetOpcode::COPY: {
unsigned DstReg = MI.getOperand(0).getReg();
FPExt16To64Idx = 43,
FPExt32To64Idx = 45,
FPExt64To128Idx = 47,
+ Shift64Imm = 49
};
static bool checkPartialMap(unsigned Idx, unsigned ValStartIdx,
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+ ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[C1]](s64)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[COPY2]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC1]], [[C]](s32)
- ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
+ ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s64)
; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[ASHR]], [[ASHR1]]
- ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
- ; CHECK: $w0 = COPY [[COPY2]](s32)
+ ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
+ ; CHECK: $w0 = COPY [[COPY3]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C2]]
; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C2]]
; CHECK: [[UDIV:%[0-9]+]]:_(s32) = G_UDIV [[AND]], [[AND1]]
- ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[UDIV]](s32)
- ; CHECK: $w0 = COPY [[COPY3]](s32)
+ ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[UDIV]](s32)
+ ; CHECK: $w0 = COPY [[COPY4]](s32)
%0:_(s64) = COPY $x0
%1:_(s64) = COPY $x1
%2:_(s8) = G_TRUNC %0(s64)
; CHECK: $x0 = COPY [[COPY3]](s64)
; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK: [[COPY4:%[0-9]+]]:_(s64) = COPY [[COPY]](s64)
- ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C1]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]]
+ ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY4]], [[C1]](s64)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]](s64)
; CHECK: $x0 = COPY [[ASHR]](s64)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC4]], [[C2]]
- ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C2]]
+ ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC4]], [[C2]](s32)
+ ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+ ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C3]](s64)
; CHECK: $w0 = COPY [[ASHR1]](s32)
- ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC5]], [[C3]]
+ ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC5]], [[C4]]
; CHECK: $w0 = COPY [[AND1]](s32)
; CHECK: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: $w0 = COPY [[TRUNC6]](s32)
- ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[TRUNC7:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC7]], [[C4]]
+ ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC7]], [[C5]]
; CHECK: $w0 = COPY [[AND2]](s32)
; CHECK: [[TRUNC8:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: $w0 = COPY [[TRUNC8]](s32)
- ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[TRUNC9:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[TRUNC9]], [[C5]]
- ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C5]]
+ ; CHECK: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[TRUNC9]], [[C6]](s32)
+ ; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; CHECK: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL2]], [[C7]](s64)
; CHECK: $w0 = COPY [[ASHR2]](s32)
; CHECK: [[TRUNC10:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC10]], [[C4]]
+ ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC10]], [[C5]]
; CHECK: $w0 = COPY [[AND3]](s32)
; CHECK: [[TRUNC11:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: $w0 = COPY [[TRUNC11]](s32)
; CHECK: $w0 = COPY [[TRUNC12]](s32)
; CHECK: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[TRUNC12]](s32)
; CHECK: $x0 = COPY [[FPEXT]](s64)
- ; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; CHECK: $w0 = COPY [[C7]](s32)
+ ; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C8]](s32)
+ ; CHECK: $w0 = COPY [[COPY5]](s32)
+ ; CHECK: $w0 = COPY [[C8]](s32)
; CHECK: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: $w0 = COPY [[DEF]](s32)
%0:_(s64) = COPY $x0
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
; CHECK: $w0 = COPY [[ASHR]](s32)
%0:_(s32) = COPY $w0
%1:_(s1) = G_TRUNC %0(s32)
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 31
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
; CHECK: $w0 = COPY [[SITOFP]](s32)
%0:_(s32) = COPY $w0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32)
; CHECK: $x0 = COPY [[SITOFP]](s64)
%0:_(s32) = COPY $w0
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
; CHECK: $w0 = COPY [[SITOFP]](s32)
%0:_(s32) = COPY $w0
bb.0:
; CHECK-LABEL: name: test_merge_s4
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
- ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 4
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C1]](s8)
+ ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C2]]
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[ZEXT]](s32)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[C1]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C2]]
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+ ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY [[C1]](s64)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[COPY2]](s64)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
- ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC1]], [[C]]
- ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]]
+ ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC1]], [[C]](s32)
+ ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C1]](s64)
; CHECK: [[SDIV:%[0-9]+]]:_(s32) = G_SDIV [[ASHR]], [[ASHR1]]
- ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
+ ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SDIV]](s32)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
- ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY2]], [[TRUNC2]]
+ ; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY3]], [[TRUNC2]]
; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[MUL]](s32)
- ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[COPY3]]
- ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
- ; CHECK: $w0 = COPY [[COPY4]](s32)
+ ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[MUL]](s32)
+ ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[COPY4]]
+ ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
+ ; CHECK: $w0 = COPY [[COPY5]](s32)
%0:_(s64) = COPY $x0
%1:_(s64) = COPY $x1
%2:_(s8) = G_TRUNC %0(s64)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC1]], [[C1]](s32)
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+ ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C2]](s64)
; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
; CHECK: $w0 = COPY [[COPY2]](s32)
$q0 = COPY %2
...
+---
+name: shl_cimm_32
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: shl_cimm_32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+ ; CHECK: $w0 = COPY [[SHL]](s32)
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = G_CONSTANT i32 8
+ %2:_(s32) = G_SHL %0, %1(s32)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: lshr_cimm_32
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: lshr_cimm_32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s64)
+ ; CHECK: $w0 = COPY [[LSHR]](s32)
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = G_CONSTANT i32 8
+ %2:_(s32) = G_LSHR %0, %1(s32)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: ashr_cimm_32
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: ashr_cimm_32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $w0
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[COPY]], [[C]](s64)
+ ; CHECK: $w0 = COPY [[ASHR]](s32)
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = G_CONSTANT i32 8
+ %2:_(s32) = G_ASHR %0, %1(s32)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
; CHECK-LABEL: name: zext_trunc_dead_inst_crash
; CHECK: bb.0:
; CHECK: successors: %bb.1(0x80000000)
- ; CHECK: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 46
- ; CHECK: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 26
; CHECK: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
; CHECK: bb.1:
; CHECK: successors: %bb.2(0x80000000)
; CHECK: [[PHI:%[0-9]+]]:_(s16) = G_PHI %32(s16), %bb.2, [[DEF]](s16), %bb.0
- ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[PHI]](s16)
- ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s8)
- ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[AND]](s32), [[ZEXT]]
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
+ ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 46
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[AND]](s32), [[C1]]
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[ICMP]](s32)
; CHECK: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[DEF1]]
- ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -33
- ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C3]]
+ ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -33
+ ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C2]]
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[AND1]](s32)
- ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -65
- ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[C4]]
+ ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -65
+ ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[C3]]
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
- ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C2]]
- ; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[C1]](s8)
- ; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[AND2]](s32), [[ZEXT1]]
+ ; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+ ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 26
+ ; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[AND2]](s32), [[C4]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ICMP1]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[COPY3]], [[COPY4]]
--- /dev/null
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64-unknown-unknown -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s
+---
+name: shl_cimm_32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: shl_cimm_32
+ ; CHECK: liveins: $w0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+ ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 8
+ ; CHECK: [[SHL:%[0-9]+]]:gpr(s32) = G_SHL [[COPY]], [[C]](s32)
+ ; CHECK: $w0 = COPY [[SHL]](s32)
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %1:_(s32) = G_CONSTANT i32 8
+ %2:_(s32) = G_SHL %0, %1(s32)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: shl_cimm_64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x0
+
+ ; CHECK-LABEL: name: shl_cimm_64
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
+ ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+ ; CHECK: [[SHL:%[0-9]+]]:gpr(s64) = G_SHL [[COPY]], [[C]](s64)
+ ; CHECK: $x0 = COPY [[SHL]](s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:_(s64) = COPY $x0
+ %1:_(s64) = G_CONSTANT i64 8
+ %2:_(s64) = G_SHL %0, %1(s64)
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: lshr_cimm_32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: lshr_cimm_32
+ ; CHECK: liveins: $w0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+ ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+ ; CHECK: [[LSHR:%[0-9]+]]:gpr(s32) = G_LSHR [[COPY]], [[C]](s64)
+ ; CHECK: $w0 = COPY [[LSHR]](s32)
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %3:_(s64) = G_CONSTANT i64 8
+ %2:_(s32) = G_LSHR %0, %3(s64)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: lshr_cimm_64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x0
+
+ ; CHECK-LABEL: name: lshr_cimm_64
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
+ ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+ ; CHECK: [[LSHR:%[0-9]+]]:gpr(s64) = G_LSHR [[COPY]], [[C]](s64)
+ ; CHECK: $x0 = COPY [[LSHR]](s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:_(s64) = COPY $x0
+ %1:_(s64) = G_CONSTANT i64 8
+ %2:_(s64) = G_LSHR %0, %1(s64)
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: ashr_cimm_32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: ashr_cimm_32
+ ; CHECK: liveins: $w0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr(s32) = COPY $w0
+ ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+ ; CHECK: [[ASHR:%[0-9]+]]:gpr(s32) = G_ASHR [[COPY]], [[C]](s64)
+ ; CHECK: $w0 = COPY [[ASHR]](s32)
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:_(s32) = COPY $w0
+ %3:_(s64) = G_CONSTANT i64 8
+ %2:_(s32) = G_ASHR %0, %3(s64)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: ashr_cimm_64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1:
+ liveins: $x0
+
+ ; CHECK-LABEL: name: ashr_cimm_64
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr(s64) = COPY $x0
+ ; CHECK: [[C:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 8
+ ; CHECK: [[ASHR:%[0-9]+]]:gpr(s64) = G_ASHR [[COPY]], [[C]](s64)
+ ; CHECK: $x0 = COPY [[ASHR]](s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:_(s64) = COPY $x0
+ %1:_(s64) = G_CONSTANT i64 8
+ %2:_(s64) = G_ASHR %0, %1(s64)
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
--- /dev/null
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -O0 -run-pass=instruction-select -verify-machineinstrs %s -global-isel-abort=1 -o - | FileCheck %s
+---
+name: shl_cimm_32
+legalized: true
+regBankSelected: true
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: shl_cimm_32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 24, 23
+ ; CHECK: $w0 = COPY [[UBFMWri]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_SHL %0, %1(s32)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: shl_cimm_64
+legalized: true
+regBankSelected: true
+body: |
+ bb.1:
+ liveins: $x0
+
+ ; CHECK-LABEL: name: shl_cimm_64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[COPY]], 56, 55
+ ; CHECK: $x0 = COPY [[UBFMXri]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_SHL %0, %1(s64)
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: lshr_cimm_32
+legalized: true
+regBankSelected: true
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: lshr_cimm_32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[UBFMWri:%[0-9]+]]:gpr32 = UBFMWri [[COPY]], 8, 31
+ ; CHECK: $w0 = COPY [[UBFMWri]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %3:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s32) = G_LSHR %0, %3(s64)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: lshr_cimm_64
+legalized: true
+regBankSelected: true
+body: |
+ bb.1:
+ liveins: $x0
+
+ ; CHECK-LABEL: name: lshr_cimm_64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64 = UBFMXri [[COPY]], 8, 63
+ ; CHECK: $x0 = COPY [[UBFMXri]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_LSHR %0, %1(s64)
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: ashr_cimm_32
+legalized: true
+regBankSelected: true
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: ashr_cimm_32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[SBFMWri:%[0-9]+]]:gpr32 = SBFMWri [[COPY]], 8, 31
+ ; CHECK: $w0 = COPY [[SBFMWri]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %3:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s32) = G_ASHR %0, %3(s64)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: ashr_cimm_64
+legalized: true
+regBankSelected: true
+body: |
+ bb.1:
+ liveins: $x0
+
+ ; CHECK-LABEL: name: ashr_cimm_64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[SBFMXri:%[0-9]+]]:gpr64 = SBFMXri [[COPY]], 8, 63
+ ; CHECK: $x0 = COPY [[SBFMXri]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_ASHR %0, %1(s64)
+ $x0 = COPY %2(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: lshr_32_notimm64
+legalized: true
+regBankSelected: true
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: lshr_32_notimm64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 8
+ ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr [[MOVi64imm]], [[MOVi64imm]]
+ ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[ANDXrr]].sub_32
+ ; CHECK: [[LSRVWr:%[0-9]+]]:gpr32 = LSRVWr [[COPY]], [[COPY1]]
+ ; CHECK: $w0 = COPY [[LSRVWr]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %3:gpr(s64) = G_CONSTANT i64 8
+ %4:gpr(s64) = G_AND %3, %3
+ %2:gpr(s32) = G_LSHR %0, %4(s64)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: ashr_32_notimm64
+legalized: true
+regBankSelected: true
+body: |
+ bb.1:
+ liveins: $w0
+
+ ; CHECK-LABEL: name: ashr_32_notimm64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 8
+ ; CHECK: [[ANDXrr:%[0-9]+]]:gpr64 = ANDXrr [[MOVi64imm]], [[MOVi64imm]]
+ ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[ANDXrr]].sub_32
+ ; CHECK: [[ASRVWr:%[0-9]+]]:gpr32 = ASRVWr [[COPY]], [[COPY1]]
+ ; CHECK: $w0 = COPY [[ASRVWr]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %3:gpr(s64) = G_CONSTANT i64 8
+ %4:gpr(s64) = G_AND %3, %3
+ %2:gpr(s32) = G_ASHR %0, %4(s64)
+ $w0 = COPY %2(s32)
+ RET_ReallyLR implicit $w0
+
+...
bb.0:
; CHECK-LABEL: name: test_zext_i1_to_s32
- ; CHECK: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s1)
- ; CHECK: $vgpr0 = COPY [[ZEXT]](s32)
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK: $vgpr0 = COPY [[C]](s32)
%0:_(s1) = G_CONSTANT i1 0
%1:_(s32) = G_ZEXT %0
$vgpr0 = COPY %1
bb.0:
; CHECK-LABEL: name: test_zext_i1_to_i64
- ; CHECK: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s1)
- ; CHECK: $vgpr0_vgpr1 = COPY [[ZEXT]](s64)
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: $vgpr0_vgpr1 = COPY [[C]](s64)
%0:_(s1) = G_CONSTANT i1 0
%1:_(s64) = G_ZEXT %0
$vgpr0_vgpr1 = COPY %1