unsigned TruncSrc;
if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
LLT DstTy = MRI.getType(DstReg);
- if (isInstUnsupported({TargetOpcode::G_SHL, {DstTy}}) ||
- isInstUnsupported({TargetOpcode::G_ASHR, {DstTy}}) ||
+ // Guess on the RHS shift amount type, which should be re-legalized if
+ // applicable.
+ if (isInstUnsupported({TargetOpcode::G_SHL, {DstTy, DstTy}}) ||
+ isInstUnsupported({TargetOpcode::G_ASHR, {DstTy, DstTy}}) ||
isInstUnsupported({TargetOpcode::G_CONSTANT, {DstTy}}))
return false;
LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
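// For illustration, a sketch of the sext(trunc) combine guarded above, with
// invented vreg names: for DstTy = s64,
//   %t:_(s32) = G_TRUNC %x:_(s64)
//   %d:_(s64) = G_SEXT %t(s32)
// is rewritten as
//   %c:_(s64) = G_CONSTANT i64 32
//   %s:_(s64) = G_SHL %x, %c(s64)
//   %d:_(s64) = G_ASHR %s, %c(s64)
// hence G_SHL, G_ASHR and G_CONSTANT must all be supported on DstTy.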
void widenScalarSrc(MachineInstr &MI, LLT WideTy, unsigned OpIdx,
unsigned ExtOpcode);
+ /// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
+ /// Use by truncating the operand's type to \p NarrowTy using G_TRUNC, and
+ /// replacing the vreg of the operand in place.
+ void narrowScalarSrc(MachineInstr &MI, LLT NarrowTy, unsigned OpIdx);
+
/// Legalize a single operand \p OpIdx of the machine instruction \p MI as a
/// Def by extending the operand's type to \p WideTy and truncating it back
/// with the \p TruncOpcode, and replacing the vreg of the operand in place.
Query.Types[TypeIdx].getSizeInBits();
},
[=](const LegalityQuery &Query) {
+ LLT T = Query.Types[LargeTypeIdx];
return std::make_pair(TypeIdx,
- Query.Types[LargeTypeIdx].getElementType());
+ T.isVector() ? T.getElementType() : T);
});
}
// Generic left-shift.
def G_SHL : GenericInstruction {
let OutOperandList = (outs type0:$dst);
- let InOperandList = (ins type0:$src1, type0:$src2);
+ let InOperandList = (ins type0:$src1, type1:$src2);
let hasSideEffects = 0;
}
// Generic logical right-shift.
def G_LSHR : GenericInstruction {
let OutOperandList = (outs type0:$dst);
- let InOperandList = (ins type0:$src1, type0:$src2);
+ let InOperandList = (ins type0:$src1, type1:$src2);
let hasSideEffects = 0;
}
// Generic arithmetic right-shift.
def G_ASHR : GenericInstruction {
let OutOperandList = (outs type0:$dst);
- let InOperandList = (ins type0:$src1, type0:$src2);
+ let InOperandList = (ins type0:$src1, type1:$src2);
let hasSideEffects = 0;
}
LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
unsigned TypeIdx,
LLT NarrowTy) {
- // FIXME: Don't know how to handle secondary types yet.
- if (TypeIdx != 0 && MI.getOpcode() != TargetOpcode::G_EXTRACT)
- return UnableToLegalize;
-
MIRBuilder.setInstr(MI);
uint64_t SizeOp0 = MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
return Legalized;
}
case TargetOpcode::G_INSERT: {
+ // FIXME: Don't know how to handle secondary types yet.
+ if (TypeIdx != 0)
+ return UnableToLegalize;
+
// FIXME: add support for when SizeOp0 isn't an exact multiple of
// NarrowSize.
if (SizeOp0 % NarrowSize != 0)
return UnableToLegalize;
MI.eraseFromParent();
return Legalized;
}
+ case TargetOpcode::G_SHL:
+ case TargetOpcode::G_LSHR:
+ case TargetOpcode::G_ASHR: {
+ if (TypeIdx != 1)
+ return UnableToLegalize; // TODO
+ narrowScalarSrc(MI, NarrowTy, 2);
+ return Legalized;
+ }
}
}
MO.setReg(ExtB->getOperand(0).getReg());
}
+void LegalizerHelper::narrowScalarSrc(MachineInstr &MI, LLT NarrowTy,
+ unsigned OpIdx) {
+ MachineOperand &MO = MI.getOperand(OpIdx);
+ auto ExtB = MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {NarrowTy},
+ {MO.getReg()});
+ MO.setReg(ExtB->getOperand(0).getReg());
+}
+
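// Roughly, with invented vreg names: narrowScalarSrc(MI, LLT::scalar(32), 2)
// on %d:_(s64) = G_SHL %a, %b(s64) inserts
//   %t:_(s32) = G_TRUNC %b(s64)
// and rewrites the use in place, giving
//   %d:_(s64) = G_SHL %a, %t(s32)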
void LegalizerHelper::widenScalarDst(MachineInstr &MI, LLT WideTy,
unsigned OpIdx, unsigned TruncOpcode) {
MachineOperand &MO = MI.getOperand(OpIdx);
return Legalized;
case TargetOpcode::G_SHL:
- Observer.changingInstr(MI);
- widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
- // The "number of bits to shift" operand must preserve its value as an
- // unsigned integer:
- widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
- widenScalarDst(MI, WideTy);
+ Observer.changingInstr(MI);
+
+ if (TypeIdx == 0) {
+ widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ANYEXT);
+ widenScalarDst(MI, WideTy);
+ } else {
+ assert(TypeIdx == 1);
+ // The "number of bits to shift" operand must preserve its value as an
+ // unsigned integer:
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
+ }
+
Observer.changedInstr(MI);
return Legalized;
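// E.g., a sketch with invented vregs: widening the amount of
//   %d:_(s64) = G_SHL %a, %b(s16)
// to s32 gives
//   %z:_(s32) = G_ZEXT %b(s16)
//   %d:_(s64) = G_SHL %a, %z(s32)
// using G_ZEXT rather than G_ANYEXT so the unsigned shift count is kept.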
return Legalized;
case TargetOpcode::G_ASHR:
+ case TargetOpcode::G_LSHR:
Observer.changingInstr(MI);
- widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_SEXT);
- // The "number of bits to shift" operand must preserve its value as an
- // unsigned integer:
- widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
- widenScalarDst(MI, WideTy);
+
+ if (TypeIdx == 0) {
+ unsigned CvtOp = MI.getOpcode() == TargetOpcode::G_ASHR ?
+ TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT;
+
+ widenScalarSrc(MI, WideTy, 1, CvtOp);
+ widenScalarDst(MI, WideTy);
+ } else {
+ assert(TypeIdx == 1);
+ // The "number of bits to shift" operand must preserve its value as an
+ // unsigned integer:
+ widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
+ }
+
Observer.changedInstr(MI);
return Legalized;
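// Why CvtOp differs, sketched on s8 -> s32: G_ASHR must sign-extend the
// value so the wider arithmetic shift drags in the replicated sign bit,
// e.g. ashr(s8 0x80, 1) = 0xC0 = trunc(ashr(s32 0xFFFFFF80, 1)); G_LSHR
// must zero-extend so zeros shift in from the top.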
-
case TargetOpcode::G_UDIV:
case TargetOpcode::G_UREM:
- case TargetOpcode::G_LSHR:
Observer.changingInstr(MI);
widenScalarSrc(MI, WideTy, 1, TargetOpcode::G_ZEXT);
widenScalarSrc(MI, WideTy, 2, TargetOpcode::G_ZEXT);
.clampScalar(0, s16, s64)
.widenScalarToNextPow2(0);
- getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR, G_SHL})
+ getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
.legalFor({s32, s64, v2s32, v4s32, v2s64})
.clampScalar(0, s32, s64)
.widenScalarToNextPow2(0)
.clampNumElements(0, v2s64, v2s64)
.moreElementsToNextPow2(0);
+ getActionDefinitionsBuilder(G_SHL)
+ .legalFor({{s32, s32}, {s64, s64},
+ {v2s32, v2s32}, {v4s32, v4s32}, {v2s64, v2s64}})
+ .clampScalar(0, s32, s64)
+ .widenScalarToNextPow2(0)
+ .clampNumElements(0, v2s32, v4s32)
+ .clampNumElements(0, v2s64, v2s64)
+ .moreElementsToNextPow2(0)
+ .minScalarSameAs(1, 0);
+
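// A note on minScalarSameAs(1, 0), sketching its effect here: the shift
// amount is widened until it is at least as wide as the shifted value, so an
// {s64, s32} G_SHL becomes the legal {s64, s64} form via a G_ZEXT of the
// amount; the G_LSHR/G_ASHR rules below rely on the same mechanism.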
getActionDefinitionsBuilder(G_GEP)
.legalFor({{p0, s64}})
.clampScalar(1, s64, s64);
getActionDefinitionsBuilder(G_PTR_MASK).legalFor({p0});
- getActionDefinitionsBuilder({G_LSHR, G_ASHR, G_SDIV, G_UDIV})
+ getActionDefinitionsBuilder({G_SDIV, G_UDIV})
.legalFor({s32, s64})
.clampScalar(0, s32, s64)
.widenScalarToNextPow2(0);
+ getActionDefinitionsBuilder({G_LSHR, G_ASHR})
+ .legalFor({{s32, s32}, {s64, s64}})
+ .clampScalar(0, s32, s64)
+ .minScalarSameAs(1, 0);
+
getActionDefinitionsBuilder({G_SREM, G_UREM})
.lowerFor({s1, s8, s16, s32, s64});
setAction({G_ADD, S32}, Legal);
setAction({G_ASHR, S32}, Legal);
+ setAction({G_ASHR, 1, S32}, Legal);
setAction({G_SUB, S32}, Legal);
setAction({G_MUL, S32}, Legal);
.clampScalar(0, S32, S64);
setAction({G_SHL, S32}, Legal);
+ setAction({G_SHL, 1, S32}, Legal);
// FIXME: When RegBankSelect inserts copies, it will only create new
setAction({Op, s32}, Libcall);
}
- getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL}).legalFor({s32});
+ getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});
+ getActionDefinitionsBuilder(G_PTRTOINT).legalFor({{s32, p0}});
+
+ getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
+ .legalFor({{s32, s32}})
+ .clampScalar(1, s32, s32);
if (ST.hasV5TOps()) {
getActionDefinitionsBuilder(G_CTLZ)
.legalFor({s32})
.clampScalar(0, s32, s32);
- getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
- .legalFor({s32});
-
getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UREM, G_UDIV})
.legalFor({s32})
.minScalar(0, s32)
.libcallFor({s64});
+ getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
+    .legalFor({{s32, s32}})
+ .minScalar(1, s32);
+
getActionDefinitionsBuilder(G_ICMP)
.legalFor({{s32, s32}})
.minScalar(0, s32);
const static struct ShiftEntry {
unsigned SizeInBits;
- unsigned CReg;
unsigned OpLSHR;
unsigned OpASHR;
unsigned OpSHL;
} OpTable[] = {
- {8, X86::CL, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL}, // i8
- {16, X86::CX, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL}, // i16
- {32, X86::ECX, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL}, // i32
- {64, X86::RCX, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL} // i64
+ {8, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL}, // i8
+ {16, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL}, // i16
+ {32, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL}, // i32
+ {64, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL} // i64
};
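// The CReg column is gone because the shift amount is now legalized to s8,
// so the count is copied straight into CL for every width; only the shift
// opcodes still vary by size.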
if (DstRB.getID() != X86::GPRRegBankID)
if (ShiftEntryIt == std::end(OpTable))
return false;
- unsigned CReg = ShiftEntryIt->CReg;
unsigned Opcode = 0;
switch (I.getOpcode()) {
case TargetOpcode::G_SHL:
unsigned Op0Reg = I.getOperand(1).getReg();
unsigned Op1Reg = I.getOperand(2).getReg();
-  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
-          ShiftEntryIt->CReg)
-      .addReg(Op1Reg);
-  // The shift instruction uses X86::CL. If we defined a super-register
-  // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
-  if (CReg != X86::CL)
-    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::KILL),
-            X86::CL)
-        .addReg(CReg, RegState::Kill);
+  assert(MRI.getType(Op1Reg).getSizeInBits() == 8);
+  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
+          X86::CL)
+      .addReg(Op1Reg);
MachineInstr &ShiftInst =
*BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
-  // Shifts and SDIV
+  // Divisions
getActionDefinitionsBuilder(
- {G_SHL, G_LSHR, G_ASHR, G_SDIV, G_SREM, G_UDIV, G_UREM})
- .legalFor({s8, s16, s32})
- .clampScalar(0, s8, s32);
+ {G_SDIV, G_SREM, G_UDIV, G_UREM})
+ .legalFor({s8, s16, s32})
+ .clampScalar(0, s8, s32);
+
+  // Shifts
+ getActionDefinitionsBuilder(
+ {G_SHL, G_LSHR, G_ASHR})
+ .legalFor({{s8, s8}, {s16, s8}, {s32, s8}})
+ .clampScalar(0, s8, s32)
+ .clampScalar(1, s8, s8);
}
// Control-flow
.clampScalar(1, s32, s64)
.widenScalarToNextPow2(1);
- // Shifts and SDIV
+ // Divisions
getActionDefinitionsBuilder(
- {G_SHL, G_LSHR, G_ASHR, G_SDIV, G_SREM, G_UDIV, G_UREM})
+ {G_SDIV, G_SREM, G_UDIV, G_UREM})
.legalFor({s8, s16, s32, s64})
.clampScalar(0, s8, s64);
+ // Shifts
+ getActionDefinitionsBuilder(
+ {G_SHL, G_LSHR, G_ASHR})
+ .legalFor({{s8, s8}, {s16, s8}, {s32, s8}, {s64, s8}})
+ .clampScalar(0, s8, s64)
+ .clampScalar(1, s8, s8);
+
// Merge/Unmerge
setAction({G_MERGE_VALUES, s128}, Legal);
setAction({G_UNMERGE_VALUES, 1, s128}, Legal);
case TargetOpcode::G_ADD:
case TargetOpcode::G_SUB:
case TargetOpcode::G_MUL:
- case TargetOpcode::G_SHL:
- case TargetOpcode::G_LSHR:
- case TargetOpcode::G_ASHR:
return getSameOperandsMapping(MI, false);
- break;
case TargetOpcode::G_FADD:
case TargetOpcode::G_FSUB:
case TargetOpcode::G_FMUL:
case TargetOpcode::G_FDIV:
return getSameOperandsMapping(MI, true);
- break;
+ case TargetOpcode::G_SHL:
+ case TargetOpcode::G_LSHR:
+ case TargetOpcode::G_ASHR: {
+ const MachineFunction &MF = *MI.getParent()->getParent();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ unsigned NumOperands = MI.getNumOperands();
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+
+ auto Mapping = getValueMapping(getPartialMappingIdx(Ty, false), 3);
+ return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
+  }
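// A sketch of what the mapping above computes: all three operands receive
// the GPR partial mapping chosen from the result type, so the narrower s8
// shift amount is still assigned to the GPR bank rather than deriving a
// mapping from its own type.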
default:
break;
}
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C1]]
- ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]]
+ ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
; CHECK: $w0 = COPY [[COPY2]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C3]]
- ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[AND2]]
+ ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[AND2]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: $w0 = COPY [[COPY3]](s32)
; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC5]], [[C4]]
- ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC4]], [[AND3]]
+ ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC4]], [[AND3]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
; CHECK: $w0 = COPY [[COPY4]](s32)
%0:_(s64) = COPY $x0
$w0 = COPY %9(s32)
...
+---
+name: test_shl_i64_i32
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_shl_i64_i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY1]](s32)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[ZEXT]](s64)
+ ; CHECK: $x0 = COPY [[SHL]](s64)
+ %0:_(s64) = COPY $x0
+ %1:_(s32) = COPY $w1
+ %2:_(s64) = G_SHL %0, %1
+ $x0 = COPY %2(s64)
+
+...
+---
+name: test_ashr_i64_i32
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_ashr_i64_i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY1]](s32)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[ZEXT]](s64)
+ ; CHECK: $x0 = COPY [[ASHR]](s64)
+ %0:_(s64) = COPY $x0
+ %1:_(s32) = COPY $w1
+ %2:_(s64) = G_ASHR %0, %1
+ $x0 = COPY %2(s64)
+
+...
+---
+name: test_lshr_i64_i32
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_lshr_i64_i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+ ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY1]](s32)
+ ; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[ZEXT]](s64)
+ ; CHECK: $x0 = COPY [[LSHR]](s64)
+ %0:_(s64) = COPY $x0
+ %1:_(s32) = COPY $w1
+ %2:_(s64) = G_LSHR %0, %1
+ $x0 = COPY %2(s64)
+
+...
; CHECK-LABEL: name: test_implicit_def_s3
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 61
; CHECK: [[DEF:%[0-9]+]]:_(s64) = G_IMPLICIT_DEF
- ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[DEF]], [[C]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]]
+ ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[DEF]], [[C]](s64)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64)
; CHECK: $x0 = COPY [[ASHR]](s64)
%0:_(s3) = G_IMPLICIT_DEF
%1:_(s64) = G_SEXT %0
# DEBUG-NEXT: G_ZEXT (opcode {{[0-9]+}}): 2 type indices
# DEBUG: .. the first uncovered type index: 2, OK
#
-# DEBUG-NEXT: G_SHL (opcode {{[0-9]+}}): 1 type index
-# DEBUG: .. the first uncovered type index: 1, OK
+# DEBUG-NEXT: G_SHL (opcode {{[0-9]+}}): 2 type indices
+# DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected
#
-# DEBUG-NEXT: G_LSHR (opcode {{[0-9]+}}): 1 type index
-# DEBUG: .. the first uncovered type index: 1, OK
+# DEBUG-NEXT: G_LSHR (opcode {{[0-9]+}}): 2 type indices
+# DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected
#
-# DEBUG-NEXT: G_ASHR (opcode {{[0-9]+}}): 1 type index
-# DEBUG: .. the first uncovered type index: 1, OK
+# DEBUG-NEXT: G_ASHR (opcode {{[0-9]+}}): 2 type indices
+# DEBUG: .. type index coverage check SKIPPED: user-defined predicate detected
#
# DEBUG-NEXT: G_ICMP (opcode {{[0-9]+}}): 2 type indices
# DEBUG: .. the first uncovered type index: 2, OK
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq %rsi, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
+; X64-NEXT: # kill: def $cl killed $cl killed $rcx
; X64-NEXT: sarq %cl, %rax
; X64-NEXT: retq
%res = ashr i64 %arg1, %arg2
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq $5, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: sarq %cl, %rax
; X64-NEXT: retq
%res = ashr i64 %arg1, 5
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq $1, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: sarq %cl, %rax
; X64-NEXT: retq
%res = ashr i64 %arg1, 1
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: sarl %cl, %eax
; X64-NEXT: retq
%res = ashr i32 %arg1, %arg2
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl $5, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: sarl %cl, %eax
; X64-NEXT: retq
%res = ashr i32 %arg1, 5
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl $1, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: sarl %cl, %eax
; X64-NEXT: retq
%res = ashr i32 %arg1, 1
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %ecx
-; X64-NEXT: # kill: def $cx killed $cx killed $ecx
-; X64-NEXT: # kill: def $cl killed $cx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: sarw %cl, %ax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movw $5, %cx
-; X64-NEXT: # kill: def $cl killed $cx
; X64-NEXT: sarw %cl, %ax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movw $1, %cx
-; X64-NEXT: # kill: def $cl killed $cx
; X64-NEXT: sarw %cl, %ax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movq $56, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: shlq %cl, %rax
; X64-NEXT: movq $56, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: sarq %cl, %rax
; X64-NEXT: retq
%r = sext i8 %val to i64
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movq $48, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: shlq %cl, %rax
; X64-NEXT: movq $48, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: sarq %cl, %rax
; X64-NEXT: retq
%r = sext i16 %val to i64
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl $24, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: movl $24, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: sarl %cl, %eax
; X64-NEXT: retq
;
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl $16, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: movl $16, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: sarl %cl, %eax
; X64-NEXT: retq
;
; X64_GISEL-NEXT: # kill: def $esi killed $esi def $rsi
; X64_GISEL-NEXT: movq $4, %rax
; X64_GISEL-NEXT: movq $56, %rcx
-; X64_GISEL-NEXT: # kill: def $cl killed $rcx
; X64_GISEL-NEXT: shlq %cl, %rsi
; X64_GISEL-NEXT: movq $56, %rcx
-; X64_GISEL-NEXT: # kill: def $cl killed $rcx
; X64_GISEL-NEXT: sarq %cl, %rsi
; X64_GISEL-NEXT: imulq %rax, %rsi
; X64_GISEL-NEXT: leaq (%rdi,%rsi), %rax
; X64_GISEL-NEXT: # kill: def $esi killed $esi def $rsi
; X64_GISEL-NEXT: movq $4, %rax
; X64_GISEL-NEXT: movq $48, %rcx
-; X64_GISEL-NEXT: # kill: def $cl killed $rcx
; X64_GISEL-NEXT: shlq %cl, %rsi
; X64_GISEL-NEXT: movq $48, %rcx
-; X64_GISEL-NEXT: # kill: def $cl killed $rcx
; X64_GISEL-NEXT: sarq %cl, %rsi
; X64_GISEL-NEXT: imulq %rax, %rsi
; X64_GISEL-NEXT: leaq (%rdi,%rsi), %rax
; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8)
- ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]]
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[TRUNC]](s8)
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s64)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[TRUNC1]](s8)
; CHECK: $rax = COPY [[ASHR]](s64)
; CHECK: RET 0, implicit $rax
%0(s8) = COPY $dil
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq %rsi, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
+; X64-NEXT: # kill: def $cl killed $cl killed $rcx
; X64-NEXT: shrq %cl, %rax
; X64-NEXT: retq
%res = lshr i64 %arg1, %arg2
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq $5, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: shrq %cl, %rax
; X64-NEXT: retq
%res = lshr i64 %arg1, 5
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq $1, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: shrq %cl, %rax
; X64-NEXT: retq
%res = lshr i64 %arg1, 1
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shrl %cl, %eax
; X64-NEXT: retq
%res = lshr i32 %arg1, %arg2
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl $5, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: shrl %cl, %eax
; X64-NEXT: retq
%res = lshr i32 %arg1, 5
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl $1, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: shrl %cl, %eax
; X64-NEXT: retq
%res = lshr i32 %arg1, 1
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %ecx
-; X64-NEXT: # kill: def $cx killed $cx killed $ecx
-; X64-NEXT: # kill: def $cl killed $cx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shrw %cl, %ax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movw $5, %cx
-; X64-NEXT: # kill: def $cl killed $cx
; X64-NEXT: shrw %cl, %ax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movw $1, %cx
-; X64-NEXT: # kill: def $cl killed $cx
; X64-NEXT: shrw %cl, %ax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
liveins:
fixedStack:
stack:
; ALL-LABEL: name: test_ashr_i64
; ALL: liveins: $rdi, $rsi
; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
- ; ALL: $rcx = COPY [[COPY1]]
- ; ALL: $cl = KILL killed $rcx
+ ; ALL: [[COPY1:%[0-9]+]]:gr64_with_sub_8bit = COPY $rsi
+ ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; ALL: $cl = COPY [[COPY2]]
; ALL: [[SAR64rCL:%[0-9]+]]:gr64 = SAR64rCL [[COPY]], implicit-def $eflags, implicit $cl
; ALL: $rax = COPY [[SAR64rCL]]
; ALL: RET 0, implicit $rax
%0(s64) = COPY $rdi
%1(s64) = COPY $rsi
- %2(s64) = G_ASHR %0, %1
- $rax = COPY %2(s64)
+ %2(s8) = G_TRUNC %1
+ %3(s64) = G_ASHR %0, %2
+ $rax = COPY %3(s64)
RET 0, implicit $rax
...
; ALL-LABEL: name: test_ashr_i64_imm
; ALL: liveins: $rdi
; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 5
- ; ALL: $rcx = COPY [[MOV64ri32_]]
- ; ALL: $cl = KILL killed $rcx
- ; ALL: [[SAR64rCL:%[0-9]+]]:gr64 = SAR64rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $rax = COPY [[SAR64rCL]]
+ ; ALL: [[SAR64ri:%[0-9]+]]:gr64 = SAR64ri [[COPY]], 5, implicit-def $eflags
+ ; ALL: $rax = COPY [[SAR64ri]]
; ALL: RET 0, implicit $rax
%0(s64) = COPY $rdi
- %1(s64) = G_CONSTANT i64 5
+ %1(s8) = G_CONSTANT i8 5
%2(s64) = G_ASHR %0, %1
$rax = COPY %2(s64)
RET 0, implicit $rax
; ALL-LABEL: name: test_ashr_i64_imm1
; ALL: liveins: $rdi
; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 1
- ; ALL: $rcx = COPY [[MOV64ri32_]]
- ; ALL: $cl = KILL killed $rcx
- ; ALL: [[SAR64rCL:%[0-9]+]]:gr64 = SAR64rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $rax = COPY [[SAR64rCL]]
+ ; ALL: [[SAR64r1_:%[0-9]+]]:gr64 = SAR64r1 [[COPY]], implicit-def $eflags
+ ; ALL: $rax = COPY [[SAR64r1_]]
; ALL: RET 0, implicit $rax
%0(s64) = COPY $rdi
- %1(s64) = G_CONSTANT i64 1
+ %1(s8) = G_CONSTANT i8 1
%2(s64) = G_ASHR %0, %1
$rax = COPY %2(s64)
RET 0, implicit $rax
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
liveins:
fixedStack:
stack:
; ALL: liveins: $edi, $esi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
- ; ALL: $ecx = COPY [[COPY1]]
- ; ALL: $cl = KILL killed $ecx
+ ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; ALL: $cl = COPY [[COPY2]]
; ALL: [[SAR32rCL:%[0-9]+]]:gr32 = SAR32rCL [[COPY]], implicit-def $eflags, implicit $cl
; ALL: $eax = COPY [[SAR32rCL]]
; ALL: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s32) = G_ASHR %0, %1
- $eax = COPY %2(s32)
+ %2(s8) = G_TRUNC %1
+ %3(s32) = G_ASHR %0, %2
+ $eax = COPY %3(s32)
RET 0, implicit $eax
...
; ALL-LABEL: name: test_ashr_i32_imm
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 5
- ; ALL: $ecx = COPY [[MOV32ri]]
- ; ALL: $cl = KILL killed $ecx
- ; ALL: [[SAR32rCL:%[0-9]+]]:gr32 = SAR32rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $eax = COPY [[SAR32rCL]]
+ ; ALL: [[SAR32ri:%[0-9]+]]:gr32 = SAR32ri [[COPY]], 5, implicit-def $eflags
+ ; ALL: $eax = COPY [[SAR32ri]]
; ALL: RET 0, implicit $eax
%0(s32) = COPY $edi
- %1(s32) = G_CONSTANT i32 5
+ %1(s8) = G_CONSTANT i8 5
%2(s32) = G_ASHR %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
; ALL-LABEL: name: test_ashr_i32_imm1
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1
- ; ALL: $ecx = COPY [[MOV32ri]]
- ; ALL: $cl = KILL killed $ecx
- ; ALL: [[SAR32rCL:%[0-9]+]]:gr32 = SAR32rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $eax = COPY [[SAR32rCL]]
+ ; ALL: [[SAR32r1_:%[0-9]+]]:gr32 = SAR32r1 [[COPY]], implicit-def $eflags
+ ; ALL: $eax = COPY [[SAR32r1_]]
; ALL: RET 0, implicit $eax
%0(s32) = COPY $edi
- %1(s32) = G_CONSTANT i32 1
+ %1(s8) = G_CONSTANT i8 1
%2(s32) = G_ASHR %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; ALL: [[COPY2:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; ALL: [[COPY3:%[0-9]+]]:gr16 = COPY [[COPY1]].sub_16bit
- ; ALL: $cx = COPY [[COPY3]]
- ; ALL: $cl = KILL killed $cx
+ ; ALL: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; ALL: $cl = COPY [[COPY3]]
; ALL: [[SAR16rCL:%[0-9]+]]:gr16 = SAR16rCL [[COPY2]], implicit-def $eflags, implicit $cl
; ALL: $ax = COPY [[SAR16rCL]]
; ALL: RET 0, implicit $ax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
%2(s16) = G_TRUNC %0(s32)
- %3(s16) = G_TRUNC %1(s32)
+ %3(s8) = G_TRUNC %1(s32)
%4(s16) = G_ASHR %2, %3
$ax = COPY %4(s16)
RET 0, implicit $ax
; ALL-LABEL: name: test_ashr_i16_imm
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 5
; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; ALL: $cx = COPY [[MOV16ri]]
- ; ALL: $cl = KILL killed $cx
- ; ALL: [[SAR16rCL:%[0-9]+]]:gr16 = SAR16rCL [[COPY1]], implicit-def $eflags, implicit $cl
- ; ALL: $ax = COPY [[SAR16rCL]]
+ ; ALL: [[SAR16ri:%[0-9]+]]:gr16 = SAR16ri [[COPY1]], 5, implicit-def $eflags
+ ; ALL: $ax = COPY [[SAR16ri]]
; ALL: RET 0, implicit $ax
%0(s32) = COPY $edi
- %2(s16) = G_CONSTANT i16 5
+ %2(s8) = G_CONSTANT i8 5
%1(s16) = G_TRUNC %0(s32)
%3(s16) = G_ASHR %1, %2
$ax = COPY %3(s16)
; ALL-LABEL: name: test_ashr_i16_imm1
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 1
; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; ALL: $cx = COPY [[MOV16ri]]
- ; ALL: $cl = KILL killed $cx
- ; ALL: [[SAR16rCL:%[0-9]+]]:gr16 = SAR16rCL [[COPY1]], implicit-def $eflags, implicit $cl
- ; ALL: $ax = COPY [[SAR16rCL]]
+ ; ALL: [[SAR16r1_:%[0-9]+]]:gr16 = SAR16r1 [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[SAR16r1_]]
; ALL: RET 0, implicit $ax
%0(s32) = COPY $edi
- %2(s16) = G_CONSTANT i16 1
+ %2(s8) = G_CONSTANT i8 1
%1(s16) = G_TRUNC %0(s32)
%3(s16) = G_ASHR %1, %2
$ax = COPY %3(s16)
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
liveins:
fixedStack:
stack:
; ALL-LABEL: name: test_lshr_i64
; ALL: liveins: $rdi, $rsi
; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
- ; ALL: $rcx = COPY [[COPY1]]
- ; ALL: $cl = KILL killed $rcx
+ ; ALL: [[COPY1:%[0-9]+]]:gr64_with_sub_8bit = COPY $rsi
+ ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; ALL: $cl = COPY [[COPY2]]
; ALL: [[SHR64rCL:%[0-9]+]]:gr64 = SHR64rCL [[COPY]], implicit-def $eflags, implicit $cl
; ALL: $rax = COPY [[SHR64rCL]]
; ALL: RET 0, implicit $rax
%0(s64) = COPY $rdi
%1(s64) = COPY $rsi
- %2(s64) = G_LSHR %0, %1
- $rax = COPY %2(s64)
+ %2(s8) = G_TRUNC %1
+ %3(s64) = G_LSHR %0, %2
+ $rax = COPY %3(s64)
RET 0, implicit $rax
...
; ALL-LABEL: name: test_lshr_i64_imm
; ALL: liveins: $rdi
; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 5
- ; ALL: $rcx = COPY [[MOV64ri32_]]
- ; ALL: $cl = KILL killed $rcx
- ; ALL: [[SHR64rCL:%[0-9]+]]:gr64 = SHR64rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $rax = COPY [[SHR64rCL]]
+ ; ALL: [[SHR64ri:%[0-9]+]]:gr64 = SHR64ri [[COPY]], 5, implicit-def $eflags
+ ; ALL: $rax = COPY [[SHR64ri]]
; ALL: RET 0, implicit $rax
%0(s64) = COPY $rdi
- %1(s64) = G_CONSTANT i64 5
+ %1(s8) = G_CONSTANT i8 5
%2(s64) = G_LSHR %0, %1
$rax = COPY %2(s64)
RET 0, implicit $rax
; ALL-LABEL: name: test_lshr_i64_imm1
; ALL: liveins: $rdi
; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 1
- ; ALL: $rcx = COPY [[MOV64ri32_]]
- ; ALL: $cl = KILL killed $rcx
- ; ALL: [[SHR64rCL:%[0-9]+]]:gr64 = SHR64rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $rax = COPY [[SHR64rCL]]
+ ; ALL: [[SHR64r1_:%[0-9]+]]:gr64 = SHR64r1 [[COPY]], implicit-def $eflags
+ ; ALL: $rax = COPY [[SHR64r1_]]
; ALL: RET 0, implicit $rax
%0(s64) = COPY $rdi
- %1(s64) = G_CONSTANT i64 1
+ %1(s8) = G_CONSTANT i8 1
%2(s64) = G_LSHR %0, %1
$rax = COPY %2(s64)
RET 0, implicit $rax
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
liveins:
fixedStack:
stack:
; ALL: liveins: $edi, $esi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
- ; ALL: $ecx = COPY [[COPY1]]
- ; ALL: $cl = KILL killed $ecx
+ ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; ALL: $cl = COPY [[COPY2]]
; ALL: [[SHR32rCL:%[0-9]+]]:gr32 = SHR32rCL [[COPY]], implicit-def $eflags, implicit $cl
; ALL: $eax = COPY [[SHR32rCL]]
; ALL: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s32) = G_LSHR %0, %1
- $eax = COPY %2(s32)
+ %2(s8) = G_TRUNC %1
+ %3(s32) = G_LSHR %0, %2
+ $eax = COPY %3(s32)
RET 0, implicit $eax
...
; ALL-LABEL: name: test_lshr_i32_imm
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 5
- ; ALL: $ecx = COPY [[MOV32ri]]
- ; ALL: $cl = KILL killed $ecx
- ; ALL: [[SHR32rCL:%[0-9]+]]:gr32 = SHR32rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $eax = COPY [[SHR32rCL]]
+ ; ALL: [[SHR32ri:%[0-9]+]]:gr32 = SHR32ri [[COPY]], 5, implicit-def $eflags
+ ; ALL: $eax = COPY [[SHR32ri]]
; ALL: RET 0, implicit $eax
%0(s32) = COPY $edi
- %1(s32) = G_CONSTANT i32 5
+ %1(s8) = G_CONSTANT i8 5
%2(s32) = G_LSHR %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
; ALL-LABEL: name: test_lshr_i32_imm1
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1
- ; ALL: $ecx = COPY [[MOV32ri]]
- ; ALL: $cl = KILL killed $ecx
- ; ALL: [[SHR32rCL:%[0-9]+]]:gr32 = SHR32rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $eax = COPY [[SHR32rCL]]
+ ; ALL: [[SHR32r1_:%[0-9]+]]:gr32 = SHR32r1 [[COPY]], implicit-def $eflags
+ ; ALL: $eax = COPY [[SHR32r1_]]
; ALL: RET 0, implicit $eax
%0(s32) = COPY $edi
- %1(s32) = G_CONSTANT i32 1
+ %1(s8) = G_CONSTANT i8 1
%2(s32) = G_LSHR %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; ALL: [[COPY2:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; ALL: [[COPY3:%[0-9]+]]:gr16 = COPY [[COPY1]].sub_16bit
- ; ALL: $cx = COPY [[COPY3]]
- ; ALL: $cl = KILL killed $cx
+ ; ALL: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; ALL: $cl = COPY [[COPY3]]
; ALL: [[SHR16rCL:%[0-9]+]]:gr16 = SHR16rCL [[COPY2]], implicit-def $eflags, implicit $cl
; ALL: $ax = COPY [[SHR16rCL]]
; ALL: RET 0, implicit $ax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
%2(s16) = G_TRUNC %0(s32)
- %3(s16) = G_TRUNC %1(s32)
+ %3(s8) = G_TRUNC %1(s32)
%4(s16) = G_LSHR %2, %3
$ax = COPY %4(s16)
RET 0, implicit $ax
; ALL-LABEL: name: test_lshr_i16_imm
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 5
; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; ALL: $cx = COPY [[MOV16ri]]
- ; ALL: $cl = KILL killed $cx
- ; ALL: [[SHR16rCL:%[0-9]+]]:gr16 = SHR16rCL [[COPY1]], implicit-def $eflags, implicit $cl
- ; ALL: $ax = COPY [[SHR16rCL]]
+ ; ALL: [[SHR16ri:%[0-9]+]]:gr16 = SHR16ri [[COPY1]], 5, implicit-def $eflags
+ ; ALL: $ax = COPY [[SHR16ri]]
; ALL: RET 0, implicit $ax
%0(s32) = COPY $edi
- %2(s16) = G_CONSTANT i16 5
+ %2(s8) = G_CONSTANT i8 5
%1(s16) = G_TRUNC %0(s32)
%3(s16) = G_LSHR %1, %2
$ax = COPY %3(s16)
; ALL-LABEL: name: test_lshr_i16_imm1
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 1
; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; ALL: $cx = COPY [[MOV16ri]]
- ; ALL: $cl = KILL killed $cx
- ; ALL: [[SHR16rCL:%[0-9]+]]:gr16 = SHR16rCL [[COPY1]], implicit-def $eflags, implicit $cl
- ; ALL: $ax = COPY [[SHR16rCL]]
+ ; ALL: [[SHR16r1_:%[0-9]+]]:gr16 = SHR16r1 [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[SHR16r1_]]
; ALL: RET 0, implicit $ax
%0(s32) = COPY $edi
- %2(s16) = G_CONSTANT i16 1
+ %2(s8) = G_CONSTANT i8 1
%1(s16) = G_TRUNC %0(s32)
%3(s16) = G_LSHR %1, %2
$ax = COPY %3(s16)
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
liveins:
fixedStack:
stack:
; ALL-LABEL: name: test_shl_i64
; ALL: liveins: $rdi, $rsi
; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
- ; ALL: $rcx = COPY [[COPY1]]
- ; ALL: $cl = KILL killed $rcx
+ ; ALL: [[COPY1:%[0-9]+]]:gr64_with_sub_8bit = COPY $rsi
+ ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; ALL: $cl = COPY [[COPY2]]
; ALL: [[SHL64rCL:%[0-9]+]]:gr64 = SHL64rCL [[COPY]], implicit-def $eflags, implicit $cl
; ALL: $rax = COPY [[SHL64rCL]]
; ALL: RET 0, implicit $rax
%0(s64) = COPY $rdi
%1(s64) = COPY $rsi
- %2(s64) = G_SHL %0, %1
- $rax = COPY %2(s64)
+ %2(s8) = G_TRUNC %1
+ %3(s64) = G_SHL %0, %2
+ $rax = COPY %3(s64)
RET 0, implicit $rax
...
; ALL-LABEL: name: test_shl_i64_imm
; ALL: liveins: $rdi
; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 5
- ; ALL: $rcx = COPY [[MOV64ri32_]]
- ; ALL: $cl = KILL killed $rcx
- ; ALL: [[SHL64rCL:%[0-9]+]]:gr64 = SHL64rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $rax = COPY [[SHL64rCL]]
+ ; ALL: [[SHL64ri:%[0-9]+]]:gr64 = SHL64ri [[COPY]], 5, implicit-def $eflags
+ ; ALL: $rax = COPY [[SHL64ri]]
; ALL: RET 0, implicit $rax
%0(s64) = COPY $rdi
- %1(s64) = G_CONSTANT i64 5
+ %1(s8) = G_CONSTANT i8 5
%2(s64) = G_SHL %0, %1
$rax = COPY %2(s64)
RET 0, implicit $rax
; ALL-LABEL: name: test_shl_i64_imm1
; ALL: liveins: $rdi
; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
- ; ALL: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 1
- ; ALL: $rcx = COPY [[MOV64ri32_]]
- ; ALL: $cl = KILL killed $rcx
- ; ALL: [[SHL64rCL:%[0-9]+]]:gr64 = SHL64rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $rax = COPY [[SHL64rCL]]
+ ; ALL: [[ADD64rr:%[0-9]+]]:gr64 = ADD64rr [[COPY]], [[COPY]], implicit-def $eflags
+ ; ALL: $rax = COPY [[ADD64rr]]
; ALL: RET 0, implicit $rax
%0(s64) = COPY $rdi
- %1(s64) = G_CONSTANT i64 1
+ %1(s8) = G_CONSTANT i8 1
%2(s64) = G_SHL %0, %1
$rax = COPY %2(s64)
RET 0, implicit $rax
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
+ - { id: 3, class: gpr, preferred-register: '' }
liveins:
fixedStack:
stack:
; ALL: liveins: $edi, $esi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
- ; ALL: $ecx = COPY [[COPY1]]
- ; ALL: $cl = KILL killed $ecx
+ ; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; ALL: $cl = COPY [[COPY2]]
; ALL: [[SHL32rCL:%[0-9]+]]:gr32 = SHL32rCL [[COPY]], implicit-def $eflags, implicit $cl
; ALL: $eax = COPY [[SHL32rCL]]
; ALL: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s32) = G_SHL %0, %1
- $eax = COPY %2(s32)
+ %2(s8) = G_TRUNC %1
+ %3(s32) = G_SHL %0, %2
+ $eax = COPY %3(s32)
RET 0, implicit $eax
...
; ALL-LABEL: name: test_shl_i32_imm
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 5
- ; ALL: $ecx = COPY [[MOV32ri]]
- ; ALL: $cl = KILL killed $ecx
- ; ALL: [[SHL32rCL:%[0-9]+]]:gr32 = SHL32rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $eax = COPY [[SHL32rCL]]
+ ; ALL: [[SHL32ri:%[0-9]+]]:gr32 = SHL32ri [[COPY]], 5, implicit-def $eflags
+ ; ALL: $eax = COPY [[SHL32ri]]
; ALL: RET 0, implicit $eax
%0(s32) = COPY $edi
- %1(s32) = G_CONSTANT i32 5
+ %1(s8) = G_CONSTANT i8 5
%2(s32) = G_SHL %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
; ALL-LABEL: name: test_shl_i32_imm1
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1
- ; ALL: $ecx = COPY [[MOV32ri]]
- ; ALL: $cl = KILL killed $ecx
- ; ALL: [[SHL32rCL:%[0-9]+]]:gr32 = SHL32rCL [[COPY]], implicit-def $eflags, implicit $cl
- ; ALL: $eax = COPY [[SHL32rCL]]
+ ; ALL: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[COPY]], [[COPY]], implicit-def $eflags
+ ; ALL: $eax = COPY [[ADD32rr]]
; ALL: RET 0, implicit $eax
%0(s32) = COPY $edi
- %1(s32) = G_CONSTANT i32 1
+ %1(s8) = G_CONSTANT i8 1
%2(s32) = G_SHL %0, %1
$eax = COPY %2(s32)
RET 0, implicit $eax
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; ALL: [[COPY2:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; ALL: [[COPY3:%[0-9]+]]:gr16 = COPY [[COPY1]].sub_16bit
- ; ALL: $cx = COPY [[COPY3]]
- ; ALL: $cl = KILL killed $cx
+ ; ALL: [[COPY3:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
+ ; ALL: $cl = COPY [[COPY3]]
; ALL: [[SHL16rCL:%[0-9]+]]:gr16 = SHL16rCL [[COPY2]], implicit-def $eflags, implicit $cl
; ALL: $ax = COPY [[SHL16rCL]]
; ALL: RET 0, implicit $ax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
%2(s16) = G_TRUNC %0(s32)
- %3(s16) = G_TRUNC %1(s32)
+ %3(s8) = G_TRUNC %1(s32)
%4(s16) = G_SHL %2, %3
$ax = COPY %4(s16)
RET 0, implicit $ax
; ALL-LABEL: name: test_shl_i16_imm
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 5
; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; ALL: $cx = COPY [[MOV16ri]]
- ; ALL: $cl = KILL killed $cx
- ; ALL: [[SHL16rCL:%[0-9]+]]:gr16 = SHL16rCL [[COPY1]], implicit-def $eflags, implicit $cl
- ; ALL: $ax = COPY [[SHL16rCL]]
+ ; ALL: [[SHL16ri:%[0-9]+]]:gr16 = SHL16ri [[COPY1]], 5, implicit-def $eflags
+ ; ALL: $ax = COPY [[SHL16ri]]
; ALL: RET 0, implicit $ax
%0(s32) = COPY $edi
- %2(s16) = G_CONSTANT i16 5
+ %2(s8) = G_CONSTANT i8 5
%1(s16) = G_TRUNC %0(s32)
%3(s16) = G_SHL %1, %2
$ax = COPY %3(s16)
; ALL-LABEL: name: test_shl_i16_imm1
; ALL: liveins: $edi
; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
- ; ALL: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 1
; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; ALL: $cx = COPY [[MOV16ri]]
- ; ALL: $cl = KILL killed $cx
- ; ALL: [[SHL16rCL:%[0-9]+]]:gr16 = SHL16rCL [[COPY1]], implicit-def $eflags, implicit $cl
- ; ALL: $ax = COPY [[SHL16rCL]]
+ ; ALL: [[ADD16rr:%[0-9]+]]:gr16 = ADD16rr [[COPY1]], [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[ADD16rr]]
; ALL: RET 0, implicit $ax
%0(s32) = COPY $edi
- %2(s16) = G_CONSTANT i16 1
+ %2(s8) = G_CONSTANT i8 1
%1(s16) = G_TRUNC %0(s32)
%3(s16) = G_SHL %1, %2
$ax = COPY %3(s16)
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq %rsi, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
+; X64-NEXT: # kill: def $cl killed $cl killed $rcx
; X64-NEXT: shlq %cl, %rax
; X64-NEXT: retq
%res = shl i64 %arg1, %arg2
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq $5, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: shlq %cl, %rax
; X64-NEXT: retq
%res = shl i64 %arg1, 5
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: movq $1, %rcx
-; X64-NEXT: # kill: def $cl killed $rcx
; X64-NEXT: shlq %cl, %rax
; X64-NEXT: retq
%res = shl i64 %arg1, 1
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: retq
%res = shl i32 %arg1, %arg2
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl $5, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: retq
%res = shl i32 %arg1, 5
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl $1, %ecx
-; X64-NEXT: # kill: def $cl killed $ecx
; X64-NEXT: shll %cl, %eax
; X64-NEXT: retq
%res = shl i32 %arg1, 1
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movl %esi, %ecx
-; X64-NEXT: # kill: def $cx killed $cx killed $ecx
-; X64-NEXT: # kill: def $cl killed $cx
+; X64-NEXT: # kill: def $cl killed $cl killed $ecx
; X64-NEXT: shlw %cl, %ax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movw $5, %cx
-; X64-NEXT: # kill: def $cl killed $cx
; X64-NEXT: shlw %cl, %ax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: movw $1, %cx
-; X64-NEXT: # kill: def $cl killed $cx
; X64-NEXT: shlw %cl, %ax
; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[TRUNC]](s8)
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[TRUNC1]](s8)
; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[SITOFP]](s32)
; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[TRUNC]](s8)
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[TRUNC1]](s8)
; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[ASHR]](s32)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[SITOFP]](s32)
; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[TRUNC]](s8)
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[TRUNC1]](s8)
; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[SITOFP]](s64)
; CHECK: $xmm0 = COPY [[ANYEXT]](s128)
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]]
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[TRUNC]](s8)
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[C]](s32)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[TRUNC1]](s8)
; CHECK: [[SITOFP:%[0-9]+]]:_(s64) = G_SITOFP [[ASHR]](s32)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[SITOFP]](s64)
; CHECK: $xmm0 = COPY [[ANYEXT]](s128)