return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
+ case TargetOpcode::G_EXTRACT: {
+ // Select a scalar G_EXTRACT as an unsigned bitfield extract (UBFMXri).
+ LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
+ // Larger extracts are vectors, same-size extracts should be something else
+ // by now (either split up or simplified to a COPY).
+ if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
+ return false;
+
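+ // Morph the G_EXTRACT into a UBFMXri: the existing offset immediate already
+ // lines up with the "immr" operand, so only "imms" = offset + width - 1 needs
+ // to be appended. The UBFM then extracts exactly Ty.getSizeInBits() bits
+ // starting at the offset.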
+ I.setDesc(TII.get(AArch64::UBFMXri));
+ MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
+ Ty.getSizeInBits() - 1);
+
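+ // UBFMXri defines a 64-bit register, but the original destination is at most
+ // 32 bits wide. Give the UBFM a fresh 64-bit register and copy its sub_32
+ // subregister into the original destination immediately afterwards.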
+ unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
+ BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
+ TII.get(AArch64::COPY))
+ .addDef(I.getOperand(0).getReg())
+ .addUse(DstReg, 0, AArch64::sub_32);
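+ // The original destination is now defined by the COPY; constrain it to GPR32
+ // and redirect the UBFM's def to the new 64-bit register before constraining
+ // the UBFM's own operands below.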
+ RBI.constrainGenericRegister(I.getOperand(0).getReg(),
+ AArch64::GPR32RegClass, MRI);
+ I.getOperand(0).setReg(DstReg);
+
+ return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
+ }
case TargetOpcode::G_INSERT: {
LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
TII.get(AArch64::SUBREG_TO_REG))
.addDef(SrcReg)
- .addUse(0)
+ .addImm(0)
.addUse(I.getOperand(2).getReg())
.addImm(AArch64::sub_32);
RBI.constrainGenericRegister(I.getOperand(2).getReg(),
%1:gpr(s64) = G_IMPLICIT_DEF
; CHECK: body:
- ; CHECK: [[TMP:%[0-9]+]] = SUBREG_TO_REG _, %0, 15
+ ; CHECK: [[TMP:%[0-9]+]] = SUBREG_TO_REG 0, %0, 15
; CHECK: %2 = BFMXri %1, [[TMP]], 0, 31
%2:gpr(s64) = G_INSERT %1, %0, 0
- ; CHECK: [[TMP:%[0-9]+]] = SUBREG_TO_REG _, %0, 15
+ ; CHECK: [[TMP:%[0-9]+]] = SUBREG_TO_REG 0, %0, 15
; CHECK: %3 = BFMXri %1, [[TMP]], 51, 31
%3:gpr(s64) = G_INSERT %1, %0, 13