multiclass ArithBinOp_RF<bits<8> BaseOpc, bits<8> BaseOpc2, bits<8> BaseOpc4,
string mnemonic, Format RegMRM, Format MemMRM,
SDNode opnodeflag, SDNode opnode,
- bit CommutableRR, bit ConvertibleToThreeAddress> {
+ bit CommutableRR, bit ConvertibleToThreeAddress,
+ bit ConvertibleToThreeAddressRR> {
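+ // ConvertibleToThreeAddressRR gates isConvertibleToThreeAddress on the
+ // register-register forms separately, so SUB can mark its immediate forms
+ // convertible (the negated immediate becomes an LEA displacement) while
+ // its rr form, which has no LEA equivalent, stays non-convertible.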
let Defs = [EFLAGS] in {
let Constraints = "$src1 = $dst" in {
let isCommutable = CommutableRR in {
- let isConvertibleToThreeAddress = ConvertibleToThreeAddress in {
+ let isConvertibleToThreeAddress = ConvertibleToThreeAddressRR in {
def NAME#8rr : BinOpRR_RF<BaseOpc, mnemonic, Xi8 , opnodeflag>;
def NAME#16rr : BinOpRR_RF<BaseOpc, mnemonic, Xi16, opnodeflag>;
def NAME#32rr : BinOpRR_RF<BaseOpc, mnemonic, Xi32, opnodeflag>;
defm AND : ArithBinOp_RF<0x20, 0x22, 0x24, "and", MRM4r, MRM4m,
- X86and_flag, and, 1, 0>;
+ X86and_flag, and, 1, 0, 0>;
defm OR : ArithBinOp_RF<0x08, 0x0A, 0x0C, "or", MRM1r, MRM1m,
- X86or_flag, or, 1, 0>;
+ X86or_flag, or, 1, 0, 0>;
defm XOR : ArithBinOp_RF<0x30, 0x32, 0x34, "xor", MRM6r, MRM6m,
- X86xor_flag, xor, 1, 0>;
+ X86xor_flag, xor, 1, 0, 0>;
defm ADD : ArithBinOp_RF<0x00, 0x02, 0x04, "add", MRM0r, MRM0m,
- X86add_flag, add, 1, 1>;
+ X86add_flag, add, 1, 1, 1>;
let isCompare = 1 in {
defm SUB : ArithBinOp_RF<0x28, 0x2A, 0x2C, "sub", MRM5r, MRM5m,
- X86sub_flag, sub, 0, 0>;
+ X86sub_flag, sub, 0, 1, 0>;
}
// Arithmetic.
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
return convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV, Is8BitOp);
+ case X86::SUB8ri:
+ case X86::SUB16ri8:
+ case X86::SUB16ri:
+ /// FIXME: Support these similar to ADD8ri/ADD16ri*.
+ return nullptr;
+ case X86::SUB32ri8:
+ case X86::SUB32ri: {
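+ // Replace `sub reg, imm` with `lea -imm(reg)`: negate the immediate and
+ // use it as the LEA displacement. Bail out if -Imm does not fit in a
+ // signed 32-bit displacement (e.g. Imm == INT32_MIN).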
+ int64_t Imm = MI.getOperand(2).getImm();
+ if (!isInt<32>(-Imm))
+ return nullptr;
+
+ assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
+ unsigned Opc = Is64Bit ? X86::LEA64_32r : X86::LEA32r;
+
+ bool isKill;
+ unsigned SrcReg;
+ MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
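+ // classifyLEAReg checks that the source register is usable as an LEA base
+ // (widening to the 64-bit super-register for LEA64_32r when needed) and may
+ // return an implicit-use operand to keep liveness information correct.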
+ if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
+ SrcReg, isKill, ImplicitOp, LV))
+ return nullptr;
+
+ MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
+ .add(Dest)
+ .addReg(SrcReg, getKillRegState(isKill));
+ if (ImplicitOp.getReg() != 0)
+ MIB.add(ImplicitOp);
+
+ NewMI = addOffset(MIB, -Imm);
+ break;
+ }
+
+ case X86::SUB64ri8:
+ case X86::SUB64ri32: {
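+ // 64-bit SUB converts directly to LEA64r; the displacement is again the
+ // negated immediate and must still fit in a signed 32-bit field.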
+ int64_t Imm = MI.getOperand(2).getImm();
+ if (!isInt<32>(-Imm))
+ return nullptr;
+
+ assert(MI.getNumOperands() >= 3 && "Unknown sub instruction!");
+
+ MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(),
+ get(X86::LEA64r)).add(Dest).add(Src);
+ NewMI = addOffset(MIB, -Imm);
+ break;
+ }
+
case X86::VMOVDQU8Z128rmk:
case X86::VMOVDQU8Z256rmk:
case X86::VMOVDQU8Zrmk:
;
; X64-LINUX-LABEL: test1:
; X64-LINUX: # %bb.0: # %entry
-; X64-LINUX-NEXT: movl %edi, %eax
-; X64-LINUX-NEXT: subl $-128, %eax
+; X64-LINUX-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-LINUX-NEXT: leal 128(%rdi), %eax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test1:
; X64-WIN32: # %bb.0: # %entry
-; X64-WIN32-NEXT: movl %ecx, %eax
-; X64-WIN32-NEXT: subl $-128, %eax
+; X64-WIN32-NEXT: # kill: def $ecx killed $ecx def $rcx
+; X64-WIN32-NEXT: leal 128(%rcx), %eax
; X64-WIN32-NEXT: retq
entry:
%b = add i32 %a, 128
;
; X64-LINUX-LABEL: test3:
; X64-LINUX: # %bb.0: # %entry
-; X64-LINUX-NEXT: movq %rdi, %rax
-; X64-LINUX-NEXT: subq $-128, %rax
+; X64-LINUX-NEXT: leaq 128(%rdi), %rax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test3:
; X64-WIN32: # %bb.0: # %entry
-; X64-WIN32-NEXT: movq %rcx, %rax
-; X64-WIN32-NEXT: subq $-128, %rax
+; X64-WIN32-NEXT: leaq 128(%rcx), %rax
; X64-WIN32-NEXT: retq
entry:
%b = add i64 %a, 128
define i64 @test__blsmsk_u64(i64 %a0) {
; X64-LABEL: test__blsmsk_u64:
; X64: # %bb.0:
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: subq $1, %rax
+; X64-NEXT: leaq -1(%rdi), %rax
; X64-NEXT: xorq %rdi, %rax
; X64-NEXT: retq
%dec = sub i64 %a0, 1
define i64 @test__blsr_u64(i64 %a0) {
; X64-LABEL: test__blsr_u64:
; X64: # %bb.0:
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: subq $1, %rax
+; X64-NEXT: leaq -1(%rdi), %rax
; X64-NEXT: andq %rdi, %rax
; X64-NEXT: retq
%dec = sub i64 %a0, 1
define i64 @test_blsmsk_u64(i64 %a0) {
; X64-LABEL: test_blsmsk_u64:
; X64: # %bb.0:
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: subq $1, %rax
+; X64-NEXT: leaq -1(%rdi), %rax
; X64-NEXT: xorq %rdi, %rax
; X64-NEXT: retq
%dec = sub i64 %a0, 1
define i64 @test_blsr_u64(i64 %a0) {
; X64-LABEL: test_blsr_u64:
; X64: # %bb.0:
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: subq $1, %rax
+; X64-NEXT: leaq -1(%rdi), %rax
; X64-NEXT: andq %rdi, %rax
; X64-NEXT: retq
%dec = sub i64 %a0, 1
; X32-LABEL: test__blsmsk_u32:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: subl $1, %eax
+; X32-NEXT: leal -1(%ecx), %eax
; X32-NEXT: xorl %ecx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test__blsmsk_u32:
; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: subl $1, %eax
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: xorl %edi, %eax
; X64-NEXT: retq
%dec = sub i32 %a0, 1
; X32-LABEL: test__blsr_u32:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: subl $1, %eax
+; X32-NEXT: leal -1(%ecx), %eax
; X32-NEXT: andl %ecx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test__blsr_u32:
; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: subl $1, %eax
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: andl %edi, %eax
; X64-NEXT: retq
%dec = sub i32 %a0, 1
; X32-LABEL: test_blsmsk_u32:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: subl $1, %eax
+; X32-NEXT: leal -1(%ecx), %eax
; X32-NEXT: xorl %ecx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_blsmsk_u32:
; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: subl $1, %eax
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: xorl %edi, %eax
; X64-NEXT: retq
%dec = sub i32 %a0, 1
; X32-LABEL: test_blsr_u32:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: subl $1, %eax
+; X32-NEXT: leal -1(%ecx), %eax
; X32-NEXT: andl %ecx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test_blsr_u32:
; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: subl $1, %eax
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: andl %edi, %eax
; X64-NEXT: retq
%dec = sub i32 %a0, 1
define i32 @PR42571(i32 %x, i32 %y) {
; CHECK-LABEL: PR42571:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: subl $1, %eax
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT: leal -1(%rdi), %eax
; CHECK-NEXT: andl %edi, %eax
; CHECK-NEXT: cmpl $1, %edi
; CHECK-NEXT: cmovbl %esi, %eax
define i64 @test__blsfill_u64(i64 %a0) {
; X64-LABEL: test__blsfill_u64:
; X64: # %bb.0:
-; X64-NEXT: movq %rdi, %rax
-; X64-NEXT: subq $1, %rax
+; X64-NEXT: leaq -1(%rdi), %rax
; X64-NEXT: orq %rdi, %rax
; X64-NEXT: retq
%1 = sub i64 %a0, 1
; X32-LABEL: test__blsfill_u32:
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT: movl %ecx, %eax
-; X32-NEXT: subl $1, %eax
+; X32-NEXT: leal -1(%ecx), %eax
; X32-NEXT: orl %ecx, %eax
; X32-NEXT: retl
;
; X64-LABEL: test__blsfill_u32:
; X64: # %bb.0:
-; X64-NEXT: movl %edi, %eax
-; X64-NEXT: subl $1, %eax
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: leal -1(%rdi), %eax
; X64-NEXT: orl %edi, %eax
; X64-NEXT: retq
%1 = sub i32 %a0, 1