From 25aad6c4139ed0a45ef7c0a0de047c89c8e3ab2a Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Thu, 21 Mar 2019 17:38:58 +0000 Subject: [PATCH] [X86] Don't avoid folding multiple use sign extended 8-bit immediate into instructions under optsize. Under optsize we try to avoid folding immediates into instructions. But if the immediate is 16 bits or 32 bits, but can be encoded as an 8-bit immediate, we don't save enough from disabling the folding unless the immediate has enough uses to make up for the size of the move, which is either 3 bytes or 5 bytes since there are no sign extended 8-bit moves. We would also save something if the immediate was live out of the basic block and thus a move was unavoidable, but that would require a more advanced heuristic than just counting uses. Note we only avoid folding multiple use immediates into the patterns that use X86ISD::ADD/SUB/XOR/OR/AND/CMP/ADC/SBB nodes and not the more common ISD::ADD/SUB/XOR/OR/AND nodes. Differential Revision: https://reviews.llvm.org/D59522 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@356688 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86InstrArithmetic.td | 6 +++--- lib/Target/X86/X86InstrCompiler.td | 4 ++-- lib/Target/X86/X86InstrInfo.td | 13 ------------- test/CodeGen/X86/immediate_merging.ll | 10 ++++------ test/CodeGen/X86/immediate_merging64.ll | 2 +- 5 files changed, 10 insertions(+), 25 deletions(-) diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td index cf27e6826e8..68d804d0d72 100644 --- a/lib/Target/X86/X86InstrArithmetic.td +++ b/lib/Target/X86/X86InstrArithmetic.td @@ -605,13 +605,13 @@ def Xi8 : X86TypeInfo; def Xi16 : X86TypeInfo; def Xi32 : X86TypeInfo; def Xi64 : X86TypeInfo; /// ITy - This instruction base class takes the type info for the instruction. 
diff --git a/lib/Target/X86/X86InstrCompiler.td b/lib/Target/X86/X86InstrCompiler.td index 4c06b176543..124d1314db0 100644 --- a/lib/Target/X86/X86InstrCompiler.td +++ b/lib/Target/X86/X86InstrCompiler.td @@ -1994,8 +1994,8 @@ def : Pat<(X86sub_flag 0, GR32:$src), (NEG32r GR32:$src)>; def : Pat<(X86sub_flag 0, GR64:$src), (NEG64r GR64:$src)>; // sub reg, relocImm -def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt8_su:$src2), - (SUB64ri8 GR64:$src1, i64relocImmSExt8_su:$src2)>; +def : Pat<(X86sub_flag GR64:$src1, i64relocImmSExt8:$src2), + (SUB64ri8 GR64:$src1, i64relocImmSExt8:$src2)>; // mul reg, reg def : Pat<(mul GR16:$src1, GR16:$src2), diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 67ceceb6698..b1cdc5f3f82 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -1004,19 +1004,6 @@ def relocImm32_su : PatLeaf<(i32 relocImm), [{ return !shouldAvoidImmediateInstFormsForSize(N); }]>; -def i16immSExt8_su : PatLeaf<(i16immSExt8), [{ - return !shouldAvoidImmediateInstFormsForSize(N); -}]>; -def i32immSExt8_su : PatLeaf<(i32immSExt8), [{ - return !shouldAvoidImmediateInstFormsForSize(N); -}]>; -def i64immSExt8_su : PatLeaf<(i64immSExt8), [{ - return !shouldAvoidImmediateInstFormsForSize(N); -}]>; - -def i64relocImmSExt8_su : PatLeaf<(i64relocImmSExt8), [{ - return !shouldAvoidImmediateInstFormsForSize(N); -}]>; def i64relocImmSExt32_su : PatLeaf<(i64relocImmSExt32), [{ return !shouldAvoidImmediateInstFormsForSize(N); }]>; diff --git a/test/CodeGen/X86/immediate_merging.ll b/test/CodeGen/X86/immediate_merging.ll index a6e36c73467..ab796c7577c 100644 --- a/test/CodeGen/X86/immediate_merging.ll +++ b/test/CodeGen/X86/immediate_merging.ll @@ -19,9 +19,8 @@ define i32 @foo() optsize { ; X86-NEXT: movl $1234, %eax # imm = 0x4D2 ; X86-NEXT: movl %eax, a ; X86-NEXT: movl %eax, b -; X86-NEXT: movl $12, %eax -; X86-NEXT: movl %eax, c -; X86-NEXT: cmpl %eax, e +; X86-NEXT: movl $12, c +; X86-NEXT: cmpl $12, e ; 
X86-NEXT: jne .LBB0_2 ; X86-NEXT: # %bb.1: # %if.then ; X86-NEXT: movl $1, x @@ -38,9 +37,8 @@ define i32 @foo() optsize { ; X64-NEXT: movl $1234, %eax # imm = 0x4D2 ; X64-NEXT: movl %eax, {{.*}}(%rip) ; X64-NEXT: movl %eax, {{.*}}(%rip) -; X64-NEXT: movl $12, %eax -; X64-NEXT: movl %eax, {{.*}}(%rip) -; X64-NEXT: cmpl %eax, {{.*}}(%rip) +; X64-NEXT: movl $12, {{.*}}(%rip) +; X64-NEXT: cmpl $12, {{.*}}(%rip) ; X64-NEXT: jne .LBB0_2 ; X64-NEXT: # %bb.1: # %if.then ; X64-NEXT: movl $1, {{.*}}(%rip) diff --git a/test/CodeGen/X86/immediate_merging64.ll b/test/CodeGen/X86/immediate_merging64.ll index 12be8bdff83..d11516d00ab 100644 --- a/test/CodeGen/X86/immediate_merging64.ll +++ b/test/CodeGen/X86/immediate_merging64.ll @@ -11,7 +11,7 @@ define i1 @imm_multiple_users(i64 %a, i64* %b) optsize { ; CHECK: # %bb.0: ; CHECK-NEXT: movq $-1, %rax ; CHECK-NEXT: movq %rax, (%rsi) -; CHECK-NEXT: cmpq %rax, %rdi +; CHECK-NEXT: cmpq $-1, %rdi ; CHECK-NEXT: sete %al ; CHECK-NEXT: retq store i64 -1, i64* %b, align 8 -- 2.50.1