From: Craig Topper
Date: Tue, 24 Jul 2018 18:36:46 +0000 (+0000)
Subject: [X86] Add test case to show failure to combine away negates that may be created by...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=a3de0cbb8f4d886a968d20a8c6a6e8aa01d28c2a;p=llvm

[X86] Add test case to show failure to combine away negates that may be
created by mul by constant expansion.

Mul by constant can expand to a sequence that ends with a negate. If the
next instruction is an add or sub, we might be able to fold the negate
away. We currently fail to do this because we explicitly don't add
anything to the DAG combine worklist when we expand multiplies. This is
primarily to keep the multiply from being reformed, but we should
consider adding the users to the worklist.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@337843 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/mul-constant-i32.ll b/test/CodeGen/X86/mul-constant-i32.ll
index 7dd7a7a7ad6..d80e69dd61b 100644
--- a/test/CodeGen/X86/mul-constant-i32.ll
+++ b/test/CodeGen/X86/mul-constant-i32.ll
@@ -1748,3 +1748,67 @@ define i32 @test_mul_spec(i32 %x) nounwind {
   %mul3 = mul nsw i32 %add, %add2
   ret i32 %mul3
 }
+
+; This makes sure we are able to fold the negate generated by the mul expansion
+; into the next instruction.
+; FIXME: We should make this work.
+define i32 @mul_neg_fold(i32 %a, i32 %b) {
+; X86-LABEL: mul_neg_fold:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    leal (%eax,%eax,8), %eax
+; X86-NEXT:    negl %eax
+; X86-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    retl
+;
+; X64-HSW-LABEL: mul_neg_fold:
+; X64-HSW:       # %bb.0:
+; X64-HSW-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-HSW-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [1:0.50]
+; X64-HSW-NEXT:    negl %eax # sched: [1:0.25]
+; X64-HSW-NEXT:    addl %esi, %eax # sched: [1:0.25]
+; X64-HSW-NEXT:    retq # sched: [7:1.00]
+;
+; X64-JAG-LABEL: mul_neg_fold:
+; X64-JAG:       # %bb.0:
+; X64-JAG-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-JAG-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [2:1.00]
+; X64-JAG-NEXT:    negl %eax # sched: [1:0.50]
+; X64-JAG-NEXT:    addl %esi, %eax # sched: [1:0.50]
+; X64-JAG-NEXT:    retq # sched: [4:1.00]
+;
+; X86-NOOPT-LABEL: mul_neg_fold:
+; X86-NOOPT:       # %bb.0:
+; X86-NOOPT-NEXT:    imull $-9, {{[0-9]+}}(%esp), %eax
+; X86-NOOPT-NEXT:    addl {{[0-9]+}}(%esp), %eax
+; X86-NOOPT-NEXT:    retl
+;
+; HSW-NOOPT-LABEL: mul_neg_fold:
+; HSW-NOOPT:       # %bb.0:
+; HSW-NOOPT-NEXT:    imull $-9, %edi, %eax # sched: [3:1.00]
+; HSW-NOOPT-NEXT:    addl %esi, %eax # sched: [1:0.25]
+; HSW-NOOPT-NEXT:    retq # sched: [7:1.00]
+;
+; JAG-NOOPT-LABEL: mul_neg_fold:
+; JAG-NOOPT:       # %bb.0:
+; JAG-NOOPT-NEXT:    imull $-9, %edi, %eax # sched: [3:1.00]
+; JAG-NOOPT-NEXT:    addl %esi, %eax # sched: [1:0.50]
+; JAG-NOOPT-NEXT:    retq # sched: [4:1.00]
+;
+; X64-SLM-LABEL: mul_neg_fold:
+; X64-SLM:       # %bb.0:
+; X64-SLM-NEXT:    # kill: def $edi killed $edi def $rdi
+; X64-SLM-NEXT:    leal (%rdi,%rdi,8), %eax # sched: [1:1.00]
+; X64-SLM-NEXT:    negl %eax # sched: [1:0.50]
+; X64-SLM-NEXT:    addl %esi, %eax # sched: [1:0.50]
+; X64-SLM-NEXT:    retq # sched: [4:1.00]
+;
+; SLM-NOOPT-LABEL: mul_neg_fold:
+; SLM-NOOPT:       # %bb.0:
+; SLM-NOOPT-NEXT:    imull $-9, %edi, %eax # sched: [3:1.00]
+; SLM-NOOPT-NEXT:    addl %esi, %eax # sched: [1:0.50]
+; SLM-NOOPT-NEXT:    retq # sched: [4:1.00]
+  %c = mul i32 %a, -9
+  %d = add i32 %b, %c
+  ret i32 %d
+}
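
Editor's note, not part of the patch: the missing fold the FIXME refers to is the
standard add-of-negate combine, i.e. an add whose operand is a negated value can be
rewritten as a subtract. A minimal illustrative sketch in LLVM IR, with invented
names, assuming the negate is visible as a (sub 0, x):

  ; Hypothetical reduced example; the negate stands in for the one the
  ; mul-by-constant expansion ends with.
  define i32 @add_of_neg_sketch(i32 %b, i32 %x) {
    %neg = sub i32 0, %x       ; negate of %x
    %d = add i32 %b, %neg      ; following add that could absorb the negate
    ret i32 %d                 ; ideally lowers to a single sub of %x from %b
  }

The rewrite itself (add X, (sub 0, Y)) -> (sub X, Y) is a generic combine; the test
above exercises the case where the negate only appears while the multiply by -9 is
being expanded, so the add is never revisited by the DAG combiner and the negl
survives into the final code.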