From: Craig Topper
Date: Thu, 4 Apr 2019 05:00:18 +0000 (+0000)
Subject: [X86] Use INSERT_SUBREG rather than SUBREG_TO_REG when creating LEA64_32 during isel.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=c0ee5fdcb714da8352236d89f6251f2636bf9c58;p=llvm

[X86] Use INSERT_SUBREG rather than SUBREG_TO_REG when creating LEA64_32 during isel.

SUBREG_TO_REG is supposed to be used to assert that we know the upper
bits are zero, but that isn't the case here: we've done no analysis of
the inputs. (An illustrative sketch of the two patterns is appended
after the diff.)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@357673 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 272be22996f..77c3aa73b7e 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2131,12 +2131,10 @@ bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
     Base = CurDAG->getRegister(0, MVT::i64);
   else if (Base.getValueType() == MVT::i32 && !dyn_cast<FrameIndexSDNode>(Base)) {
     // Base could already be %rip, particularly in the x32 ABI.
-    Base = SDValue(CurDAG->getMachineNode(
-                       TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
-                       CurDAG->getTargetConstant(0, DL, MVT::i64),
-                       Base,
-                       CurDAG->getTargetConstant(X86::sub_32bit, DL, MVT::i32)),
-                   0);
+    SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
+                                                     MVT::i64), 0);
+    Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
+                                         Base);
   }
 
   RN = dyn_cast<RegisterSDNode>(Index);
@@ -2145,13 +2143,10 @@ bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
   else {
     assert(Index.getValueType() == MVT::i32 &&
            "Expect to be extending 32-bit registers for use in LEA");
-    Index = SDValue(CurDAG->getMachineNode(
-                        TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
-                        CurDAG->getTargetConstant(0, DL, MVT::i64),
-                        Index,
-                        CurDAG->getTargetConstant(X86::sub_32bit, DL,
-                                                  MVT::i32)),
-                    0);
+    SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
+                                                     MVT::i64), 0);
+    Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
+                                          Index);
   }
 
   return true;
diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll
index 0494b0aeda9..011640c0d3f 100644
--- a/test/CodeGen/X86/avg.ll
+++ b/test/CodeGen/X86/avg.ll
@@ -2048,106 +2048,108 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
 ; AVX1-NEXT: pushq %r12
 ; AVX1-NEXT: pushq %rbx
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
 ; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
-; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; AVX1-NEXT: vpextrq $1, %xmm6, %r15
-; AVX1-NEXT: vmovq %xmm6, %r12
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
-; AVX1-NEXT: vpextrq $1, %xmm2, %r11
-; AVX1-NEXT: vmovq %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
-; AVX1-NEXT: vpextrq $1, %xmm2, %r13
-; AVX1-NEXT: vmovq %xmm2, %r14
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm7 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX1-NEXT: vpextrq $1, %xmm7, %r15
+; AVX1-NEXT: vmovq %xmm7, %r14
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
+; AVX1-NEXT: vpextrq $1, %xmm4, %r11
+; AVX1-NEXT: vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
+; AVX1-NEXT: vpextrq $1, %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm7 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
 ; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm8 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; AVX1-NEXT: vpextrq $1, %xmm6, %rbx
-; AVX1-NEXT: vmovq %xmm6, %rdx
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
-; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
-; AVX1-NEXT: vpextrq $1, %xmm1, %r9
-; AVX1-NEXT: vmovq %xmm1, %r10
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; AVX1-NEXT: vmovd %xmm7, %esi
-; AVX1-NEXT: vpextrd $1, %xmm7, %edi
-; AVX1-NEXT: vpextrd $2, %xmm7, %ecx
-; AVX1-NEXT: vpextrd $3, %xmm7, %ebp
-; AVX1-NEXT: vpextrd $3, %xmm6, %eax
-; AVX1-NEXT: leal -1(%rbp,%rax), %eax
-; AVX1-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX1-NEXT: vpextrd $2, %xmm6, %eax
-; AVX1-NEXT: leal -1(%rcx,%rax), %eax
-; AVX1-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX1-NEXT: vpextrd $1, %xmm6, %eax
-; AVX1-NEXT: leal -1(%rdi,%rax), %eax
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
+; AVX1-NEXT: vmovd %xmm6, %ecx
+; AVX1-NEXT: vpextrd $1, %xmm6, %edx
+; AVX1-NEXT: vpextrd $2, %xmm6, %r13d
+; AVX1-NEXT: vpextrd $3, %xmm6, %r12d
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; AVX1-NEXT: vmovd %xmm1, %ebx
+; AVX1-NEXT: vpextrd $1, %xmm1, %ebp
+; AVX1-NEXT: vpextrd $2, %xmm1, %esi
+; AVX1-NEXT: vpextrd $3, %xmm1, %edi
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm5[0],zero,xmm5[1],zero,xmm5[2],zero,xmm5[3],zero
+; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero
+; AVX1-NEXT: vmovd %xmm7, %r8d
+; AVX1-NEXT: leal -1(%r12,%rdi), %eax
 ; AVX1-NEXT: movl %eax, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
-; AVX1-NEXT: vmovd %xmm6, %eax
-; AVX1-NEXT: leal -1(%rsi,%rax), %r8d
-; AVX1-NEXT: vpextrq $1, %xmm5, %rax
-; AVX1-NEXT: leal -1(%r15,%rbx), %r15d
-; AVX1-NEXT: vmovq %xmm5, %rsi
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
-; AVX1-NEXT: leal -1(%r12,%rdx), %edx
-; AVX1-NEXT: vmovd %xmm4, %r12d
-; AVX1-NEXT: leal -1(%r11,%r9), %r11d
+; AVX1-NEXT: vpextrd $2, %xmm7, %eax
+; AVX1-NEXT: leal -1(%r13,%rsi), %esi
+; AVX1-NEXT: movl %esi, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
 ; AVX1-NEXT: vpextrd $2, %xmm4, %edi
-; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
-; AVX1-NEXT: leal -1(%rcx,%r10), %r10d
-; AVX1-NEXT: vpextrd $2, %xmm2, %ebx
-; AVX1-NEXT: leal -1(%r13,%rax), %r9d
-; AVX1-NEXT: vpextrd $3, %xmm2, %eax
-; AVX1-NEXT: leal -1(%r14,%rsi), %esi
+; AVX1-NEXT: leal -1(%rdx,%rbp), %edx
+; AVX1-NEXT: movl %edx, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
+; AVX1-NEXT: vpextrd $3, %xmm4, %edx
+; AVX1-NEXT: leal -1(%rcx,%rbx), %r10d
 ; AVX1-NEXT: vpextrd $3, %xmm1, %ecx
-; AVX1-NEXT: leal -1(%rax,%rcx), %eax
+; AVX1-NEXT: leal -1(%rdx,%rcx), %r9d
 ; AVX1-NEXT: vpextrd $2, %xmm1, %ecx
-; AVX1-NEXT: leal -1(%rbx,%rcx), %ebx
-; AVX1-NEXT: vpextrd $2, %xmm3, %ecx
-; AVX1-NEXT: leal -1(%rdi,%rcx), %ecx
-; AVX1-NEXT: vmovd %xmm3, %edi
-; AVX1-NEXT: leal -1(%r12,%rdi), %edi
-; AVX1-NEXT: vpextrq $1, %xmm8, %r12
+; AVX1-NEXT: leal -1(%rdi,%rcx), %edi
+; AVX1-NEXT: vpextrd $2, %xmm5, %ecx
+; AVX1-NEXT: leal -1(%rax,%rcx), %eax
+; AVX1-NEXT: vmovd %xmm5, %ecx
+; AVX1-NEXT: leal -1(%r8,%rcx), %r8d
+; AVX1-NEXT: vpextrq $1, %xmm6, %rdx
+; AVX1-NEXT: leal -1(%r15,%rdx), %r15d
+; AVX1-NEXT: vmovq %xmm6, %rdx
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
+; AVX1-NEXT: leal -1(%r14,%rdx), %r14d
+; AVX1-NEXT: vpextrq $1, %xmm1, %rdx
+; AVX1-NEXT: leal -1(%r11,%rdx), %edx
+; AVX1-NEXT: vmovq %xmm1, %rcx
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
+; AVX1-NEXT: leal -1(%rsi,%rcx), %ecx
+; AVX1-NEXT: vpextrq $1, %xmm1, %rsi
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX1-NEXT: leal -1(%rbp,%rsi), %esi
+; AVX1-NEXT: vmovq %xmm1, %rbx
+; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
+; AVX1-NEXT: leal -1(%rbp,%rbx), %ebx
+; AVX1-NEXT: vpextrq $1, %xmm8, %r11
 ; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX1-NEXT: vpextrq $1, %xmm0, %r13
-; AVX1-NEXT: leal -1(%r12,%r13), %r12d
-; AVX1-NEXT: vmovq %xmm8, %r13
-; AVX1-NEXT: vmovq %xmm0, %r14
-; AVX1-NEXT: leal -1(%r13,%r14), %ebp
+; AVX1-NEXT: vpextrq $1, %xmm0, %r12
+; AVX1-NEXT: leal -1(%r11,%r12), %r11d
+; AVX1-NEXT: vmovq %xmm8, %r12
+; AVX1-NEXT: vmovq %xmm0, %r13
+; AVX1-NEXT: leal -1(%r12,%r13), %ebp
 ; AVX1-NEXT: shrl %ebp
 ; AVX1-NEXT: vmovd %ebp, %xmm0
-; AVX1-NEXT: shrl %r12d
-; AVX1-NEXT: vpinsrb $1, %r12d, %xmm0, %xmm0
-; AVX1-NEXT: shrl %esi
-; AVX1-NEXT: vpinsrb $2, %esi, %xmm0, %xmm0
-; AVX1-NEXT: shrl %r9d
-; AVX1-NEXT: vpinsrb $3, %r9d, %xmm0, %xmm0
-; AVX1-NEXT: shrl %r10d
-; AVX1-NEXT: vpinsrb $4, %r10d, %xmm0, %xmm0
 ; AVX1-NEXT: shrl %r11d
-; AVX1-NEXT: vpinsrb $5, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrb $1, %r11d, %xmm0, %xmm0
+; AVX1-NEXT: shrl %ebx
+; AVX1-NEXT: vpinsrb $2, %ebx, %xmm0, %xmm0
+; AVX1-NEXT: shrl %esi
+; AVX1-NEXT: vpinsrb $3, %esi, %xmm0, %xmm0
+; AVX1-NEXT: shrl %ecx
+; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
 ; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: vpinsrb $6, %edx, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrb $5, %edx, %xmm0, %xmm0
+; AVX1-NEXT: shrl %r14d
+; AVX1-NEXT: vpinsrb $6, %r14d, %xmm0, %xmm0
 ; AVX1-NEXT: shrl %r15d
 ; AVX1-NEXT: vpinsrb $7, %r15d, %xmm0, %xmm0
-; AVX1-NEXT: shrl %edi
-; AVX1-NEXT: vpinsrb $8, %edi, %xmm0, %xmm0
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
-; AVX1-NEXT: shrl %ebx
-; AVX1-NEXT: vpinsrb $10, %ebx, %xmm0, %xmm0
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: vpinsrb $11, %eax, %xmm0, %xmm0
 ; AVX1-NEXT: shrl %r8d
-; AVX1-NEXT: vpinsrb $12, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: vpinsrb $8, %r8d, %xmm0, %xmm0
+; AVX1-NEXT: shrl %eax
+; AVX1-NEXT: vpinsrb $9, %eax, %xmm0, %xmm0
+; AVX1-NEXT: shrl %edi
+; AVX1-NEXT: vpinsrb $10, %edi, %xmm0, %xmm0
+; AVX1-NEXT: shrl %r9d
+; AVX1-NEXT: vpinsrb $11, %r9d, %xmm0, %xmm0
+; AVX1-NEXT: shrl %r10d
+; AVX1-NEXT: vpinsrb $12, %r10d, %xmm0, %xmm0
 ; AVX1-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 4-byte Reload
 ; AVX1-NEXT: shrl %eax
 ; AVX1-NEXT: vpinsrb $13, %eax, %xmm0, %xmm0
diff --git a/test/CodeGen/X86/x86-64-baseptr.ll b/test/CodeGen/X86/x86-64-baseptr.ll
index 30855e60e1d..84bb03c227f 100644
--- a/test/CodeGen/X86/x86-64-baseptr.ll
+++ b/test/CodeGen/X86/x86-64-baseptr.ll
@@ -39,19 +39,19 @@ define void @base() #0 {
 ; X32ABI: # %bb.0: # %entry
 ; X32ABI-NEXT: pushq %rbp
 ; X32ABI-NEXT: movl %esp, %ebp
-; X32ABI-NEXT: pushq %rbx
+; X32ABI-NEXT: pushq %rbx
 ; X32ABI-NEXT: andl $-32, %esp
 ; X32ABI-NEXT: subl $32, %esp
 ; X32ABI-NEXT: movl %esp, %ebx
 ; X32ABI-NEXT: callq helper
 ; X32ABI-NEXT: # kill: def $eax killed $eax def $rax
-; X32ABI-NEXT: movl %esp, %ecx
 ; X32ABI-NEXT: leal 31(,%rax,4), %eax
 ; X32ABI-NEXT: andl $-32, %eax
+; X32ABI-NEXT: movl %esp, %ecx
 ; X32ABI-NEXT: movl %ecx, %edx
 ; X32ABI-NEXT: subl %eax, %edx
-; X32ABI-NEXT: movl %edx, %esp
 ; X32ABI-NEXT: negl %eax
+; X32ABI-NEXT: movl %edx, %esp
 ; X32ABI-NEXT: movl $0, (%ecx,%eax)
 ; X32ABI-NEXT: leal -8(%ebp), %esp
 ; X32ABI-NEXT: popq %rbx
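---

For reference, the semantic difference between the two pseudo-instruction
patterns can be sketched as follows. This is an illustrative sketch only, not
code from the patch: the helper name widenTo64 is hypothetical, and the snippet
assumes it sits inside lib/Target/X86/X86ISelDAGToDAG.cpp, where the
SelectionDAG types and the generated X86 opcode/subregister enums it uses are
already in scope.

    // Widen a 32-bit SDValue to 64 bits during instruction selection.
    // (Hypothetical helper; mirrors the calls in selectLEA64_32Addr above.)
    static SDValue widenTo64(SelectionDAG *CurDAG, const SDLoc &DL,
                             SDValue V32) {
      // Old pattern: SUBREG_TO_REG. The leading zero immediate is a promise
      // to later passes that bits 63:32 of the result are already zero. That
      // promise only holds when the producing instruction implicitly zeroes
      // the upper half; isel has done no such analysis of an arbitrary
      // address component here, so the assertion was unsound.
      //
      //   return SDValue(CurDAG->getMachineNode(
      //                      TargetOpcode::SUBREG_TO_REG, DL, MVT::i64,
      //                      CurDAG->getTargetConstant(0, DL, MVT::i64), V32,
      //                      CurDAG->getTargetConstant(X86::sub_32bit, DL,
      //                                                MVT::i32)),
      //                  0);

      // New pattern: insert the 32-bit value into an IMPLICIT_DEF. Bits
      // 63:32 of the result are explicitly undefined, which matches what is
      // actually known about the input.
      SDValue ImplDef =
          SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL, MVT::i64), 0);
      return CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64,
                                           ImplDef, V32);
    }

Leaving the upper bits undefined is safe for LEA64_32r because its result is
the low 32 bits of the computed address, and carries in address arithmetic
only propagate upward, so undefined upper bits in the base or index cannot
leak into the low 32 bits of the result.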