From: Simon Pilgrim
Date: Fri, 10 Feb 2017 14:37:25 +0000 (+0000)
Subject: [DAGCombine] Allow vector constant folding of any value type before type legalization
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=ada0a4f5b0aa9e699598efcc405f599d7e6c5df3;p=llvm

[DAGCombine] Allow vector constant folding of any value type before type legalization

The patch comes in 2 parts:

1 - it makes use of the SelectionDAG::NewNodesMustHaveLegalTypes flag to tell when it can safely constant fold illegal types.

2 - it correctly resets SelectionDAG::NewNodesMustHaveLegalTypes at the start of each call to SelectionDAGISel::CodeGenAndEmitDAG so all the pre-legalization stages can make use of it - not just the first basic block that gets handled.

Fix for PR30760

Differential Revision: https://reviews.llvm.org/D29568

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294749 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 9e802b99ed4..16425a08200 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3762,7 +3762,7 @@ SDValue SelectionDAG::FoldConstantVectorArithmetic(unsigned Opcode,
   // Find legal integer scalar type for constant promotion and
   // ensure that its scalar size is at least as large as source.
   EVT LegalSVT = VT.getScalarType();
-  if (LegalSVT.isInteger()) {
+  if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
     LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
     if (LegalSVT.bitsLT(VT.getScalarType()))
       return SDValue();
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 25646358d96..989c33e0fcb 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -763,6 +763,10 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
   int BlockNumber = -1;
   (void)BlockNumber;
   bool MatchFilterBB = false; (void)MatchFilterBB;
+
+  // Pre-type legalization allow creation of any node types.
+  CurDAG->NewNodesMustHaveLegalTypes = false;
+
 #ifndef NDEBUG
   MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
                    FilterDAGBasicBlockName ==
@@ -809,6 +813,7 @@ void SelectionDAGISel::CodeGenAndEmitDAG() {
   DEBUG(dbgs() << "Type-legalized selection DAG: BB#" << BlockNumber
         << " '" << BlockName << "'\n"; CurDAG->dump());
 
+  // Only allow creation of legal node types.
   CurDAG->NewNodesMustHaveLegalTypes = true;
 
   if (Changed) {
diff --git a/test/CodeGen/X86/vector-lzcnt-128.ll b/test/CodeGen/X86/vector-lzcnt-128.ll
index 6445a363787..9e11edcc29d 100644
--- a/test/CodeGen/X86/vector-lzcnt-128.ll
+++ b/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -1596,35 +1596,8 @@ define <2 x i64> @foldv2i64() nounwind {
 ;
 ; X32-SSE-LABEL: foldv2i64:
 ; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X32-SSE-NEXT:    pand %xmm2, %xmm0
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-SSE-NEXT:    movdqa %xmm3, %xmm4
-; X32-SSE-NEXT:    pshufb %xmm0, %xmm4
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X32-SSE-NEXT:    psrlw $4, %xmm0
-; X32-SSE-NEXT:    pand %xmm2, %xmm0
-; X32-SSE-NEXT:    pxor %xmm2, %xmm2
-; X32-SSE-NEXT:    pshufb %xmm0, %xmm3
-; X32-SSE-NEXT:    pcmpeqb %xmm2, %xmm0
-; X32-SSE-NEXT:    pand %xmm4, %xmm0
-; X32-SSE-NEXT:    paddb %xmm3, %xmm0
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
-; X32-SSE-NEXT:    pcmpeqb %xmm2, %xmm3
-; X32-SSE-NEXT:    psrlw $8, %xmm3
-; X32-SSE-NEXT:    pand %xmm0, %xmm3
-; X32-SSE-NEXT:    psrlw $8, %xmm0
-; X32-SSE-NEXT:    paddw %xmm3, %xmm0
-; X32-SSE-NEXT:    pcmpeqw %xmm2, %xmm1
-; X32-SSE-NEXT:    psrld $16, %xmm1
-; X32-SSE-NEXT:    pand %xmm0, %xmm1
-; X32-SSE-NEXT:    psrld $16, %xmm0
-; X32-SSE-NEXT:    paddd %xmm1, %xmm0
-; X32-SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; X32-SSE-NEXT:    psrlq $32, %xmm0
-; X32-SSE-NEXT:    paddq %xmm2, %xmm0
+; X32-SSE-NEXT:    movl $55, %eax
+; X32-SSE-NEXT:    movd %eax, %xmm0
 ; X32-SSE-NEXT:    retl
   %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
   ret <2 x i64> %out
@@ -1651,35 +1624,8 @@ define <2 x i64> @foldv2i64u() nounwind {
 ;
 ; X32-SSE-LABEL: foldv2i64u:
 ; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm1 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X32-SSE-NEXT:    pand %xmm2, %xmm0
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-SSE-NEXT:    movdqa %xmm3, %xmm4
-; X32-SSE-NEXT:    pshufb %xmm0, %xmm4
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm0
-; X32-SSE-NEXT:    psrlw $4, %xmm0
-; X32-SSE-NEXT:    pand %xmm2, %xmm0
-; X32-SSE-NEXT:    pxor %xmm2, %xmm2
-; X32-SSE-NEXT:    pshufb %xmm0, %xmm3
-; X32-SSE-NEXT:    pcmpeqb %xmm2, %xmm0
-; X32-SSE-NEXT:    pand %xmm4, %xmm0
-; X32-SSE-NEXT:    paddb %xmm3, %xmm0
-; X32-SSE-NEXT:    movdqa %xmm1, %xmm3
-; X32-SSE-NEXT:    pcmpeqb %xmm2, %xmm3
-; X32-SSE-NEXT:    psrlw $8, %xmm3
-; X32-SSE-NEXT:    pand %xmm0, %xmm3
-; X32-SSE-NEXT:    psrlw $8, %xmm0
-; X32-SSE-NEXT:    paddw %xmm3, %xmm0
-; X32-SSE-NEXT:    pcmpeqw %xmm2, %xmm1
-; X32-SSE-NEXT:    psrld $16, %xmm1
-; X32-SSE-NEXT:    pand %xmm0, %xmm1
-; X32-SSE-NEXT:    psrld $16, %xmm0
-; X32-SSE-NEXT:    paddd %xmm1, %xmm0
-; X32-SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; X32-SSE-NEXT:    psrlq $32, %xmm0
-; X32-SSE-NEXT:    paddq %xmm2, %xmm0
+; X32-SSE-NEXT:    movl $55, %eax
+; X32-SSE-NEXT:    movd %eax, %xmm0
 ; X32-SSE-NEXT:    retl
   %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
   ret <2 x i64> %out
diff --git a/test/CodeGen/X86/vector-lzcnt-256.ll b/test/CodeGen/X86/vector-lzcnt-256.ll
index c6839549302..04f74c94759 100644
--- a/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -830,31 +830,7 @@ define <4 x i64> @foldv4i64() nounwind {
 ;
 ; X32-AVX-LABEL: foldv4i64:
 ; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm1 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT:    vpand %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT:    vpsrlw $4, %ymm1, %ymm4
-; X32-AVX-NEXT:    vpand %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT:    vpxor %ymm4, %ymm4, %ymm4
-; X32-AVX-NEXT:    vpcmpeqb %ymm4, %ymm0, %ymm5
-; X32-AVX-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
-; X32-AVX-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm2
-; X32-AVX-NEXT:    vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm2
-; X32-AVX-NEXT:    vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpaddw %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpcmpeqw %ymm4, %ymm1, %ymm1
-; X32-AVX-NEXT:    vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT:    vpand %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT:    vpsrld $16, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
-; X32-AVX-NEXT:    vpsrlq $32, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
 ; X32-AVX-NEXT:    retl
   %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
   ret <4 x i64> %out
@@ -873,31 +849,7 @@ define <4 x i64> @foldv4i64u() nounwind {
 ;
 ; X32-AVX-LABEL: foldv4i64u:
 ; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm1 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT:    vpand %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT:    vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT:    vpsrlw $4, %ymm1, %ymm4
-; X32-AVX-NEXT:    vpand %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT:    vpxor %ymm4, %ymm4, %ymm4
-; X32-AVX-NEXT:    vpcmpeqb %ymm4, %ymm0, %ymm5
-; X32-AVX-NEXT:    vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT:    vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT:    vpaddb %ymm0, %ymm2, %ymm0
-; X32-AVX-NEXT:    vpcmpeqb %ymm4, %ymm1, %ymm2
-; X32-AVX-NEXT:    vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm2
-; X32-AVX-NEXT:    vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpaddw %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpcmpeqw %ymm4, %ymm1, %ymm1
-; X32-AVX-NEXT:    vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT:    vpand %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT:    vpsrld $16, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
-; X32-AVX-NEXT:    vpsrlq $32, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
 ; X32-AVX-NEXT:    retl
   %out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
   ret <4 x i64> %out
diff --git a/test/CodeGen/X86/vector-tzcnt-128.ll b/test/CodeGen/X86/vector-tzcnt-128.ll
index bf32e672138..3967b1d1c1b 100644
--- a/test/CodeGen/X86/vector-tzcnt-128.ll
+++ b/test/CodeGen/X86/vector-tzcnt-128.ll
@@ -1258,23 +1258,8 @@ define <2 x i64> @foldv2i64() nounwind {
 ;
 ; X32-SSE-LABEL: foldv2i64:
 ; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm0 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT:    pxor %xmm1, %xmm1
-; X32-SSE-NEXT:    pxor %xmm2, %xmm2
-; X32-SSE-NEXT:    psubq %xmm0, %xmm2
-; X32-SSE-NEXT:    pand %xmm0, %xmm2
-; X32-SSE-NEXT:    psubq {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
-; X32-SSE-NEXT:    pand %xmm3, %xmm4
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE-NEXT:    pshufb %xmm4, %xmm5
-; X32-SSE-NEXT:    psrlw $4, %xmm2
-; X32-SSE-NEXT:    pand %xmm3, %xmm2
-; X32-SSE-NEXT:    pshufb %xmm2, %xmm0
-; X32-SSE-NEXT:    paddb %xmm5, %xmm0
-; X32-SSE-NEXT:    psadbw %xmm1, %xmm0
+; X32-SSE-NEXT:    movl $8, %eax
+; X32-SSE-NEXT:    movd %eax, %xmm0
 ; X32-SSE-NEXT:    retl
   %out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
   ret <2 x i64> %out
@@ -1295,23 +1280,8 @@ define <2 x i64> @foldv2i64u() nounwind {
 ;
 ; X32-SSE-LABEL: foldv2i64u:
 ; X32-SSE:       # BB#0:
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm0 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT:    pxor %xmm1, %xmm1
-; X32-SSE-NEXT:    pxor %xmm2, %xmm2
-; X32-SSE-NEXT:    psubq %xmm0, %xmm2
-; X32-SSE-NEXT:    pand %xmm0, %xmm2
-; X32-SSE-NEXT:    psubq {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT:    movdqa %xmm2, %xmm4
-; X32-SSE-NEXT:    pand %xmm3, %xmm4
-; X32-SSE-NEXT:    movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-SSE-NEXT:    movdqa %xmm0, %xmm5
-; X32-SSE-NEXT:    pshufb %xmm4, %xmm5
-; X32-SSE-NEXT:    psrlw $4, %xmm2
-; X32-SSE-NEXT:    pand %xmm3, %xmm2
-; X32-SSE-NEXT:    pshufb %xmm2, %xmm0
-; X32-SSE-NEXT:    paddb %xmm5, %xmm0
-; X32-SSE-NEXT:    psadbw %xmm1, %xmm0
+; X32-SSE-NEXT:    movl $8, %eax
+; X32-SSE-NEXT:    movd %eax, %xmm0
 ; X32-SSE-NEXT:    retl
   %out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
   ret <2 x i64> %out
diff --git a/test/CodeGen/X86/vector-tzcnt-256.ll b/test/CodeGen/X86/vector-tzcnt-256.ll
index 0ced0c5b263..a0b277ddd73 100644
--- a/test/CodeGen/X86/vector-tzcnt-256.ll
+++ b/test/CodeGen/X86/vector-tzcnt-256.ll
@@ -871,20 +871,7 @@ define <4 x i64> @foldv4i64() nounwind {
 ;
 ; X32-AVX-LABEL: foldv4i64:
 ; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT:    vpxor %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpsubq {{\.LCPI.*}}, %ymm0, %ymm0
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm3
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
-; X32-AVX-NEXT:    vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
 ; X32-AVX-NEXT:    retl
   %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
   ret <4 x i64> %out
@@ -898,20 +885,7 @@ define <4 x i64> @foldv4i64u() nounwind {
 ;
 ; X32-AVX-LABEL: foldv4i64u:
 ; X32-AVX:       # BB#0:
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm0 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT:    vpxor %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT:    vpsubq %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpsubq {{\.LCPI.*}}, %ymm0, %ymm0
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm3
-; X32-AVX-NEXT:    vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT:    vpshufb %ymm3, %ymm4, %ymm3
-; X32-AVX-NEXT:    vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpshufb %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT:    vpaddb %ymm3, %ymm0, %ymm0
-; X32-AVX-NEXT:    vpsadbw %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT:    vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
 ; X32-AVX-NEXT:    retl
   %out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
   ret <4 x i64> %out
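
The folded constants in the updated CHECK lines follow directly from ctlz/cttz semantics on i64 lanes, where the count for a zero input is the bit width (64). Below is a small standalone C++ sketch, independent of the LLVM code in this patch (the ctlz64/cttz64 helpers are local to the example, not LLVM APIs), that recomputes the per-lane values for the <4 x i64> test operand <i64 256, i64 -1, i64 0, i64 255>; its first two lanes are also the <2 x i64> operand used by the foldv2i64 tests.

#include <cstdint>
#include <cstdio>

// Count leading zeros of a 64-bit value; returns 64 for X == 0,
// matching llvm.ctlz with is_zero_undef == 0.
static unsigned ctlz64(uint64_t X) {
  unsigned N = 0;
  for (uint64_t Mask = UINT64_C(1) << 63; Mask && !(X & Mask); Mask >>= 1)
    ++N;
  return N;
}

// Count trailing zeros of a 64-bit value; returns 64 for X == 0,
// matching llvm.cttz with is_zero_undef == 0.
static unsigned cttz64(uint64_t X) {
  unsigned N = 0;
  for (uint64_t Mask = UINT64_C(1); Mask && !(X & Mask); Mask <<= 1)
    ++N;
  return N;
}

int main() {
  // The <4 x i64> operand from the foldv4i64 tests above.
  const uint64_t In[4] = {256, ~UINT64_C(0), 0, 255};
  for (uint64_t V : In)
    std::printf("ctlz = %2u  cttz = %2u\n", ctlz64(V), cttz64(V));
  return 0;
}

Running it prints ctlz lanes 55, 0, 64, 56 and cttz lanes 8, 0, 64, 0 - i.e. the [55,0,0,0,64,0,56,0] and [8,0,0,0,64,0,0,0] dword constants (and the movl $55 / movl $8 scalars) that the new pre-legalization folds emit above.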