// Find a legal integer scalar type for constant promotion, and
// ensure that its scalar size is at least as large as the source's.
EVT LegalSVT = VT.getScalarType();
- if (LegalSVT.isInteger()) {
+ if (NewNodesMustHaveLegalTypes && LegalSVT.isInteger()) {
LegalSVT = TLI->getTypeToTransformTo(*getContext(), LegalSVT);
if (LegalSVT.bitsLT(VT.getScalarType()))
return SDValue();
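
This guard is what unblocks the X32 test changes below: on a 32-bit target the scalar type of v2i64/v4i64 is the illegal i64, getTypeToTransformTo(i64) yields i32, and i32 is narrower than i64, so the unconditional check made every pre-legalization i64 constant fold bail out. A minimal standalone model of the new behavior (plain C++ with simplified stand-ins, not the LLVM API; it also drops the isInteger() filter for brevity):

#include <cstdio>

struct EVT { unsigned Bits; };                  // stand-in for llvm::EVT

// Stand-in for TLI->getTypeToTransformTo on a 32-bit target: integer types
// wider than the register size legalize toward i32.
static EVT getTypeToTransformTo(EVT SVT) {
  return SVT.Bits > 32 ? EVT{32} : SVT;
}

// Mirrors the guard: once NewNodesMustHaveLegalTypes is set, the promoted
// scalar must be at least as wide as the source; before that, anything goes.
static bool canFoldConstant(EVT SVT, bool NewNodesMustHaveLegalTypes) {
  if (!NewNodesMustHaveLegalTypes)
    return true;                                // illegal types still allowed
  return getTypeToTransformTo(SVT).Bits >= SVT.Bits;
}

int main() {
  printf("v2i64 fold, pre-legalize:  %d\n", canFoldConstant(EVT{64}, false)); // 1
  printf("v2i64 fold, post-legalize: %d\n", canFoldConstant(EVT{64}, true));  // 0
}
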
int BlockNumber = -1;
(void)BlockNumber;
bool MatchFilterBB = false; (void)MatchFilterBB;
+
+ // Pre-type legalization allows creation of any node types.
+ CurDAG->NewNodesMustHaveLegalTypes = false;
+
#ifndef NDEBUG
MatchFilterBB = (FilterDAGBasicBlockName.empty() ||
FilterDAGBasicBlockName ==
DEBUG(dbgs() << "Type-legalized selection DAG: BB#" << BlockNumber
<< " '" << BlockName << "'\n"; CurDAG->dump());
+ // Only allow creation of legal node types.
CurDAG->NewNodesMustHaveLegalTypes = true;
if (Changed) {
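
Taken together, the two toggles bracket type legalization. A condensed sketch of the flag's lifetime in CodeGenAndEmitDAG (the real function interleaves timers, DAG combining, and debug printing between these steps):

  CurDAG->NewNodesMustHaveLegalTypes = false;  // combiner may now create, e.g., i64 nodes
  bool Changed = CurDAG->LegalizeTypes();      // rewrites illegal types away
  CurDAG->NewNodesMustHaveLegalTypes = true;   // from here on, only legal node types
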
;
; X32-SSE-LABEL: foldv2i64:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pshufb %xmm0, %xmm4
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pshufb %xmm0, %xmm3
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm3, %xmm0
-; X32-SSE-NEXT: movdqa %xmm1, %xmm3
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm3
-; X32-SSE-NEXT: pand %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: paddw %xmm3, %xmm0
-; X32-SSE-NEXT: pcmpeqw %xmm2, %xmm1
-; X32-SSE-NEXT: psrld $16, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $16, %xmm0
-; X32-SSE-NEXT: paddd %xmm1, %xmm0
-; X32-SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; X32-SSE-NEXT: psrlq $32, %xmm0
-; X32-SSE-NEXT: paddq %xmm2, %xmm0
+; X32-SSE-NEXT: movl $55, %eax
+; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
ret <2 x i64> %out
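
Sanity check on the folded value (uses the GCC/Clang builtin, not part of the test): 256 = 1 << 8 leaves 64 - 1 - 8 = 55 leading zero bits in an i64, and -1 has none; movd zeroes the upper lane, so <55, 0> needs only the scalar move.

#include <cstdio>
int main() {
  printf("%d\n", __builtin_clzll(256ULL)); // 55
  printf("%d\n", __builtin_clzll(~0ULL));  // 0
}
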
;
; X32-SSE-LABEL: foldv2i64u:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pshufb %xmm0, %xmm4
-; X32-SSE-NEXT: movdqa %xmm1, %xmm0
-; X32-SSE-NEXT: psrlw $4, %xmm0
-; X32-SSE-NEXT: pand %xmm2, %xmm0
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: pshufb %xmm0, %xmm3
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm0
-; X32-SSE-NEXT: pand %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm3, %xmm0
-; X32-SSE-NEXT: movdqa %xmm1, %xmm3
-; X32-SSE-NEXT: pcmpeqb %xmm2, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm3
-; X32-SSE-NEXT: pand %xmm0, %xmm3
-; X32-SSE-NEXT: psrlw $8, %xmm0
-; X32-SSE-NEXT: paddw %xmm3, %xmm0
-; X32-SSE-NEXT: pcmpeqw %xmm2, %xmm1
-; X32-SSE-NEXT: psrld $16, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
-; X32-SSE-NEXT: psrld $16, %xmm0
-; X32-SSE-NEXT: paddd %xmm1, %xmm0
-; X32-SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
-; X32-SSE-NEXT: psrlq $32, %xmm0
-; X32-SSE-NEXT: paddq %xmm2, %xmm0
+; X32-SSE-NEXT: movl $55, %eax
+; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
ret <2 x i64> %out
;
; X32-AVX-LABEL: foldv4i64:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm1, %ymm4
-; X32-AVX-NEXT: vpand %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpcmpeqw %ymm4, %ymm1, %ymm1
-; X32-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
-; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
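
The expected vector is <55, 0, 64, 56>: ctlz(0) is 64 because the i1 0 operand keeps zero well-defined, and 255 occupies bits 0..7, leaving 56 leading zeros (the u variant below passes i1 -1 and the folder still emits 64 for the zero lane). The CHECK line prints eight 32-bit lanes, with each i64 appearing as a little-endian (low, high) pair. A tiny decoder to make the pairing explicit (hypothetical helper, not from the test):

#include <cstdint>
#include <cstdio>
int main() {
  const uint32_t Lanes[8] = {55, 0, 0, 0, 64, 0, 56, 0};
  for (int I = 0; I < 8; I += 2) {
    uint64_t Elt = (uint64_t)Lanes[I + 1] << 32 | Lanes[I];
    printf("%llu\n", (unsigned long long)Elt); // 55, 0, 64, 56
  }
}
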
;
; X32-AVX-LABEL: foldv4i64u:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT: vpand %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0,4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
-; X32-AVX-NEXT: vpshufb %ymm2, %ymm3, %ymm2
-; X32-AVX-NEXT: vpsrlw $4, %ymm1, %ymm4
-; X32-AVX-NEXT: vpand %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT: vpxor %ymm4, %ymm4, %ymm4
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm0, %ymm5
-; X32-AVX-NEXT: vpand %ymm5, %ymm2, %ymm2
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm3, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm0, %ymm2, %ymm0
-; X32-AVX-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm2, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm2
-; X32-AVX-NEXT: vpsrlw $8, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddw %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpcmpeqw %ymm4, %ymm1, %ymm1
-; X32-AVX-NEXT: vpsrld $16, %ymm1, %ymm1
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
-; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
-; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
-; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [55,0,0,0,64,0,56,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out
;
; X32-SSE-LABEL: foldv2i64:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: psubq %xmm0, %xmm2
-; X32-SSE-NEXT: pand %xmm0, %xmm2
-; X32-SSE-NEXT: psubq {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm2, %xmm4
-; X32-SSE-NEXT: pand %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
-; X32-SSE-NEXT: pshufb %xmm4, %xmm5
-; X32-SSE-NEXT: psrlw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm3, %xmm2
-; X32-SSE-NEXT: pshufb %xmm2, %xmm0
-; X32-SSE-NEXT: paddb %xmm5, %xmm0
-; X32-SSE-NEXT: psadbw %xmm1, %xmm0
+; X32-SSE-NEXT: movl $8, %eax
+; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 0)
ret <2 x i64> %out
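
Same style of check for trailing zeros (GCC/Clang builtins assumed): 256 = 1 << 8 has exactly 8 trailing zeros, while -1 has its low bit set, so its cttz is 0; the fold is <8, 0>.

#include <cstdio>
int main() {
  printf("%d\n", __builtin_ctzll(256ULL)); // 8
  printf("%d\n", __builtin_ctzll(~0ULL));  // 0
}
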
;
; X32-SSE-LABEL: foldv2i64u:
; X32-SSE: # BB#0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [256,0,4294967295,4294967295]
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm2, %xmm2
-; X32-SSE-NEXT: psubq %xmm0, %xmm2
-; X32-SSE-NEXT: pand %xmm0, %xmm2
-; X32-SSE-NEXT: psubq {{\.LCPI.*}}, %xmm2
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-SSE-NEXT: movdqa %xmm2, %xmm4
-; X32-SSE-NEXT: pand %xmm3, %xmm4
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-SSE-NEXT: movdqa %xmm0, %xmm5
-; X32-SSE-NEXT: pshufb %xmm4, %xmm5
-; X32-SSE-NEXT: psrlw $4, %xmm2
-; X32-SSE-NEXT: pand %xmm3, %xmm2
-; X32-SSE-NEXT: pshufb %xmm2, %xmm0
-; X32-SSE-NEXT: paddb %xmm5, %xmm0
-; X32-SSE-NEXT: psadbw %xmm1, %xmm0
+; X32-SSE-NEXT: movl $8, %eax
+; X32-SSE-NEXT: movd %eax, %xmm0
; X32-SSE-NEXT: retl
%out = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> <i64 256, i64 -1>, i1 -1)
ret <2 x i64> %out
;
; X32-AVX-LABEL: foldv4i64:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpsubq %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsubq {{\.LCPI.*}}, %ymm0, %ymm0
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm3
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm4, %ymm3
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm3, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 0)
ret <4 x i64> %out
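
For <256, -1, 0, 255> the fold is <8, 0, 64, 0>: cttz(0) returns the bit width because the i1 0 operand keeps zero defined, and 255 is odd so it has no trailing zeros. Paired into (low, high) 32-bit lanes, that is exactly [8,0,0,0,64,0,0,0]. A portable reference cttz with the defined-at-zero semantics (illustrative only; the builtins are undefined at zero):

#include <cstdint>
#include <cstdio>
static int cttz64(uint64_t X) {
  if (X == 0) return 64;               // defined-at-zero: return the bit width
  int N = 0;
  while (!(X & 1)) { X >>= 1; ++N; }
  return N;
}
int main() {
  const uint64_t In[4] = {256, ~0ULL, 0, 255};
  for (uint64_t V : In)
    printf("%d\n", cttz64(V));         // 8, 0, 64, 0
}
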
;
; X32-AVX-LABEL: foldv4i64u:
; X32-AVX: # BB#0:
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm0 = [256,0,4294967295,4294967295,0,0,255,0]
-; X32-AVX-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; X32-AVX-NEXT: vpsubq %ymm0, %ymm1, %ymm2
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsubq {{\.LCPI.*}}, %ymm0, %ymm0
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm3
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4,0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4]
-; X32-AVX-NEXT: vpshufb %ymm3, %ymm4, %ymm3
-; X32-AVX-NEXT: vpsrlw $4, %ymm0, %ymm0
-; X32-AVX-NEXT: vpand %ymm2, %ymm0, %ymm0
-; X32-AVX-NEXT: vpshufb %ymm0, %ymm4, %ymm0
-; X32-AVX-NEXT: vpaddb %ymm3, %ymm0, %ymm0
-; X32-AVX-NEXT: vpsadbw %ymm1, %ymm0, %ymm0
+; X32-AVX-NEXT: vmovaps {{.*#+}} ymm0 = [8,0,0,0,64,0,0,0]
; X32-AVX-NEXT: retl
%out = call <4 x i64> @llvm.cttz.v4i64(<4 x i64> <i64 256, i64 -1, i64 0, i64 255>, i1 -1)
ret <4 x i64> %out