From 54fe64b5b4f5ecfaa2b4b48cd9336562efd56790 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Fri, 11 Aug 2017 08:37:00 +0000
Subject: [PATCH] [DAGCombiner] Remove shuffle support from simplifyShuffleMask

rL310372 enabled simplifyShuffleMask to support undef shuffle mask inputs,
but it's causing hangs. Removing support until I can triage the problem.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@310699 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2 --
 test/CodeGen/X86/oddshuffles.ll          | 4 ++--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 7515a7ab6d7..a247f2656be 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -15161,8 +15161,6 @@ static SDValue simplifyShuffleMask(ShuffleVectorSDNode *SVN, SDValue N0,
     // TODO - handle more cases as required.
     if (V.getOpcode() == ISD::BUILD_VECTOR)
       return V.getOperand(Idx).isUndef();
-    if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(V))
-      return SVN->getMaskElt(Idx) < 0;
     return false;
   };
 
diff --git a/test/CodeGen/X86/oddshuffles.ll b/test/CodeGen/X86/oddshuffles.ll
index 5dba96ad56a..0bda41a30c6 100644
--- a/test/CodeGen/X86/oddshuffles.ll
+++ b/test/CodeGen/X86/oddshuffles.ll
@@ -258,7 +258,7 @@ define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
 ; SSE42-NEXT:    pextrb $0, %xmm1, 6(%rdi)
 ; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
 ; SSE42-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5,6,7]
-; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,u,u,u,u,u,u,u,u,u]
+; SSE42-NEXT:    pshufb {{.*#+}} xmm1 = xmm1[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; SSE42-NEXT:    pextrw $2, %xmm1, 4(%rdi)
 ; SSE42-NEXT:    movd %xmm1, (%rdi)
 ; SSE42-NEXT:    retq
@@ -268,7 +268,7 @@ define void @v7i8(<4 x i8> %a, <4 x i8> %b, <7 x i8>* %p) nounwind {
 ; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,3,1,3]
 ; AVX-NEXT:    vpshufb {{.*#+}} xmm2 = xmm1[8,9,8,9,4,5,8,9,0,1,12,13,0,1,14,15]
 ; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5,6,7]
-; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,u,u,u,u,u,u,u,u,u]
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
 ; AVX-NEXT:    vpextrb $0, %xmm1, 6(%rdi)
 ; AVX-NEXT:    vpextrw $2, %xmm0, 4(%rdi)
 ; AVX-NEXT:    vmovd %xmm0, (%rdi)
-- 
2.50.1
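
Editor's note: for readers unfamiliar with simplifyShuffleMask, below is a
minimal standalone sketch (plain C++, not LLVM code) of the undef-element
query whose shuffle case this patch removes. The Vec struct, simplifyMask(),
and all sample data are illustrative assumptions; only the shape of the
isUndefElt() check and its TODO comment follow the patch itself.

// Sketch: a shuffle mask element can be replaced with undef (-1) when the
// input lane it reads is already known undef. The BUILD_VECTOR-style case
// is the one the patch keeps; the mask-based case is the one removed.
#include <cstdio>
#include <vector>

struct Vec {
  // Per-lane undef flags (stands in for a BUILD_VECTOR of operands).
  std::vector<bool> UndefLane;
  // If non-empty, this value is itself a shuffle with this mask (-1 = undef).
  std::vector<int> ShuffleMask;
};

// Returns true if element Idx of V is known to be undef.
static bool isUndefElt(const Vec &V, int Idx) {
  // TODO - handle more cases as required.
  if (V.ShuffleMask.empty())
    return V.UndefLane[Idx];      // BUILD_VECTOR case: kept by the patch
  return V.ShuffleMask[Idx] < 0;  // shuffle case: removed by the patch
}

// Replace mask elements that read an undef input lane with -1 (undef).
static std::vector<int> simplifyMask(std::vector<int> Mask, const Vec &N0,
                                     const Vec &N1) {
  int NumElts = (int)Mask.size();
  for (int &M : Mask) {
    if (M < 0)
      continue;
    const Vec &Src = (M < NumElts) ? N0 : N1;  // mask indexes both inputs
    if (isUndefElt(Src, M % NumElts))
      M = -1;
  }
  return Mask;
}

int main() {
  Vec A{{false, true, false, true}, {}};  // lanes 1 and 3 undef
  Vec B{{false, false, false, false}, {}};
  std::vector<int> Mask = {1, 4, 3, 6};   // reads A[1], B[0], A[3], B[2]
  for (int M : simplifyMask(Mask, A, B))
    std::printf("%d ", M);                // prints: -1 4 -1 6
  std::printf("\n");
}

The commit message only says the shuffle case is "causing hangs"; one
plausible mechanism (unconfirmed here) is that querying a shuffle operand's
mask lets two shuffles repeatedly re-simplify each other, so DAGCombine
keeps re-adding the nodes to its worklist and never reaches a fixed point.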