From: Simon Pilgrim
Date: Thu, 20 Jul 2017 11:03:30 +0000 (+0000)
Subject: [DAGCombiner] Match ISD::SRL non-uniform constant vector patterns using predicates.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=0f6dee58a66501d600be97b5cf796efbe265ac25;p=llvm

[DAGCombiner] Match ISD::SRL non-uniform constant vector patterns using predicates.

Use the predicate matchers introduced in D35492 to match more ISD::SRL constant folds.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@308602 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 947b7070a1a..535b272f793 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5790,7 +5790,11 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
   if (isNullConstantOrNullSplatConstant(N0))
     return N0;
   // fold (srl x, c >= size(x)) -> undef
-  if (N1C && N1C->getAPIntValue().uge(OpSizeInBits))
+  // NOTE: ALL vector elements must be too big to avoid partial UNDEFs.
+  auto MatchShiftTooBig = [OpSizeInBits](ConstantSDNode *Val) {
+    return Val->getAPIntValue().uge(OpSizeInBits);
+  };
+  if (matchUnaryPredicate(N1, MatchShiftTooBig))
     return DAG.getUNDEF(VT);
   // fold (srl x, 0) -> x
   if (N1C && N1C->isNullValue())
@@ -5805,20 +5809,29 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
     return DAG.getConstant(0, SDLoc(N), VT);
 
   // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2))
-  if (N1C && N0.getOpcode() == ISD::SRL) {
-    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
-      SDLoc DL(N);
-      APInt c1 = N0C1->getAPIntValue();
-      APInt c2 = N1C->getAPIntValue();
+  if (N0.getOpcode() == ISD::SRL) {
+    auto MatchOutOfRange = [OpSizeInBits](ConstantSDNode *LHS,
+                                          ConstantSDNode *RHS) {
+      APInt c1 = LHS->getAPIntValue();
+      APInt c2 = RHS->getAPIntValue();
       zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
+      return (c1 + c2).uge(OpSizeInBits);
+    };
+    if (matchBinaryPredicate(N1, N0.getOperand(1), MatchOutOfRange))
+      return DAG.getConstant(0, SDLoc(N), VT);
 
-      APInt Sum = c1 + c2;
-      if (Sum.uge(OpSizeInBits))
-        return DAG.getConstant(0, DL, VT);
-
-      return DAG.getNode(
-          ISD::SRL, DL, VT, N0.getOperand(0),
-          DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType()));
+    auto MatchInRange = [OpSizeInBits](ConstantSDNode *LHS,
+                                       ConstantSDNode *RHS) {
+      APInt c1 = LHS->getAPIntValue();
+      APInt c2 = RHS->getAPIntValue();
+      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);
+      return (c1 + c2).ult(OpSizeInBits);
+    };
+    if (matchBinaryPredicate(N1, N0.getOperand(1), MatchInRange)) {
+      SDLoc DL(N);
+      EVT ShiftVT = N1.getValueType();
+      SDValue Sum = DAG.getNode(ISD::ADD, DL, ShiftVT, N1, N0.getOperand(1));
+      return DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), Sum);
     }
   }
 
diff --git a/test/CodeGen/X86/combine-srl.ll b/test/CodeGen/X86/combine-srl.ll
index 473fae19f4f..1d064b36ee6 100644
--- a/test/CodeGen/X86/combine-srl.ll
+++ b/test/CodeGen/X86/combine-srl.ll
@@ -33,12 +33,10 @@ define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
 define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_outofrange1:
 ; SSE:       # BB#0:
-; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_outofrange1:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
   ret <4 x i32> %1
@@ -119,31 +117,21 @@ define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
 define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_lshr1:
 ; SSE:       # BB#0:
+; SSE-NEXT:    movdqa %xmm0, %xmm1
+; SSE-NEXT:    psrld $10, %xmm1
 ; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    psrld $6, %xmm2
+; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrld $2, %xmm1
-; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    psrld $3, %xmm0
-; SSE-NEXT:    psrld $1, %xmm2
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    psrld $7, %xmm0
-; SSE-NEXT:    movdqa %xmm1, %xmm2
-; SSE-NEXT:    psrld $5, %xmm2
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT:    movdqa %xmm1, %xmm0
-; SSE-NEXT:    psrld $6, %xmm0
-; SSE-NEXT:    psrld $4, %xmm1
-; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
-; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
-; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    psrld $8, %xmm1
+; SSE-NEXT:    psrld $4, %xmm0
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
+; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_lshr1:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
   %2 = lshr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
@@ -169,32 +157,12 @@ define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
 define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
 ; SSE-LABEL: combine_vec_lshr_lshr_zero1:
 ; SSE:       # BB#0:
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrld $20, %xmm1
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psrld $18, %xmm2
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrld $19, %xmm1
-; SSE-NEXT:    psrld $17, %xmm0
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrld $28, %xmm1
-; SSE-NEXT:    movdqa %xmm0, %xmm2
-; SSE-NEXT:    psrld $26, %xmm2
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    movdqa %xmm0, %xmm1
-; SSE-NEXT:    psrld $27, %xmm1
-; SSE-NEXT:    psrld $25, %xmm0
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
-; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_lshr_lshr_zero1:
 ; AVX:       # BB#0:
-; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %1 = lshr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
   %2 = lshr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
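
The safety condition behind both folds above is that the per-element predicate must hold for
every constant element of the shift-amount vector before the combine fires; if any lane fails,
no fold happens at all, so a partially-undef or partially-folded result can never be produced
(the NOTE in the first hunk). The snippet below is a minimal standalone sketch of that
all-lanes rule, not LLVM code: matchAllLanes and the plain uint64_t vectors are illustrative
stand-ins for the matchUnaryPredicate helper and the BUILD_VECTOR shift amounts used here.

// Standalone sketch: a fold guarded by a per-element predicate fires only
// when EVERY shift amount satisfies the predicate.
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

// Stand-in for a matchUnaryPredicate-style helper: true iff Pred holds for all lanes.
static bool matchAllLanes(const std::vector<uint64_t> &Amts,
                          const std::function<bool(uint64_t)> &Pred) {
  for (uint64_t Amt : Amts)
    if (!Pred(Amt))
      return false;
  return true;
}

int main() {
  const unsigned OpSizeInBits = 32; // element width of the shifted vector
  auto ShiftTooBig = [OpSizeInBits](uint64_t Amt) { return Amt >= OpSizeInBits; };

  // Uniform and non-uniform out-of-range amounts both satisfy the predicate
  // (so the DAG fold to undef is allowed); the mixed case must not, since
  // lane 0 is still a well-defined shift.
  std::vector<uint64_t> Uniform    = {33, 33, 33, 33};
  std::vector<uint64_t> NonUniform = {33, 34, 35, 36};
  std::vector<uint64_t> Mixed      = {4, 34, 35, 36};

  std::cout << matchAllLanes(Uniform, ShiftTooBig) << '\n';    // 1 -> fold
  std::cout << matchAllLanes(NonUniform, ShiftTooBig) << '\n'; // 1 -> fold
  std::cout << matchAllLanes(Mixed, ShiftTooBig) << '\n';      // 0 -> no fold
  return 0;
}

The same all-lanes reasoning is why combine_vec_lshr_lshr_zero1 can now collapse to a zero
vector: every summed shift amount (17+25 through 20+28) is >= 32, so the out-of-range
predicate holds for all four lanes.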