From c09fbb030f3ec176556cc6db4f7b2e08fe58fd30 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Sat, 15 Jul 2017 17:26:01 +0000
Subject: [PATCH] [InstCombine] improve (1 << x) & 1 --> zext(x == 0) folding

1. Add a one-use check to prevent increasing instruction count.
2. Generalize the pattern matching to include vector types.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@308105 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../InstCombine/InstCombineAndOrXor.cpp       | 28 +++++++++----------
 test/Transforms/InstCombine/and2.ll           | 26 +++++++++++------
 2 files changed, 31 insertions(+), 23 deletions(-)

diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index e8560099f9d..6f0703178f3 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1284,10 +1284,19 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
   if (Value *V = SimplifyBSwap(I, Builder))
     return replaceInstUsesWith(I, V);
 
-  // (0 - x) & 1 --> x & 1
-  Value *X;
-  if (match(Op1, m_One()) && match(Op0, m_Sub(m_Zero(), m_Value(X))))
-    return BinaryOperator::CreateAnd(X, Op1);
+  if (match(Op1, m_One())) {
+    Value *X;
+    // (0 - x) & 1 --> x & 1
+    if (match(Op0, m_Sub(m_Zero(), m_Value(X))))
+      return BinaryOperator::CreateAnd(X, Op1);
+
+    // (1 << x) & 1 --> zext(x == 0)
+    // (1 >> x) & 1 --> zext(x == 0)
+    if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X))))) {
+      Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(I.getType(), 0));
+      return new ZExtInst(IsZero, I.getType());
+    }
+  }
 
   if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
     const APInt &AndRHSMask = AndRHS->getValue();
@@ -1320,17 +1329,6 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
 
       break;
     }
-
-    case Instruction::Shl:
-    case Instruction::LShr:
-      // (1 << x) & 1 --> zext(x == 0)
-      // (1 >> x) & 1 --> zext(x == 0)
-      if (AndRHSMask.isOneValue() && Op0LHS == AndRHS) {
-        Value *NewICmp =
-            Builder.CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
-        return new ZExtInst(NewICmp, I.getType());
-      }
-      break;
     }
 
   // ((C1 OP zext(X)) & C2) -> zext((C1-X) & C2) if C2 fits in the bitwidth
diff --git a/test/Transforms/InstCombine/and2.ll b/test/Transforms/InstCombine/and2.ll
index 177e9d25700..15772d158f6 100644
--- a/test/Transforms/InstCombine/and2.ll
+++ b/test/Transforms/InstCombine/and2.ll
@@ -118,6 +118,8 @@ define i64 @test10(i64 %x) {
   ret i64 %add
 }
 
+; (1 << x) & 1 --> zext(x == 0)
+
 define i8 @and1_shl1_is_cmp_eq_0(i8 %x) {
 ; CHECK-LABEL: @and1_shl1_is_cmp_eq_0(
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8 %x, 0
@@ -129,11 +131,12 @@ define i8 @and1_shl1_is_cmp_eq_0(i8 %x) {
   ret i8 %and
 }
 
+; Don't do it if the shift has another use.
+
 define i8 @and1_shl1_is_cmp_eq_0_multiuse(i8 %x) {
 ; CHECK-LABEL: @and1_shl1_is_cmp_eq_0_multiuse(
 ; CHECK-NEXT:    [[SH:%.*]] = shl i8 1, %x
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8 %x, 0
-; CHECK-NEXT:    [[AND:%.*]] = zext i1 [[TMP1]] to i8
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SH]], 1
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[SH]], [[AND]]
 ; CHECK-NEXT:    ret i8 [[ADD]]
 ;
@@ -143,10 +146,12 @@ define i8 @and1_shl1_is_cmp_eq_0_multiuse(i8 %x) {
   ret i8 %add
 }
 
+; (1 << x) & 1 --> zext(x == 0)
+
 define <2 x i8> @and1_shl1_is_cmp_eq_0_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @and1_shl1_is_cmp_eq_0_vec(
-; CHECK-NEXT:    [[SH:%.*]] = shl <2 x i8> <i8 1, i8 1>, %x
-; CHECK-NEXT:    [[AND:%.*]] = and <2 x i8> [[SH]], <i8 1, i8 1>
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <2 x i8> %x, zeroinitializer
+; CHECK-NEXT:    [[AND:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
 ; CHECK-NEXT:    ret <2 x i8> [[AND]]
 ;
   %sh = shl <2 x i8> <i8 1, i8 1>, %x
@@ -154,6 +159,8 @@ define <2 x i8> @and1_shl1_is_cmp_eq_0_vec(<2 x i8> %x) {
   ret <2 x i8> %and
 }
 
+; (1 >> x) & 1 --> zext(x == 0)
+
 define i8 @and1_lshr1_is_cmp_eq_0(i8 %x) {
 ; CHECK-LABEL: @and1_lshr1_is_cmp_eq_0(
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8 %x, 0
@@ -165,11 +172,12 @@ define i8 @and1_lshr1_is_cmp_eq_0(i8 %x) {
   ret i8 %and
 }
 
+; Don't do it if the shift has another use.
+
 define i8 @and1_lshr1_is_cmp_eq_0_multiuse(i8 %x) {
 ; CHECK-LABEL: @and1_lshr1_is_cmp_eq_0_multiuse(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i8 1, %x
-; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8 %x, 0
-; CHECK-NEXT:    [[AND:%.*]] = zext i1 [[TMP1]] to i8
+; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SH]], 1
 ; CHECK-NEXT:    [[ADD:%.*]] = add i8 [[SH]], [[AND]]
 ; CHECK-NEXT:    ret i8 [[ADD]]
 ;
@@ -179,10 +187,12 @@ define i8 @and1_lshr1_is_cmp_eq_0_multiuse(i8 %x) {
   ret i8 %add
 }
 
+; (1 >> x) & 1 --> zext(x == 0)
+
 define <2 x i8> @and1_lshr1_is_cmp_eq_0_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @and1_lshr1_is_cmp_eq_0_vec(
-; CHECK-NEXT:    [[SH:%.*]] = lshr <2 x i8> <i8 1, i8 1>, %x
-; CHECK-NEXT:    [[AND:%.*]] = and <2 x i8> [[SH]], <i8 1, i8 1>
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <2 x i8> %x, zeroinitializer
+; CHECK-NEXT:    [[AND:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
 ; CHECK-NEXT:    ret <2 x i8> [[AND]]
 ;
   %sh = lshr <2 x i8> <i8 1, i8 1>, %x
-- 
2.50.0
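
As a quick sanity check of the identity behind the fold, here is a standalone C++ sketch (hypothetical; not part of the patch) that exhaustively verifies, for an 8-bit type and every in-range shift amount, that both (1 << x) & 1 and (1 >> x) & 1 equal zext(x == 0):

// check_fold.cpp -- hypothetical standalone check, not part of this patch.
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned X = 0; X < 8; ++X) {
    // Shift in 'unsigned' and truncate back to 8 bits; for in-range shift
    // amounts this mirrors i8 shl/lshr semantics.
    uint8_t Shl = static_cast<uint8_t>((1u << X) & 1u);
    uint8_t Lshr = static_cast<uint8_t>((1u >> X) & 1u);
    uint8_t Zext = (X == 0) ? 1 : 0; // zext i1 (x == 0) to i8
    assert(Shl == Zext && Lshr == Zext);
  }
  return 0;
}

Two notes on the rewritten fold itself. The m_OneUse check implements point 1 of the commit message: when the shift has another use (as in the *_multiuse tests), the shift must stay, so rewriting the 'and' into an icmp+zext pair would grow two instructions into three. And ConstantInt::get(I.getType(), 0) returns a zero splat when I.getType() is a vector type, which is what lets the same code handle the <2 x i8> tests (point 2).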