From: Sanjay Patel <spatel@rotateright.com>
Date: Sun, 29 Jul 2018 18:13:16 +0000 (+0000)
Subject: [InstCombine] try to fold 'add+sub' to 'not+add'
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=19a2e2a495feab825a915c32b2f9a7c95313c6fa;p=llvm

[InstCombine] try to fold 'add+sub' to 'not+add'

These are reassociated versions of the same pattern and similar
transforms as in rL338200 and rL338118.

The motivation is identical to those commits: Patterns with add/sub
combos can be improved using 'not' ops. This is better for analysis and
may lead to follow-on transforms because 'xor' and 'add' are
commutative/associative. It can also help codegen.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@338221 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index e79bdf75d40..aa66b442b07 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1614,6 +1614,14 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
   if (match(Op0, m_Not(m_Value(X))) && match(Op1, m_Not(m_Value(Y))))
     return BinaryOperator::CreateSub(Y, X);
 
+  // (X + -1) - Y --> ~Y + X
+  if (match(Op0, m_OneUse(m_Add(m_Value(X), m_AllOnes()))))
+    return BinaryOperator::CreateAdd(Builder.CreateNot(Op1), X);
+
+  // Y - (X + 1) --> ~X + Y
+  if (match(Op1, m_OneUse(m_Add(m_Value(X), m_One()))))
+    return BinaryOperator::CreateAdd(Builder.CreateNot(X), Op0);
+
   if (Constant *C = dyn_cast<Constant>(Op0)) {
     bool IsNegate = match(C, m_ZeroInt());
     Value *X;
diff --git a/test/Transforms/InstCombine/sub-not.ll b/test/Transforms/InstCombine/sub-not.ll
index 9335fdafcc1..cd1f8f3bd52 100644
--- a/test/Transforms/InstCombine/sub-not.ll
+++ b/test/Transforms/InstCombine/sub-not.ll
@@ -75,8 +75,8 @@ define <2 x i8> @dec_sub_vec(<2 x i8> %x, <2 x i8> %y) {
 
 define i8 @sub_inc(i8 %x, i8 %y) {
 ; CHECK-LABEL: @sub_inc(
-; CHECK-NEXT:    [[S:%.*]] = add i8 [[X:%.*]], 1
-; CHECK-NEXT:    [[R:%.*]] = sub i8 [[Y:%.*]], [[S]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[X:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %s = add i8 %x, 1
@@ -99,8 +99,8 @@ define i8 @sub_inc_extra_use(i8 %x, i8 %y) {
 
 define <2 x i8> @sub_inc_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @sub_inc_vec(
-; CHECK-NEXT:    [[S:%.*]] = add <2 x i8> [[X:%.*]], <i8 1, i8 1>
-; CHECK-NEXT:    [[R:%.*]] = sub <2 x i8> [[Y:%.*]], [[S]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], <i8 -1, i8 -1>
+; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[TMP1]], [[Y:%.*]]
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %s = add <2 x i8> %x, <i8 1, i8 1>
@@ -110,8 +110,8 @@ define <2 x i8> @sub_inc_vec(<2 x i8> %x, <2 x i8> %y) {
 
 define i8 @sub_dec(i8 %x, i8 %y) {
 ; CHECK-LABEL: @sub_dec(
-; CHECK-NEXT:    [[S:%.*]] = add i8 [[X:%.*]], -1
-; CHECK-NEXT:    [[R:%.*]] = sub i8 [[S]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
+; CHECK-NEXT:    [[R:%.*]] = add i8 [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT:    ret i8 [[R]]
 ;
   %s = add i8 %x, -1
@@ -134,8 +134,8 @@ define i8 @sub_dec_extra_use(i8 %x, i8 %y) {
 
 define <2 x i8> @sub_dec_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @sub_dec_vec(
-; CHECK-NEXT:    [[S:%.*]] = add <2 x i8> [[X:%.*]], <i8 -1, i8 -1>
-; CHECK-NEXT:    [[R:%.*]] = sub <2 x i8> [[S]], [[Y:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1>
+; CHECK-NEXT:    [[R:%.*]] = add <2 x i8> [[TMP1]], [[X:%.*]]
 ; CHECK-NEXT:    ret <2 x i8> [[R]]
 ;
   %s = add <2 x i8> %x, <i8 -1, i8 -1>