From: Roman Lebedev
Date: Mon, 1 Jul 2019 15:55:24 +0000 (+0000)
Subject: [InstCombine] (Y + ~X) + 1 --> Y - X fold (PR42459)
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=5b80637fd33a726d813623cf0ed2aeba10893ee1;p=llvm

[InstCombine] (Y + ~X) + 1 --> Y - X fold (PR42459)

Summary:
To be noted, this pattern is not unhandled by instcombine per se: it somehow
does end up being folded when one runs opt -O3, but not if it's just
-instcombine. Regardless, that fold is indirect, depends on some other folds,
and is thus blind when there are extra uses.

This addresses the regression exposed in D63992.

https://godbolt.org/z/7DGltU
https://rise4fun.com/Alive/EPO0

Fixes [[ https://bugs.llvm.org/show_bug.cgi?id=42459 | PR42459 ]]
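For reference, the fold is just the two's-complement identity ~X == -X - 1:
(Y + ~X) + 1 == (Y + (-X - 1)) + 1 == Y - X; the Alive link above is the
proof for the actual IR semantics. As an informal, standalone sanity check
(not part of the patch itself), the following C++ sketch exhaustively
verifies the scalar identity for all 8-bit values:

  #include <cstdint>
  #include <cstdio>

  int main() {
    for (unsigned X = 0; X < 256; ++X) {
      for (unsigned Y = 0; Y < 256; ++Y) {
        std::uint8_t x = static_cast<std::uint8_t>(X);
        std::uint8_t y = static_cast<std::uint8_t>(Y);
        // (y + ~x) + 1, truncating every intermediate to 8 bits (i8 wrap-around).
        std::uint8_t Folded = static_cast<std::uint8_t>(
            static_cast<std::uint8_t>(y + static_cast<std::uint8_t>(~x)) + 1);
        // y - x, the form the new fold produces directly.
        std::uint8_t Expected = static_cast<std::uint8_t>(y - x);
        if (Folded != Expected) {
          std::printf("mismatch: x=%u y=%u\n", X, Y);
          return 1;
        }
      }
    }
    std::puts("(Y + ~X) + 1 == Y - X holds for all i8 values");
    return 0;
  }

The point of matching the pattern directly, rather than relying on the chain
of other folds, is that the direct match still fires when the intermediate
values have extra uses, as the multi-use tests (t5, t6, t7) below demonstrate.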
Reviewers: spatel, nikic, huihuiz

Reviewed By: spatel

Subscribers: llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D63993

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@364792 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index f71d23974ba..0b0cc4bc6e7 100644
--- a/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1202,7 +1202,10 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
   // (A + 1) + ~B --> A - B
   // ~B + (A + 1) --> A - B
-  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))))
+  // (~B + A) + 1 --> A - B
+  // (A + ~B) + 1 --> A - B
+  if (match(&I, m_c_BinOp(m_Add(m_Value(A), m_One()), m_Not(m_Value(B)))) ||
+      match(&I, m_BinOp(m_c_Add(m_Not(m_Value(B)), m_Value(A)), m_One())))
     return BinaryOperator::CreateSub(A, B);

   // X % C0 + (( X / C0 ) % C1) * C0 => X % (C0 * C1)
diff --git a/test/Transforms/InstCombine/add.ll b/test/Transforms/InstCombine/add.ll
index a2f35cc4359..c31c74020fa 100644
--- a/test/Transforms/InstCombine/add.ll
+++ b/test/Transforms/InstCombine/add.ll
@@ -951,9 +951,7 @@ define i32 @add_not_increment_commuted(i32 %A, i32 %B) {
 define i32 @add_to_sub(i32 %M, i32 %B) {
 ; CHECK-LABEL: @add_to_sub(
 ; CHECK-NEXT: [[A:%.*]] = mul i32 [[M:%.*]], 42
-; CHECK-NEXT: [[C:%.*]] = xor i32 [[B:%.*]], -1
-; CHECK-NEXT: [[D:%.*]] = add i32 [[A]], [[C]]
-; CHECK-NEXT: [[E:%.*]] = add i32 [[D]], 1
+; CHECK-NEXT: [[E:%.*]] = sub i32 [[A]], [[B:%.*]]
 ; CHECK-NEXT: ret i32 [[E]]
 ;
   %A = mul i32 %M, 42 ; thwart complexity-based ordering
@@ -966,10 +964,8 @@ define i32 @add_to_sub(i32 %M, i32 %B) {
 ; E = (~B + A) + 1 = A - B
 define i32 @add_to_sub2(i32 %A, i32 %M) {
 ; CHECK-LABEL: @add_to_sub2(
-; CHECK-NEXT: [[B:%.*]] = mul i32 [[M:%.*]], 42
-; CHECK-NEXT: [[C:%.*]] = xor i32 [[B]], -1
-; CHECK-NEXT: [[D:%.*]] = add i32 [[C]], [[A:%.*]]
-; CHECK-NEXT: [[E:%.*]] = add i32 [[D]], 1
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[M:%.*]], -42
+; CHECK-NEXT: [[E:%.*]] = add i32 [[TMP1]], [[A:%.*]]
 ; CHECK-NEXT: ret i32 [[E]]
 ;
   %B = mul i32 %M, 42 ; thwart complexity-based ordering
diff --git a/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll b/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll
index b47863650c3..ae24b923b5d 100644
--- a/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll
+++ b/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll
@@ -12,9 +12,7 @@
 define i32 @t0(i32 %x, i32 %y) {
 ; CHECK-LABEL: @t0(
-; CHECK-NEXT: [[T0:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], 1
+; CHECK-NEXT: [[T2:%.*]] = sub i32 [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT: ret i32 [[T2]]
 ;
   %t0 = xor i32 %x, -1
@@ -29,9 +27,7 @@ define i32 @t0(i32 %x, i32 %y) {
 define <4 x i32> @t1_vec_splat(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: @t1_vec_splat(
-; CHECK-NEXT: [[T0:%.*]] = xor <4 x i32> [[X:%.*]],
-; CHECK-NEXT: [[T1:%.*]] = add <4 x i32> [[T0]], [[Y:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = add <4 x i32> [[T1]],
+; CHECK-NEXT: [[T2:%.*]] = sub <4 x i32> [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT: ret <4 x i32> [[T2]]
 ;
   %t0 = xor <4 x i32> %x,
@@ -42,9 +38,7 @@ define <4 x i32> @t1_vec_splat(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @t2_vec_undef0(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: @t2_vec_undef0(
-; CHECK-NEXT: [[T0:%.*]] = xor <4 x i32> [[X:%.*]],
-; CHECK-NEXT: [[T1:%.*]] = add <4 x i32> [[T0]], [[Y:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = add <4 x i32> [[T1]],
+; CHECK-NEXT: [[T2:%.*]] = sub <4 x i32> [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT: ret <4 x i32> [[T2]]
 ;
   %t0 = xor <4 x i32> %x,
@@ -55,9 +49,7 @@ define <4 x i32> @t2_vec_undef0(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @t3_vec_undef1(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: @t3_vec_undef1(
-; CHECK-NEXT: [[T0:%.*]] = xor <4 x i32> [[X:%.*]],
-; CHECK-NEXT: [[T1:%.*]] = add <4 x i32> [[T0]], [[Y:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = add <4 x i32> [[T1]],
+; CHECK-NEXT: [[T2:%.*]] = sub <4 x i32> [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT: ret <4 x i32> [[T2]]
 ;
   %t0 = xor <4 x i32> %x,
@@ -68,9 +60,7 @@ define <4 x i32> @t3_vec_undef1(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @t4_vec_undef2(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: @t4_vec_undef2(
-; CHECK-NEXT: [[T0:%.*]] = xor <4 x i32> [[X:%.*]],
-; CHECK-NEXT: [[T1:%.*]] = add <4 x i32> [[T0]], [[Y:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = add <4 x i32> [[T1]],
+; CHECK-NEXT: [[T2:%.*]] = sub <4 x i32> [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT: ret <4 x i32> [[T2]]
 ;
   %t0 = xor <4 x i32> %x,
@@ -89,8 +79,7 @@ define i32 @t5(i32 %x, i32 %y) {
 ; CHECK-LABEL: @t5(
 ; CHECK-NEXT: [[T0:%.*]] = xor i32 [[X:%.*]], -1
 ; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
-; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], 1
+; CHECK-NEXT: [[T2:%.*]] = sub i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: ret i32 [[T2]]
 ;
   %t0 = xor i32 %x, -1
@@ -105,7 +94,7 @@ define i32 @t6(i32 %x, i32 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = xor i32 [[X:%.*]], -1
 ; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
 ; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], 1
+; CHECK-NEXT: [[T2:%.*]] = sub i32 [[Y]], [[X]]
 ; CHECK-NEXT: ret i32 [[T2]]
 ;
   %t0 = xor i32 %x, -1
@@ -121,7 +110,7 @@ define i32 @t7(i32 %x, i32 %y) {
 ; CHECK-NEXT: call void @use32(i32 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
 ; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], 1
+; CHECK-NEXT: [[T2:%.*]] = sub i32 [[Y]], [[X]]
 ; CHECK-NEXT: ret i32 [[T2]]
 ;
   %t0 = xor i32 %x, -1
@@ -145,7 +134,7 @@ define i32 @t8_commutative0(i32 %x) {
 ; CHECK-NEXT: call void @use32(i32 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = add i32 [[Y]], [[T0]]
 ; CHECK-NEXT: call void @use32(i32 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], 1
+; CHECK-NEXT: [[T2:%.*]] = sub i32 [[Y]], [[X]]
 ; CHECK-NEXT: ret i32 [[T2]]
 ;
   %y = call i32 @gen32()