From: Sanjay Patel
Date: Tue, 16 May 2017 21:51:04 +0000 (+0000)
Subject: [InstSimplify] add folds for constant mask of value shifted by constant
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=25529caa1bfe205fbd7175e3136b484910195985;p=llvm

[InstSimplify] add folds for constant mask of value shifted by constant

We would eventually catch these via demanded bits and computing known bits in InstCombine,
but I think it's better to handle the simple cases as soon as possible as a matter of efficiency.
This fold allows further simplifications based on distributed ops transforms. eg:

%a = lshr i8 %x, 7
%b = or i8 %a, 2
%c = and i8 %b, 1

InstSimplify can directly fold this now:
%a = lshr i8 %x, 7

Differential Revision: https://reviews.llvm.org/D33221

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@303213 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index 5728887cc1e..5652248a60c 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -1752,6 +1752,24 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
       (A == Op0 || B == Op0))
     return Op0;
 
+  // A mask that only clears known zeros of a shifted value is a no-op.
+  Value *X;
+  const APInt *Mask;
+  const APInt *ShAmt;
+  if (match(Op1, m_APInt(Mask))) {
+    // If all bits in the inverted and shifted mask are clear:
+    // and (shl X, ShAmt), Mask --> shl X, ShAmt
+    if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
+        (~(*Mask)).lshr(*ShAmt).isNullValue())
+      return Op0;
+
+    // If all bits in the inverted and shifted mask are clear:
+    // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
+    if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
+        (~(*Mask)).shl(*ShAmt).isNullValue())
+      return Op0;
+  }
+
   // A & (-A) = A if A is a power of two or zero.
   if (match(Op0, m_Neg(m_Specific(Op1))) ||
       match(Op1, m_Neg(m_Specific(Op0)))) {
diff --git a/test/Transforms/InstSimplify/AndOrXor.ll b/test/Transforms/InstSimplify/AndOrXor.ll
index 427ea655fcb..a9b4e4e5cfc 100644
--- a/test/Transforms/InstSimplify/AndOrXor.ll
+++ b/test/Transforms/InstSimplify/AndOrXor.ll
@@ -738,8 +738,7 @@ define i32 @test54(i32 %a, i32 %b) {
 define i8 @lshr_perfect_mask(i8 %x) {
 ; CHECK-LABEL: @lshr_perfect_mask(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr i8 %x, 5
-; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[SH]], 7
-; CHECK-NEXT:    ret i8 [[MASK]]
+; CHECK-NEXT:    ret i8 [[SH]]
 ;
   %sh = lshr i8 %x, 5
   %mask = and i8 %sh, 7  ; 0x07
@@ -749,8 +748,7 @@ define i8 @lshr_perfect_mask(i8 %x) {
 define <2 x i8> @lshr_oversized_mask_splat(<2 x i8> %x) {
 ; CHECK-LABEL: @lshr_oversized_mask_splat(
 ; CHECK-NEXT:    [[SH:%.*]] = lshr <2 x i8> %x, <i8 5, i8 5>
-; CHECK-NEXT:    [[MASK:%.*]] = and <2 x i8> [[SH]], <i8 -121, i8 -121>
-; CHECK-NEXT:    ret <2 x i8> [[MASK]]
+; CHECK-NEXT:    ret <2 x i8> [[SH]]
 ;
   %sh = lshr <2 x i8> %x, <i8 5, i8 5>
   %mask = and <2 x i8> %sh, <i8 135, i8 135>  ; 0x87
@@ -771,8 +769,7 @@ define i8 @lshr_undersized_mask(i8 %x) {
 define <2 x i8> @shl_perfect_mask_splat(<2 x i8> %x) {
 ; CHECK-LABEL: @shl_perfect_mask_splat(
 ; CHECK-NEXT:    [[SH:%.*]] = shl <2 x i8> %x, <i8 6, i8 6>
-; CHECK-NEXT:    [[MASK:%.*]] = and <2 x i8> [[SH]], <i8 -64, i8 -64>
-; CHECK-NEXT:    ret <2 x i8> [[MASK]]
+; CHECK-NEXT:    ret <2 x i8> [[SH]]
 ;
   %sh = shl <2 x i8> %x, <i8 6, i8 6>
   %mask = and <2 x i8> %sh, <i8 192, i8 192>  ; 0xC0
@@ -782,8 +779,7 @@ define <2 x i8> @shl_perfect_mask_splat(<2 x i8> %x) {
 define i8 @shl_oversized_mask(i8 %x) {
 ; CHECK-LABEL: @shl_oversized_mask(
 ; CHECK-NEXT:    [[SH:%.*]] = shl i8 %x, 6
-; CHECK-NEXT:    [[MASK:%.*]] = and i8 [[SH]], -61
-; CHECK-NEXT:    ret i8 [[MASK]]
+; CHECK-NEXT:    ret i8 [[SH]]
 ;
   %sh = shl i8 %x, 6
   %mask = and i8 %sh, 195  ; 0xC3
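
Note on the fold's bit math: for "and (lshr X, ShAmt), Mask", the lshr clears the
high ShAmt bits, so the "and" is a no-op exactly when the inverted mask, shifted
back up by ShAmt, overflows to zero. Below is a minimal standalone sketch of that
condition using llvm::APInt; it assumes an LLVM installation to compile against,
and the helper name maskIsNoOpForLShr is illustrative, not part of the patch.

// Standalone sketch of the lshr-side condition, not part of the commit.
// Compile against LLVM, e.g.:
//   clang++ sketch.cpp $(llvm-config --cxxflags --ldflags --libs support)
#include "llvm/ADT/APInt.h"
#include <cstdio>

using llvm::APInt;

// 'and (lshr X, ShAmt), Mask' is a no-op iff the mask keeps every bit the
// shift can leave set, i.e. the inverted mask has set bits only in the high
// ShAmt positions that the lshr already zeroed.
static bool maskIsNoOpForLShr(const APInt &Mask, const APInt &ShAmt) {
  return (~Mask).shl(ShAmt).isNullValue();
}

int main() {
  APInt ShAmt(8, 5); // lshr i8 %x, 5 leaves only bits 0..2 set (0x07)
  printf("%d\n", maskIsNoOpForLShr(APInt(8, 0x07), ShAmt)); // 1: perfect mask
  printf("%d\n", maskIsNoOpForLShr(APInt(8, 0x87), ShAmt)); // 1: oversized mask
  printf("%d\n", maskIsNoOpForLShr(APInt(8, 0xFE), ShAmt)); // 0: undersized mask
}

The three probes mirror the tests above: the perfect and oversized masks fold away,
while the undersized mask (as in the untouched @lshr_undersized_mask test) clears a
bit the shift can leave set, so it must stay.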