From: Roman Lebedev
Date: Mon, 23 Sep 2019 17:04:28 +0000 (+0000)
Subject: [InstCombine] dropRedundantMaskingOfLeftShiftInput(): pat. c/d/e with mask (PR42563)
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=3b27a5309530874b52125f7c23ce3e592dd0c772;p=llvm

[InstCombine] dropRedundantMaskingOfLeftShiftInput(): pat. c/d/e with mask (PR42563)

Summary:
If we have the pattern `(x & (-1 >> maskNbits)) << shiftNbits`,
we already have a fold that drops the `& (-1 >> maskNbits)` mask
iff `(shiftNbits-maskNbits) s>= 0` (i.e. `shiftNbits u>= maskNbits`).

But even if `(shiftNbits-maskNbits) s< 0`, we can still fold;
we will just need to apply a **constant** mask afterwards:
```
Name: c, normal+mask
  %t0 = lshr i32 -1, C1
  %t1 = and i32 %t0, %x
  %r = shl i32 %t1, C2
=>
  %n0 = shl i32 %x, C2
  %n1 = i32 ((-(C2-C1))+32)
  %n2 = zext i32 %n1 to i64
  %n3 = lshr i64 -1, %n2
  %n4 = trunc i64 %n3 to i32
  %r = and i32 %n0, %n4
```
https://rise4fun.com/Alive/gslRa

Naturally, the old `%masked` value has to be one-use,
so that we can get rid of the old masking instruction.
This is not valid for pattern f, where the "masking" is done via `ashr`.

https://bugs.llvm.org/show_bug.cgi?id=42563

Reviewers: spatel, nikic, xbolva00

Reviewed By: spatel

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D67725

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@372630 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Transforms/InstCombine/InstCombineShifts.cpp b/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 3f466495c5e..2dea4c45cbd 100644
--- a/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -202,10 +202,35 @@ dropRedundantMaskingOfLeftShiftInput(BinaryOperator *OuterShift,
   // shall be unset in the root value (OuterShift). If ShAmtsDiff is negative,
   // we'll need to also produce a mask to unset ShAmtsDiff high bits.
   // So, does *any* channel need a mask? (is ShiftShAmt u>= MaskShAmt ?)
-  if (!match(ShAmtsDiff, m_NonNegative()))
-    return nullptr; // FIXME.
+  if (!match(ShAmtsDiff, m_NonNegative())) {
+    // This sub-fold (with mask) is invalid for the 'ashr' "masking" instruction.
+    if (match(Masked, m_AShr(m_Value(), m_Value())))
+      return nullptr;
+    // For a mask we need to be able to get rid of the old masking instruction.
+    if (!Masked->hasOneUse())
+      return nullptr; // Else we can't perform the fold.
+    Type *Ty = X->getType();
+    unsigned BitWidth = Ty->getScalarSizeInBits();
+    // We should compute the mask in a wider type, and truncate later!
+    // Get a type twice as wide element-wise (same number of elements!).
+    Type *ExtendedScalarTy = Type::getIntNTy(Ty->getContext(), 2 * BitWidth);
+    Type *ExtendedTy =
+        Ty->isVectorTy()
+            ? VectorType::get(ExtendedScalarTy, Ty->getVectorNumElements())
+            : ExtendedScalarTy;
+    auto *ExtendedNumHighBitsToClear = ConstantExpr::getZExt(
+        ConstantExpr::getAdd(
+            ConstantExpr::getNeg(ShAmtsDiff),
+            ConstantInt::get(Ty, BitWidth, /*isSigned=*/false)),
+        ExtendedTy);
+    // And compute the mask as usual: (-1 l>> ExtendedNumHighBitsToClear)
+    auto *ExtendedAllOnes = ConstantExpr::getAllOnesValue(ExtendedTy);
+    auto *ExtendedMask =
+        ConstantExpr::getLShr(ExtendedAllOnes, ExtendedNumHighBitsToClear);
+    NewMask = ConstantExpr::getTrunc(ExtendedMask, Ty);
+  } else
+    NewMask = nullptr; // No mask needed.
   // All good, we can do this fold.
-  NewMask = nullptr; // No mask needed.
 } else
   return nullptr; // Don't know anything about this pattern.
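To make the new sub-fold concrete, here is a minimal before/after sketch in LLVM IR. It is illustrative only and not taken from the patch's test files; the function names and the constants C1 = 8, C2 = 4 are hypothetical, chosen so that `shiftNbits u< maskNbits` and the constant mask is therefore required:
```
; Before: (x & (-1 u>> 8)) << 4, i.e. C1 = 8, C2 = 4.
define i32 @example(i32 %x) {
  %t0 = lshr i32 -1, 8    ; 0x00FFFFFF -- keeps the low 24 bits of %x
  %t1 = and i32 %t0, %x   ; the one-use masking instruction to be dropped
  %r = shl i32 %t1, 4
  ret i32 %r
}

; After: shift first, then apply the equivalent *constant* mask,
; 268435455 = 0x0FFFFFFF = -1 u>> (C1 - C2).
define i32 @example_folded(i32 %x) {
  %n0 = shl i32 %x, 4
  %r = and i32 %n0, 268435455
  ret i32 %r
}
```
Note why the patch computes this mask via constant expressions in a type twice as wide (i64 for i32): the intermediate shift amount, 32 + (C1 - C2) = 36 in this sketch, is not smaller than the original bit width, so the shift is only well-defined in the extended type; `i64 -1 u>> 36` yields the low 28 bits, which truncate back to the i32 mask 0x0FFFFFFF.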
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
index eb59f8c0a0d..00154bba29c 100644
--- a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-c.ll
@@ -16,11 +16,11 @@ declare void @use32(i32)
 define i32 @t0_basic(i32 %x, i32 %nbits) {
 ; CHECK-LABEL: @t0_basic(
 ; CHECK-NEXT:    [[T0:%.*]] = lshr i32 -1, [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = and i32 [[T0]], [[X:%.*]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -1
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[TMP1]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T3]]
 ;
   %t0 = lshr i32 -1, %nbits
@@ -39,11 +39,11 @@ declare void @use8xi32(<8 x i32>)
 define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT:    [[T0:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = and <8 x i32> [[T0]], [[X:%.*]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[TMP1]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T3]]
 ;
   %t0 = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %nbits
@@ -58,11 +58,11 @@ define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
 define <8 x i32> @t1_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_nonsplat(
 ; CHECK-NEXT:    [[T0:%.*]] = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = and <8 x i32> [[T0]], [[X:%.*]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]],
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[TMP1]],
 ; CHECK-NEXT:    ret <8 x i32> [[T3]]
 ;
   %t0 = lshr <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %nbits
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
index de80f764771..6e5cb0e9193 100644
--- a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-d.ll
@@ -17,12 +17,12 @@ define i32 @t0_basic(i32 %x, i32 %nbits) {
 ; CHECK-LABEL: @t0_basic(
 ; CHECK-NEXT:    [[T0:%.*]] = shl i32 -1, [[NBITS:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
-; CHECK-NEXT:    [[T2:%.*]] = and i32 [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -1
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T1]])
 ; CHECK-NEXT:    call void @use32(i32 [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl i32 [[T2]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = and i32 [[TMP1]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T4]]
 ;
   %t0 = shl i32 -1, %nbits
@@ -44,12 +44,12 @@ define <8 x i32> @t2_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_splat(
 ; CHECK-NEXT:    [[T0:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
-; CHECK-NEXT:    [[T2:%.*]] = and <8 x i32> [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl <8 x i32> [[T2]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i32> [[TMP1]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T4]]
 ;
   %t0 = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %nbits
@@ -67,12 +67,12 @@ define <8 x i32> @t2_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t2_vec_nonsplat(
 ; CHECK-NEXT:    [[T0:%.*]] = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
 ; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
-; CHECK-NEXT:    [[T2:%.*]] = and <8 x i32> [[T1]], [[X:%.*]]
 ; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]],
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T1]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
-; CHECK-NEXT:    [[T4:%.*]] = shl <8 x i32> [[T2]], [[T3]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X:%.*]], [[T3]]
+; CHECK-NEXT:    [[T4:%.*]] = and <8 x i32> [[TMP1]],
 ; CHECK-NEXT:    ret <8 x i32> [[T4]]
 ;
   %t0 = shl <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>, %nbits
diff --git a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
index 609b3b94adb..bbe35605a71 100644
--- a/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
+++ b/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-e.ll
@@ -16,11 +16,11 @@ declare void @use32(i32)
 define i32 @t0_basic(i32 %x, i32 %nbits) {
 ; CHECK-LABEL: @t0_basic(
 ; CHECK-NEXT:    [[T0:%.*]] = shl i32 [[X:%.*]], [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = lshr i32 [[T0]], [[NBITS]]
 ; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -1
 ; CHECK-NEXT:    call void @use32(i32 [[T0]])
 ; CHECK-NEXT:    call void @use32(i32 [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl i32 [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and i32 [[TMP1]], 2147483647
 ; CHECK-NEXT:    ret i32 [[T3]]
 ;
   %t0 = shl i32 %x, %nbits
@@ -39,11 +39,11 @@ declare void @use8xi32(<8 x i32>)
 define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT:    [[T0:%.*]] = shl <8 x i32> [[X:%.*]], [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[TMP1]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
 ; CHECK-NEXT:    ret <8 x i32> [[T3]]
 ;
   %t0 = shl <8 x i32> %x, %nbits
@@ -58,11 +58,11 @@ define <8 x i32> @t1_vec_splat(<8 x i32> %x, <8 x i32> %nbits) {
 define <8 x i32> @t1_vec_nonsplat(<8 x i32> %x, <8 x i32> %nbits) {
 ; CHECK-LABEL: @t1_vec_nonsplat(
 ; CHECK-NEXT:    [[T0:%.*]] = shl <8 x i32> [[X:%.*]], [[NBITS:%.*]]
-; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i32> [[T0]], [[NBITS]]
 ; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]],
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T0]])
 ; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
-; CHECK-NEXT:    [[T3:%.*]] = shl <8 x i32> [[T1]], [[T2]]
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <8 x i32> [[X]], [[T2]]
+; CHECK-NEXT:    [[T3:%.*]] = and <8 x i32> [[TMP1]],
 ; CHECK-NEXT:    ret <8 x i32> [[T3]]
 ;
   %t0 = shl <8 x i32> %x, %nbits