// (and (rotl Input, Rotate), Mask)
//
// otherwise. The output value has BitSize bits, although Input may be
-// narrower (in which case the upper bits are don't care).
+// narrower (in which case the upper bits are don't care), or wider (in which
+// case the result will be truncated as part of the operation).
struct RxSBGOperands {
RxSBGOperands(unsigned Op, SDValue N)
: Opcode(Op), BitSize(N.getValueType().getSizeInBits()),
SDValue N = RxSBG.Input;
unsigned Opcode = N.getOpcode();
switch (Opcode) {
+ case ISD::TRUNCATE: {
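+ // A truncation only keeps the low bits of its operand, so it can be
+ // modeled by restricting the mask to the truncated width and then
+ // looking through to the wider operand.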
+ if (RxSBG.Opcode == SystemZ::RNSBG)
+ return false;
+ uint64_t BitSize = N.getValueType().getSizeInBits();
+ uint64_t Mask = allOnes(BitSize);
+ if (!refineRxSBGMask(RxSBG, Mask))
+ return false;
+ RxSBG.Input = N.getOperand(0);
+ return true;
+ }
case ISD::AND: {
if (RxSBG.Opcode == SystemZ::RNSBG)
return false;
RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
unsigned Count = 0;
while (expandRxSBG(RISBG))
- if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND)
+ // The widening or narrowing is expected to be free.
+ // Counting widening or narrowing as a saved operation will result in
+ // preferring an R*SBG over a simple shift/logical instruction.
+ if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND &&
+ RISBG.Input.getOpcode() != ISD::TRUNCATE)
Count += 1;
if (Count == 0)
return false;
unsigned Count[] = { 0, 0 };
for (unsigned I = 0; I < 2; ++I)
while (expandRxSBG(RxSBG[I]))
- if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND)
+ // The widening or narrowing is expected to be free.
+ // Counting widening or narrowing as a saved operation will result in
+ // preferring an R*SBG over a simple shift/logical instruction.
+ if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND &&
+ RxSBG[I].Input.getOpcode() != ISD::TRUNCATE)
Count[I] += 1;
// Do nothing if neither operand is suitable.
%ext2 = zext i8 %ext to i64
ret i64 %ext2
}
+
+; Check that we get the case where a 64-bit shift is used by a 32-bit and.
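+; The rotate amount 52 is 64-12 and 189 is 61+128, i.e. bits 32-61 are
+; selected and the remaining bits are zeroed, matching the shift right by 12
+; followed by the 32-bit "and" with -4.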
+define signext i32 @f43(i64 %x) {
+; CHECK-LABEL: f43:
+; CHECK: risbg [[REG:%r[0-5]]], %r2, 32, 189, 52
+; CHECK: lgfr %r2, [[REG]]
+ %shr3 = lshr i64 %x, 12
+ %shr3.tr = trunc i64 %shr3 to i32
+ %conv = and i32 %shr3.tr, -4
+ ret i32 %conv
+}
+
+; Check that we don't get the case where the 32-bit and mask is not contiguous.
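+; (The mask 10 is 0b1010, which has a hole, so it cannot be expressed as a
+; rotate followed by a selection of contiguous bits.)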
+define signext i32 @f44(i64 %x) {
+; CHECK-LABEL: f44:
+; CHECK: srlg [[REG:%r[0-5]]], %r2, 12
+ %shr4 = lshr i64 %x, 12
+ %conv = trunc i64 %shr4 to i32
+ %and = and i32 %conv, 10
+ ret i32 %and
+}
%or = or i64 %anda, %shrb
ret i64 %or
}
+
+; Check that we can get the case where a 64-bit shift feeds a 32-bit or of
+; ands with complement masks.
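+; (The inserted bit range 48-63 is exactly the range that the -65536 mask
+; would clear, so no separate "and" of %y is needed.)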
+define signext i32 @f9(i64 %x, i32 signext %y) {
+; CHECK-LABEL: f9:
+; CHECK: risbg [[REG:%r[0-5]]], %r2, 48, 63, 16
+; CHECK: lgfr %r2, [[REG]]
+ %shr6 = lshr i64 %x, 48
+ %conv = trunc i64 %shr6 to i32
+ %and1 = and i32 %y, -65536
+ %or = or i32 %conv, %and1
+ ret i32 %or
+}
+
+; Check that we don't get the case where a 64-bit shift feeds a 32-bit or of
+; ands with incompatible masks.
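+; (The -16777216 mask also clears bits 16-23 of %y, which a 16-bit insert of
+; the shifted value would leave intact, so the explicit "and" is still
+; required.)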
+define signext i32 @f10(i64 %x, i32 signext %y) {
+; CHECK-LABEL: f10:
+; CHECK: nilf %r3, 4278190080
+ %shr6 = lshr i64 %x, 48
+ %conv = trunc i64 %shr6 to i32
+ %and1 = and i32 %y, -16777216
+ %or = or i32 %conv, %and1
+ ret i32 %or
+}