        return InsertNewInstWith(And, *I);
      }
-    // If the RHS is a constant, see if we can simplify it.
-    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
-    if (ShrinkDemandedConstant(I, 1, DemandedMask))
-      return I;
+    // If the RHS is a constant, see if we can change it. Don't alter a -1
+    // constant because that's a canonical 'not' op, and that is better for
+    // combining, SCEV, and codegen.
+    const APInt *C;
+    if (match(I->getOperand(1), m_APInt(C)) && !C->isAllOnesValue()) {
+      if ((*C | ~DemandedMask).isAllOnesValue()) {
+        // Force bits to 1 to create a 'not' op.
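+        // For example, if DemandedMask covers only the low 30 bits and C is
+        // 0x3fffffff, then C | ~DemandedMask is all-ones: every demanded bit
+        // of C is already set, so the undemanded bits may be set as well.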
+        I->setOperand(1, ConstantInt::getAllOnesValue(VTy));
+        return I;
+      }
+      // If we can't turn this into a 'not', try to shrink the constant.
+      if (ShrinkDemandedConstant(I, 1, DemandedMask))
+        return I;
+    }
    // If our LHS is an 'and' and if it has one use, and if any of the bits we
    // are flipping are known to be set, then the xor is just resetting those
%xor = xor i32 %and, %B
ret i32 %xor
}
+
+; PR32706 - https://bugs.llvm.org/show_bug.cgi?id=32706
+; Pin an xor constant operand to -1 if possible because 'not' is better for SCEV and codegen.
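+; In this test, the shl by 2 means only the low 30 bits of %add (and %sub) are
+; demanded; 1073741823 (0x3fffffff) already sets all of those bits, so the xor
+; constant can be extended to -1.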
+
+define i32 @not_is_canonical(i32 %x, i32 %y) {
+; CHECK-LABEL: @not_is_canonical(
+; CHECK-NEXT: [[SUB:%.*]] = xor i32 %x, -1
+; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB]], %y
+; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[ADD]], 2
+; CHECK-NEXT: ret i32 [[MUL]]
+;
+ %sub = xor i32 %x, 1073741823
+ %add = add i32 %sub, %y
+ %mul = shl i32 %add, 2
+ ret i32 %mul
+}
+
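+; The xor constant in the next test is also canonicalized to -1 ('not') because
+; the 'and' mask <i64 23, i64 42> demands only bits the constant already had set.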
define <2 x i64> @test4(<2 x i64> %A) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i64> %A, <i64 63, i64 63>
+; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i64> %A, <i64 -1, i64 -1>
; CHECK-NEXT: [[XOR:%.*]] = and <2 x i64> [[TMP1]], <i64 23, i64 42>
; CHECK-NEXT: ret <2 x i64> [[XOR]]
;