return SDValue();
}
+
+/// Fold (xor (setcc Cond, EFLAGS), 1) --> (setcc invert(Cond), EFLAGS).
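+/// For example, (xor (setcc SETO, EFLAGS), 1) becomes (setcc SETNO, EFLAGS),
+/// so a single SETNO instruction replaces a SETO/XOR pair, as the test
+/// updates below show.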
+static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
+ if (N->getOpcode() != ISD::XOR)
+ return SDValue();
+
+ SDValue LHS = N->getOperand(0);
+ auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1));
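+ // X86ISD::SETCC produces 0 or 1 in an i8 register, so XOR-ing it with the
+ // constant 1 is a logical negation of its predicate.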
+ if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC)
+ return SDValue();
+
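+ // Operand 0 of X86ISD::SETCC holds the condition code; operand 1 is the
+ // EFLAGS input. Invert the condition and rebuild the node on the same flags.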
+ X86::CondCode NewCC = X86::GetOppositeBranchCondition(
+ X86::CondCode(LHS->getConstantOperandVal(0)));
+ SDLoc DL(N);
+ return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
+}
+
static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
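+ // Replace (xor (setcc), 1) with a setcc using the inverted condition before
+ // trying the other XOR folds.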
+ if (SDValue SetCC = foldXor1SetCC(N, DAG))
+ return SetCC;
+
if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
return RV;
; CHECK-LABEL: saddo_not_i32:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: seto %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
entry:
%t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %v1, i32 %v2)
; CHECK-LABEL: saddo_not_i64:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: addq %rsi, %rdi
-; CHECK-NEXT: seto %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
entry:
%t = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 %v1, i64 %v2)
; CHECK-LABEL: uaddo_not_i32:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: addl %esi, %edi
-; CHECK-NEXT: setb %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setae %al
; CHECK-NEXT: retq
entry:
%t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 %v2)
; CHECK-LABEL: uaddo_not_i64:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: addq %rsi, %rdi
-; CHECK-NEXT: setb %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setae %al
; CHECK-NEXT: retq
entry:
%t = call {i64, i1} @llvm.uadd.with.overflow.i64(i64 %v1, i64 %v2)
; CHECK-LABEL: ssubo_not_i32:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: cmpl %esi, %edi
-; CHECK-NEXT: seto %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
entry:
%t = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %v1, i32 %v2)
; CHECK-LABEL: ssubo_not_i64:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: cmpq %rsi, %rdi
-; CHECK-NEXT: seto %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
entry:
%t = call {i64, i1} @llvm.ssub.with.overflow.i64(i64 %v1, i64 %v2)
; CHECK-LABEL: usubo_not_i32:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: cmpl %esi, %edi
-; CHECK-NEXT: setb %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setae %al
; CHECK-NEXT: retq
entry:
%t = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %v1, i32 %v2)
; CHECK-LABEL: usubo_not_i64:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: cmpq %rsi, %rdi
-; CHECK-NEXT: setb %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setae %al
; CHECK-NEXT: retq
entry:
%t = call {i64, i1} @llvm.usub.with.overflow.i64(i64 %v1, i64 %v2)
; CHECK-LABEL: smulo_not_i32:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: imull %esi, %edi
-; CHECK-NEXT: seto %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
entry:
%t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
; CHECK-LABEL: smulo_not_i64:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: imulq %rsi, %rdi
-; CHECK-NEXT: seto %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
entry:
%t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
; CHECK-LABEL: umulo_not_i32:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: mull %esi
-; CHECK-NEXT: seto %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
entry:
%t = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %v1, i32 %v2)
; CHECK-LABEL: umulo_not_i64:
; CHECK: ## BB#0: ## %entry
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: mulq %rsi
-; CHECK-NEXT: seto %al
-; CHECK-NEXT: xorb $1, %al
+; CHECK-NEXT: setno %al
; CHECK-NEXT: retq
entry:
%t = call {i64, i1} @llvm.umul.with.overflow.i64(i64 %v1, i64 %v2)