SDValue visitADDSAT(SDNode *N);
SDValue visitSUBSAT(SDNode *N);
SDValue visitADDC(SDNode *N);
+ SDValue visitSADDO(SDNode *N);
SDValue visitUADDO(SDNode *N);
SDValue visitUADDOLike(SDValue N0, SDValue N1, SDNode *N);
SDValue visitSUBC(SDNode *N);
+ SDValue visitSSUBO(SDNode *N);
SDValue visitUSUBO(SDNode *N);
SDValue visitADDE(SDNode *N);
SDValue visitADDCARRY(SDNode *N);
case ISD::SSUBSAT:
case ISD::USUBSAT: return visitSUBSAT(N);
case ISD::ADDC: return visitADDC(N);
+ case ISD::SADDO: return visitSADDO(N);
case ISD::UADDO: return visitUADDO(N);
case ISD::SUBC: return visitSUBC(N);
+ case ISD::SSUBO: return visitSSUBO(N);
case ISD::USUBO: return visitUSUBO(N);
case ISD::ADDE: return visitADDE(N);
case ISD::ADDCARRY: return visitADDCARRY(N);
llvm_unreachable("Unsupported boolean content");
}
+// TODO: merge this with DAGCombiner::visitUADDO
+SDValue DAGCombiner::visitSADDO(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N0.getValueType();
+ EVT CarryVT = N->getValueType(1);
+ SDLoc DL(N);
+
+ // If the flag result is dead, turn this into an ADD.
+ if (!N->hasAnyUseOfValue(1))
+ return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
+ DAG.getUNDEF(CarryVT));
+
+ // canonicalize constant to RHS.
+ if (DAG.isConstantIntBuildVectorOrConstantInt(N0) &&
+ !DAG.isConstantIntBuildVectorOrConstantInt(N1))
+ return DAG.getNode(ISD::SADDO, DL, N->getVTList(), N1, N0);
+
+ // fold (saddo x, 0) -> x + no overflow
+ if (isNullOrNullSplat(N1))
+ return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));
+
+ return SDValue();
+}
+
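For reference, a minimal IR sketch of the case the new (saddo x, 0) fold covers; the function name here is illustrative, not from the patch. Adding zero can never signed-overflow, so both results become known (the sum is %x, the overflow bit is false) and the intrinsic folds away entirely:

define i32 @saddo_zero_sketch(i32 %x) {
  %s = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 0)
  %v = extractvalue {i32, i1} %s, 0
  ret i32 %v
}
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)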
SDValue DAGCombiner::visitUADDO(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
return SDValue();
}
+// TODO: merge this with DAGCombiner::visitUSUBO
+SDValue DAGCombiner::visitSSUBO(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N0.getValueType();
+ EVT CarryVT = N->getValueType(1);
+ SDLoc DL(N);
+
+ // If the flag result is dead, turn this into a SUB.
+ if (!N->hasAnyUseOfValue(1))
+ return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1),
+ DAG.getUNDEF(CarryVT));
+
+ // fold (ssubo x, x) -> 0 + no overflow
+ if (N0 == N1)
+ return CombineTo(N, DAG.getConstant(0, DL, VT),
+ DAG.getConstant(0, DL, CarryVT));
+
+ // fold (ssubo x, 0) -> x + no overflow
+ if (isNullOrNullSplat(N1))
+ return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT));
+
+ return SDValue();
+}
+
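Likewise, an illustrative sketch of the (ssubo x, x) fold (function name is hypothetical): signed subtraction of a value from itself can never overflow, so both the difference and the overflow bit fold to constants, which is what the combine_ssub_self tests below now check:

define i32 @ssubo_self_sketch(i32 %x) {
  %s = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %x, i32 %x)
  %v = extractvalue {i32, i1} %s, 0
  ret i32 %v
}
declare {i32, i1} @llvm.ssub.with.overflow.i32(i32, i32)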
SDValue DAGCombiner::visitUSUBO(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
; SSE-LABEL: combine_sadd_zero:
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: addl $0, %eax
-; SSE-NEXT: cmovol %esi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: combine_sadd_zero:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
-; AVX-NEXT: addl $0, %eax
-; AVX-NEXT: cmovol %esi, %eax
; AVX-NEXT: retq
%1 = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a0, i32 zeroinitializer)
%2 = extractvalue {i32, i1} %1, 0
define <4 x i32> @combine_vec_sadd_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_sadd_zero:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pxor %xmm0, %xmm0
-; SSE-NEXT: pcmpgtd %xmm2, %xmm0
-; SSE-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE-NEXT: pxor %xmm3, %xmm0
-; SSE-NEXT: pcmpeqd %xmm0, %xmm3
-; SSE-NEXT: pcmpeqd %xmm0, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sadd_zero:
; AVX: # %bb.0:
-; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm2
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpxor %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm3
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpandn %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
; SSE-LABEL: combine_ssub_zero:
; SSE: # %bb.0:
; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: subl $0, %eax
-; SSE-NEXT: cmovol %esi, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: combine_ssub_zero:
; AVX: # %bb.0:
; AVX-NEXT: movl %edi, %eax
-; AVX-NEXT: subl $0, %eax
-; AVX-NEXT: cmovol %esi, %eax
; AVX-NEXT: retq
%1 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a0, i32 zeroinitializer)
%2 = extractvalue {i32, i1} %1, 0
define <4 x i32> @combine_vec_ssub_zero(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_ssub_zero:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: pxor %xmm3, %xmm3
-; SSE-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE-NEXT: pxor %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: pcmpeqd %xmm4, %xmm0
-; SSE-NEXT: pcmpeqd %xmm3, %xmm3
-; SSE-NEXT: pxor %xmm4, %xmm3
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: blendvps %xmm0, %xmm1, %xmm2
-; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ssub_zero:
; AVX: # %bb.0:
-; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpcmpgtd %xmm0, %xmm2, %xmm2
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpxor %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm4
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpxor %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpandn %xmm2, %xmm4, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> zeroinitializer)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
define i32 @combine_ssub_self(i32 %a0, i32 %a1) {
; SSE-LABEL: combine_ssub_self:
; SSE: # %bb.0:
-; SSE-NEXT: movl %edi, %eax
-; SSE-NEXT: subl %edi, %eax
-; SSE-NEXT: cmovol %esi, %eax
+; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: combine_ssub_self:
; AVX: # %bb.0:
-; AVX-NEXT: movl %edi, %eax
-; AVX-NEXT: subl %edi, %eax
-; AVX-NEXT: cmovol %esi, %eax
+; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: retq
%1 = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a0, i32 %a0)
%2 = extractvalue {i32, i1} %1, 0
define <4 x i32> @combine_vec_ssub_self(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_vec_ssub_self:
; SSE: # %bb.0:
-; SSE-NEXT: psubd %xmm0, %xmm0
+; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ssub_self:
; AVX: # %bb.0:
-; AVX-NEXT: vpsubd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = call {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32> %a0, <4 x i32> %a0)
%2 = extractvalue {<4 x i32>, <4 x i1>} %1, 0
}
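The constant-to-RHS canonicalization in visitSADDO is what improves the saddoi64imm1 test below: once the immediate sits on the RHS, the X86 backend can fold it directly into the add, so the FIXME no longer applies. A sketch of the commuted-operand form that previously defeated immediate folding (function name is illustrative):

define zeroext i1 @saddo_imm_lhs_sketch(i64 %x, i64* %p) {
  %s = call {i64, i1} @llvm.sadd.with.overflow.i64(i64 2, i64 %x)
  %v = extractvalue {i64, i1} %s, 0
  %o = extractvalue {i64, i1} %s, 1
  store i64 %v, i64* %p
  ret i1 %o
}
declare {i64, i1} @llvm.sadd.with.overflow.i64(i64, i64)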
; SADDO reg, imm | imm, reg
-; FIXME: DAG doesn't optimize immediates on the LHS.
define zeroext i1 @saddoi64imm1(i64 %v1, i64* %res) {
; SDAG-LABEL: saddoi64imm1:
; SDAG: ## %bb.0:
-; SDAG-NEXT: movl $2, %ecx
-; SDAG-NEXT: addq %rdi, %rcx
+; SDAG-NEXT: addq $2, %rdi
; SDAG-NEXT: seto %al
-; SDAG-NEXT: movq %rcx, (%rsi)
+; SDAG-NEXT: movq %rdi, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi64imm1: