From: James Molloy
Date: Mon, 13 Feb 2017 12:32:47 +0000 (+0000)
Subject: [ARM] Use VCMP, not VCMPE, for floating point equality comparisons
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=9b264f7915075aee21b4f57d80ce72ab29865144;p=llvm

[ARM] Use VCMP, not VCMPE, for floating point equality comparisons

When generating a floating point comparison we currently unconditionally
generate VCMPE. This has the side effect of setting the cumulative Invalid
bit in FPSCR if any of the operands is a QNaN.

It is expected that use of a relational predicate on a QNaN value should
raise Invalid. Quoting from the C standard:

  The relational and equality operators support the usual mathematical
  relationships between numeric values. For any ordered pair of numeric
  values exactly one of the relationships (less, greater, and equal) is
  true. Relational operators may raise the "invalid" floating-point
  exception when argument values are NaNs.

The standard doesn't explicitly state the expectation for equality
operators, but the implication and obvious expectation is that equality
operators should not raise Invalid on a QNaN input, as those predicates
are wholly defined on unordered inputs (to return not equal).

Therefore, add a new operand to ARMISD::CMPFP and CMPFPw0 indicating
whether a QNaN should raise Invalid, and pipe that through to TableGen.

A brief host-side illustration of the intended exception behaviour is
appended after the diff.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294945 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 3f510aa1b65..b484ce00962 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -187,7 +187,7 @@ class ARMFastISel final : public FastISel {
     bool isTypeLegal(Type *Ty, MVT &VT);
     bool isLoadTypeLegal(Type *Ty, MVT &VT);
     bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
-                    bool isZExt);
+                    bool isZExt, bool isEquality);
     bool ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                      unsigned Alignment = 0, bool isZExt = true,
                      bool allocReg = true);
@@ -1256,7 +1256,8 @@ bool ARMFastISel::SelectBranch(const Instruction *I) {
       if (ARMPred == ARMCC::AL) return false;

       // Emit the compare.
-      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
+      if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
+                      CI->isEquality()))
         return false;

       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
@@ -1343,7 +1344,7 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
 }

 bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
-                             bool isZExt) {
+                             bool isZExt, bool isEquality) {
   Type *Ty = Src1Value->getType();
   EVT SrcEVT = TLI.getValueType(DL, Ty, true);
   if (!SrcEVT.isSimple()) return false;
@@ -1389,10 +1390,18 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
     // TODO: Verify compares.
     case MVT::f32:
       isICmp = false;
-      CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
+      // Equality comparisons shouldn't raise Invalid on unordered inputs.
+      if (isEquality)
+        CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
+      else
+        CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES;
       break;
     case MVT::f64:
       isICmp = false;
+      // Equality comparisons shouldn't raise Invalid on unordered inputs.
+      if (isEquality)
+        CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
+      else
       CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED;
       break;
     case MVT::i1:
@@ -1469,7 +1478,8 @@ bool ARMFastISel::SelectCmp(const Instruction *I) {
   if (ARMPred == ARMCC::AL) return false;

   // Emit the compare.
-  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
+  if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(),
+                  CI->isEquality()))
     return false;

   // Now set a register based on the comparison. Explicitly set the predicates
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 6e674d96db3..f85149bb1f6 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -1472,22 +1472,34 @@ static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {

 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
-                        ARMCC::CondCodes &CondCode2) {
+                        ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN) {
   CondCode2 = ARMCC::AL;
+  InvalidOnQNaN = true;
   switch (CC) {
   default: llvm_unreachable("Unknown FP condition!");
   case ISD::SETEQ:
-  case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
+  case ISD::SETOEQ:
+    CondCode = ARMCC::EQ;
+    InvalidOnQNaN = false;
+    break;
   case ISD::SETGT:
   case ISD::SETOGT: CondCode = ARMCC::GT; break;
   case ISD::SETGE:
   case ISD::SETOGE: CondCode = ARMCC::GE; break;
   case ISD::SETOLT: CondCode = ARMCC::MI; break;
   case ISD::SETOLE: CondCode = ARMCC::LS; break;
-  case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
+  case ISD::SETONE:
+    CondCode = ARMCC::MI;
+    CondCode2 = ARMCC::GT;
+    InvalidOnQNaN = false;
+    break;
   case ISD::SETO:   CondCode = ARMCC::VC; break;
   case ISD::SETUO:  CondCode = ARMCC::VS; break;
-  case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
+  case ISD::SETUEQ:
+    CondCode = ARMCC::EQ;
+    CondCode2 = ARMCC::VS;
+    InvalidOnQNaN = false;
+    break;
   case ISD::SETUGT: CondCode = ARMCC::HI; break;
   case ISD::SETUGE: CondCode = ARMCC::PL; break;
   case ISD::SETLT:
@@ -1495,7 +1507,10 @@ static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
   case ISD::SETLE:
   case ISD::SETULE: CondCode = ARMCC::LE; break;
   case ISD::SETNE:
-  case ISD::SETUNE: CondCode = ARMCC::NE; break;
+  case ISD::SETUNE:
+    CondCode = ARMCC::NE;
+    InvalidOnQNaN = false;
+    break;
   }
 }

@@ -3772,13 +3787,15 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,

 /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
-                                     SelectionDAG &DAG, const SDLoc &dl) const {
+                                     SelectionDAG &DAG, const SDLoc &dl,
+                                     bool InvalidOnQNaN) const {
   assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
   SDValue Cmp;
+  SDValue C = DAG.getConstant(InvalidOnQNaN, dl, MVT::i32);
   if (!isFloatingPointZero(RHS))
-    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
+    Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS, C);
   else
-    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
+    Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS, C);
   return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
 }

@@ -4198,7 +4215,8 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
   }

   ARMCC::CondCodes CondCode, CondCode2;
-  FPCCToARMCC(CC, CondCode, CondCode2);
+  bool InvalidOnQNaN;
+  FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);

   // Try to generate VMAXNM/VMINNM on ARMv8.
   if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
@@ -4217,13 +4235,13 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
   }

   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
-  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
+  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
   SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
   if (CondCode2 != ARMCC::AL) {
     SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
     // FIXME: Needs another CMP because flag can have but one use.
-    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
+    SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
     Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
   }
   return Result;
@@ -4384,10 +4402,11 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
   }

   ARMCC::CondCodes CondCode, CondCode2;
-  FPCCToARMCC(CC, CondCode, CondCode2);
+  bool InvalidOnQNaN;
+  FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN);

   SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
-  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
+  SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN);
   SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
   SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
   SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h
index 84c6eb845bb..a20de3b5716 100644
--- a/lib/Target/ARM/ARMISelLowering.h
+++ b/lib/Target/ARM/ARMISelLowering.h
@@ -698,7 +698,7 @@ class InstrItineraryData;
     SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                       SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
     SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
-                      const SDLoc &dl) const;
+                      const SDLoc &dl, bool InvalidOnQNaN) const;
     SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

     SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;
diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td
index 08915aad551..bc90430fbc9 100644
--- a/lib/Target/ARM/ARMInstrInfo.td
+++ b/lib/Target/ARM/ARMInstrInfo.td
@@ -51,6 +51,8 @@ def SDT_ARMAnd : SDTypeProfile<1, 2,
                                SDTCisVT<2, i32>]>;

 def SDT_ARMCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
+def SDT_ARMFCmp : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>,
+                                       SDTCisVT<2, i32>]>;

 def SDT_ARMPICAdd : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
                                          SDTCisPtrTy<1>, SDTCisVT<2, i32>]>;
diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td
index 4142e8db574..794b4d2fbc8 100644
--- a/lib/Target/ARM/ARMInstrVFP.td
+++ b/lib/Target/ARM/ARMInstrVFP.td
@@ -11,12 +11,12 @@
 //
 //===----------------------------------------------------------------------===//

-def SDT_CMPFP0 : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
+def SDT_CMPFP0 : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisVT<1, i32>]>;
 def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
                                        SDTCisSameAs<1, 2>]>;

 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>;
-def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutGlue]>;
+def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMFCmp, [SDNPOutGlue]>;
 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;

@@ -516,12 +516,12 @@ let Defs = [FPSCR_NZCV] in {
 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
                   (outs), (ins DPR:$Dd, DPR:$Dm),
                   IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
-                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;
+                  [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 1))]>;

 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
                   (outs), (ins SPR:$Sd, SPR:$Sm),
                   IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
-                  [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
+                  [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 1))]> {
   // Some single precision VFP instructions may be executed on both NEON and
   // VFP pipelines on A8.
   let D = VFPNeonA8Domain;
@@ -537,12 +537,12 @@ def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0,
 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins DPR:$Dd, DPR:$Dm),
                  IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
-                 [/* For disassembly only; pattern left blank */]>;
+                 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 0))]>;

 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
                  (outs), (ins SPR:$Sd, SPR:$Sm),
                  IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
-                 [/* For disassembly only; pattern left blank */]> {
+                 [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 0))]> {
   // Some single precision VFP instructions may be executed on both NEON and
   // VFP pipelines on A8.
   let D = VFPNeonA8Domain;
@@ -581,7 +581,7 @@ let Defs = [FPSCR_NZCV] in {
 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
                    (outs), (ins DPR:$Dd),
                    IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
-                   [(arm_cmpfp0 (f64 DPR:$Dd))]> {
+                   [(arm_cmpfp0 (f64 DPR:$Dd), (i32 1))]> {
   let Inst{3-0} = 0b0000;
   let Inst{5} = 0;
 }
@@ -589,7 +589,7 @@ def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
                    (outs), (ins SPR:$Sd),
                    IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
-                   [(arm_cmpfp0 SPR:$Sd)]> {
+                   [(arm_cmpfp0 SPR:$Sd, (i32 1))]> {
   let Inst{3-0} = 0b0000;
   let Inst{5} = 0;

@@ -610,7 +610,7 @@ def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0,
 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins DPR:$Dd),
                   IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
-                  [/* For disassembly only; pattern left blank */]> {
+                  [(arm_cmpfp0 (f64 DPR:$Dd), (i32 0))]> {
   let Inst{3-0} = 0b0000;
   let Inst{5} = 0;
 }
@@ -618,7 +618,7 @@ def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
                   (outs), (ins SPR:$Sd),
                   IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
-                  [/* For disassembly only; pattern left blank */]> {
+                  [(arm_cmpfp0 SPR:$Sd, (i32 0))]> {
   let Inst{3-0} = 0b0000;
   let Inst{5} = 0;
diff --git a/test/CodeGen/ARM/fast-isel-align.ll b/test/CodeGen/ARM/fast-isel-align.ll
index 701884e926a..71cd73a4a25 100644
--- a/test/CodeGen/ARM/fast-isel-align.ll
+++ b/test/CodeGen/ARM/fast-isel-align.ll
@@ -72,10 +72,10 @@ entry:
   %4 = fcmp une float %3, 0.000000e+00
 ; ARM: ldr r[[R:[0-9]+]], [r0, #2]
 ; ARM: vmov s0, r[[R]]
-; ARM: vcmpe.f32 s0, #0
+; ARM: vcmp.f32 s0, #0
 ; THUMB: ldr.w r[[R:[0-9]+]], [r0, #2]
 ; THUMB: vmov s0, r[[R]]
-; THUMB: vcmpe.f32 s0, #0
+; THUMB: vcmp.f32 s0, #0
   ret i1 %4
 }

diff --git a/test/CodeGen/ARM/fast-isel-cmp-imm.ll b/test/CodeGen/ARM/fast-isel-cmp-imm.ll
index a9d7e458063..543b6c285f3 100644
--- a/test/CodeGen/ARM/fast-isel-cmp-imm.ll
+++ b/test/CodeGen/ARM/fast-isel-cmp-imm.ll
@@ -7,8 +7,8 @@ entry:
 ; ARM: t1a
 ; THUMB: t1a
   %cmp = fcmp oeq float %a, 0.000000e+00
-; ARM: vcmpe.f32 s{{[0-9]+}}, #0
-; THUMB: vcmpe.f32 s{{[0-9]+}}, #0
+; ARM: vcmp.f32 s{{[0-9]+}}, #0
+; THUMB: vcmp.f32 s{{[0-9]+}}, #0
   br i1 %cmp, label %if.then, label %if.end

 if.then: ; preds = %entry
@@ -28,9 +28,9 @@ entry:
 ; THUMB: t1b
   %cmp = fcmp oeq float %a, -0.000000e+00
 ; ARM: vldr
-; ARM: vcmpe.f32 s{{[0-9]+}}, s{{[0-9]+}}
+; ARM: vcmp.f32 s{{[0-9]+}}, s{{[0-9]+}}
 ; THUMB: vldr
-; THUMB: vcmpe.f32 s{{[0-9]+}}, s{{[0-9]+}}
+; THUMB: vcmp.f32 s{{[0-9]+}}, s{{[0-9]+}}
   br i1 %cmp, label %if.then, label %if.end

 if.then: ; preds = %entry
@@ -46,8 +46,8 @@ entry:
 ; ARM: t2a
 ; THUMB: t2a
   %cmp = fcmp oeq double %a, 0.000000e+00
-; ARM: vcmpe.f64 d{{[0-9]+}}, #0
-; THUMB: vcmpe.f64 d{{[0-9]+}}, #0
+; ARM: vcmp.f64 d{{[0-9]+}}, #0
+; THUMB: vcmp.f64 d{{[0-9]+}}, #0
   br i1 %cmp, label %if.then, label %if.end

 if.then: ; preds = %entry
@@ -65,9 +65,9 @@ entry:
 ; THUMB: t2b
   %cmp = fcmp oeq double %a, -0.000000e+00
 ; ARM: vldr
-; ARM: vcmpe.f64 d{{[0-9]+}}, d{{[0-9]+}}
+; ARM: vcmp.f64 d{{[0-9]+}}, d{{[0-9]+}}
 ; THUMB: vldr
-; THUMB: vcmpe.f64 d{{[0-9]+}}, d{{[0-9]+}}
+; THUMB: vcmp.f64 d{{[0-9]+}}, d{{[0-9]+}}
   br i1 %cmp, label %if.then, label %if.end

 if.then: ; preds = %entry
diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll
index c9dafa8dfff..2f7dff70b9b 100644
--- a/test/CodeGen/ARM/fp16-promote.ll
+++ b/test/CodeGen/ARM/fp16-promote.ll
@@ -161,14 +161,14 @@ define void @test_select(half* %p, half* %q, i1 zeroext %c) #0 {
   ret void
 }

-; Test only two variants of fcmp. These get translated to f32 vcmpe
+; Test only two variants of fcmp. These get translated to f32 vcmp
 ; instructions anyway.
 ; CHECK-ALL-LABEL: test_fcmp_une:
 ; CHECK-FP16: vcvtb.f32.f16
 ; CHECK-FP16: vcvtb.f32.f16
 ; CHECK-LIBCALL: bl __aeabi_h2f
 ; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcmpe.f32
+; CHECK-VFP: vcmp.f32
 ; CHECK-NOVFP: bl __aeabi_fcmpeq
 ; CHECK-FP16: vmrs APSR_nzcv, fpscr
 ; CHECK-ALL: movw{{ne|eq}}
@@ -184,7 +184,7 @@ define i1 @test_fcmp_une(half* %p, half* %q) #0 {
 ; CHECK-FP16: vcvtb.f32.f16
 ; CHECK-LIBCALL: bl __aeabi_h2f
 ; CHECK-LIBCALL: bl __aeabi_h2f
-; CHECK-VFP: vcmpe.f32
+; CHECK-VFP: vcmp.f32
 ; CHECK-NOVFP: bl __aeabi_fcmpeq
 ; CHECK-FP16: vmrs APSR_nzcv, fpscr
 ; CHECK-LIBCALL: movw{{ne|eq}}
diff --git a/test/CodeGen/ARM/fpcmp-opt.ll b/test/CodeGen/ARM/fpcmp-opt.ll
index 45bb6d2f702..ae2b57a83f0 100644
--- a/test/CodeGen/ARM/fpcmp-opt.ll
+++ b/test/CodeGen/ARM/fpcmp-opt.ll
@@ -10,7 +10,7 @@ entry:
 ; CHECK-LABEL: t1:
 ; CHECK: vldr [[S0:s[0-9]+]],
 ; CHECK: vldr [[S1:s[0-9]+]],
-; CHECK: vcmpe.f32 [[S1]], [[S0]]
+; CHECK: vcmp.f32 [[S1]], [[S0]]
 ; CHECK: vmrs APSR_nzcv, fpscr
 ; CHECK: beq
   %0 = load float, float* %a
@@ -38,7 +38,7 @@ entry:
 ; CHECK: bfc [[REG2]], #31, #1
 ; CHECK: cmp [[REG1]], #0
 ; CHECK: cmpeq [[REG2]], #0
-; CHECK-NOT: vcmpe.f32
+; CHECK-NOT: vcmp.f32
 ; CHECK-NOT: vmrs
 ; CHECK: bne
   %0 = load double, double* %a
@@ -61,7 +61,7 @@ entry:
 ; CHECK: ldr [[REG3:(r[0-9]+)]], [r0]
 ; CHECK: mvn [[REG4:(r[0-9]+)]], #-2147483648
 ; CHECK: tst [[REG3]], [[REG4]]
-; CHECK-NOT: vcmpe.f32
+; CHECK-NOT: vcmp.f32
 ; CHECK-NOT: vmrs
 ; CHECK: bne
   %0 = load float, float* %a
diff --git a/test/CodeGen/ARM/fpcmp.ll b/test/CodeGen/ARM/fpcmp.ll
index e3ffd45a396..67326e00016 100644
--- a/test/CodeGen/ARM/fpcmp.ll
+++ b/test/CodeGen/ARM/fpcmp.ll
@@ -12,7 +12,7 @@ entry:

 define i32 @f2(float %a) {
 ;CHECK-LABEL: f2:
-;CHECK: vcmpe.f32
+;CHECK: vcmp.f32
 ;CHECK: moveq
 entry:
   %tmp = fcmp oeq float %a, 1.000000e+00 ; [#uses=1]
@@ -52,7 +52,7 @@ entry:

 define i32 @f6(float %a) {
 ;CHECK-LABEL: f6:
-;CHECK: vcmpe.f32
+;CHECK: vcmp.f32
 ;CHECK: movne
 entry:
   %tmp = fcmp une float %a, 1.000000e+00 ; [#uses=1]
diff --git a/test/CodeGen/ARM/fpcmp_ueq.ll b/test/CodeGen/ARM/fpcmp_ueq.ll
index c1696c9be1b..698c7506cc5 100644
--- a/test/CodeGen/ARM/fpcmp_ueq.ll
+++ b/test/CodeGen/ARM/fpcmp_ueq.ll
@@ -17,7 +17,7 @@ entry:
 ; CHECK-ARMv4: moveq r0, #42

 ; CHECK-ARMv7-LABEL: f7:
-; CHECK-ARMv7: vcmpe.f32
+; CHECK-ARMv7: vcmp.f32
 ; CHECK-ARMv7: vmrs APSR_nzcv, fpscr
 ; CHECK-ARMv7: movweq
 ; CHECK-ARMv7-NOT: vmrs
diff --git a/test/CodeGen/ARM/vsel.ll b/test/CodeGen/ARM/vsel.ll
index 746b1b000ef..daea41399b4 100644
--- a/test/CodeGen/ARM/vsel.ll
+++ b/test/CodeGen/ARM/vsel.ll
@@ -132,7 +132,7 @@ define void @test_vsel32oeq(float %lhs32, float %rhs32, float %a, float %b) {
   %tst1 = fcmp oeq float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
 ; CHECK: vseleq.f32 s0, s2, s3
   ret void
 }
@@ -141,7 +141,7 @@ define void @test_vsel64oeq(float %lhs32, float %rhs32, double %a, double %b) {
   %tst1 = fcmp oeq float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
 ; CHECK: vseleq.f64 d16, d1, d2
   ret void
 }
@@ -276,7 +276,7 @@ define void @test_vsel32une(float %lhs32, float %rhs32, float %a, float %b) {
   %tst1 = fcmp une float %lhs32, %rhs32
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, float* @varfloat
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
 ; CHECK: vseleq.f32 s0, s3, s2
   ret void
 }
@@ -285,7 +285,7 @@ define void @test_vsel64une(float %lhs32, float %rhs32, double %a, double %b) {
   %tst1 = fcmp une float %lhs32, %rhs32
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, double* @vardouble
-; CHECK: vcmpe.f32 s0, s1
+; CHECK: vcmp.f32 s0, s1
 ; CHECK: vseleq.f64 d16, d2, d1
   ret void
 }
diff --git a/test/CodeGen/Thumb2/float-cmp.ll b/test/CodeGen/Thumb2/float-cmp.ll
index 77b0999337c..834812cddd6 100644
--- a/test/CodeGen/Thumb2/float-cmp.ll
+++ b/test/CodeGen/Thumb2/float-cmp.ll
@@ -15,7 +15,7 @@ define i1 @cmp_f_false(float %a, float %b) {
 define i1 @cmp_f_oeq(float %a, float %b) {
 ; CHECK-LABEL: cmp_f_oeq:
 ; NONE: bl __aeabi_fcmpeq
-; HARD: vcmpe.f32
+; HARD: vcmp.f32
 ; HARD: moveq r0, #1
   %1 = fcmp oeq float %a, %b
   ret i1 %1
@@ -56,7 +56,7 @@ define i1 @cmp_f_one(float %a, float %b) {
 ; CHECK-LABEL: cmp_f_one:
 ; NONE: bl __aeabi_fcmpgt
 ; NONE: bl __aeabi_fcmplt
-; HARD: vcmpe.f32
+; HARD: vcmp.f32
 ; HARD: movmi r0, #1
 ; HARD: movgt r0, #1
   %1 = fcmp one float %a, %b
@@ -73,7 +73,7 @@ define i1 @cmp_f_ord(float %a, float %b) {
 ; CHECK-LABEL: cmp_f_ueq:
 ; NONE: bl __aeabi_fcmpeq
 ; NONE: bl __aeabi_fcmpun
-; HARD: vcmpe.f32
+; HARD: vcmp.f32
 ; HARD: moveq r0, #1
 ; HARD: movvs r0, #1
   %1 = fcmp ueq float %a, %b
@@ -122,7 +122,7 @@ define i1 @cmp_f_ule(float %a, float %b) {
 define i1 @cmp_f_une(float %a, float %b) {
 ; CHECK-LABEL: cmp_f_une:
 ; NONE: bl __aeabi_fcmpeq
-; HARD: vcmpe.f32
+; HARD: vcmp.f32
 ; HARD: movne r0, #1
   %1 = fcmp une float %a, %b
   ret i1 %1
@@ -154,7 +154,7 @@ define i1 @cmp_d_oeq(double %a, double %b) {
 ; CHECK-LABEL: cmp_d_oeq:
 ; NONE: bl __aeabi_dcmpeq
 ; SP: bl __aeabi_dcmpeq
-; DP: vcmpe.f64
+; DP: vcmp.f64
 ; DP: moveq r0, #1
   %1 = fcmp oeq double %a, %b
   ret i1 %1
@@ -201,7 +201,7 @@ define i1 @cmp_d_one(double %a, double %b) {
 ; NONE: bl __aeabi_dcmpgt
 ; NONE: bl __aeabi_dcmplt
 ; SP: bl __aeabi_dcmpgt
 ; SP: bl __aeabi_dcmplt
-; DP: vcmpe.f64
+; DP: vcmp.f64
 ; DP: movmi r0, #1
 ; DP: movgt r0, #1
   %1 = fcmp one double %a, %b
@@ -259,7 +259,7 @@ define i1 @cmp_d_ueq(double %a, double %b) {
 ; NONE: bl __aeabi_dcmpun
 ; SP: bl __aeabi_dcmpeq
 ; SP: bl __aeabi_dcmpun
-; DP: vcmpe.f64
+; DP: vcmp.f64
 ; DP: moveq r0, #1
 ; DP: movvs r0, #1
   %1 = fcmp ueq double %a, %b
@@ -290,7 +290,7 @@ define i1 @cmp_d_une(double %a, double %b) {
 ; CHECK-LABEL: cmp_d_une:
 ; NONE: bl __aeabi_dcmpeq
 ; SP: bl __aeabi_dcmpeq
-; DP: vcmpe.f64
+; DP: vcmp.f64
 ; DP: movne r0, #1
   %1 = fcmp une double %a, %b
   ret i1 %1
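
The sketch below is an illustration only and is not part of the patch; it does not exercise the ARM backend directly. It shows, on the host, the exception behaviour the change is meant to preserve: comparing a quiet NaN with == should leave the "invalid" flag clear, while an ordered < may set it, because compilers lower < to a signalling compare (VCMPE on ARM, comisd on x86-64) and == to a quiet one (VCMP, ucomisd). The file name and helper names here are invented for the example, and the observed output depends on the host compiler honouring strict IEEE semantics (build without fast-math).

// qnan_compare.cpp (name invented for this example) -- host-side sketch only.
// Demonstrates the C-standard behaviour the patch relies on: an equality
// comparison with a quiet NaN must not raise the "invalid" exception, while
// an ordered relational comparison may.
// Note: strictly conforming use of the FP environment needs FENV_ACCESS;
// in practice this pattern works with GCC/Clang without -ffast-math.
#include <cfenv>
#include <cmath>
#include <cstdio>

namespace {

bool eq(double a, double b) { return a == b; } // equality predicate
bool lt(double a, double b) { return a < b; }  // ordered relational predicate

// Runs the comparison with a clean exception state and reports whether the
// comparison itself raised FE_INVALID.
bool raisesInvalid(bool (*cmp)(double, double), double a, double b) {
  std::feclearexcept(FE_ALL_EXCEPT);
  volatile bool result = cmp(a, b); // volatile: keep the compare in the binary
  (void)result;
  return std::fetestexcept(FE_INVALID) != 0;
}

} // namespace

int main() {
  const double qnan = std::nan(""); // a quiet NaN
  std::printf("1.0 == QNaN raises Invalid: %d (expected 0)\n",
              raisesInvalid(eq, 1.0, qnan) ? 1 : 0);
  std::printf("1.0 <  QNaN raises Invalid: %d (expected 1 where < lowers to a "
              "signalling compare such as VCMPE)\n",
              raisesInvalid(lt, 1.0, qnan) ? 1 : 0);
  return 0;
}

The underlying hardware distinction is that VCMP signals Invalid only when an operand is a signalling NaN, whereas VCMPE signals it for any NaN operand; the new i32 operand on ARMISD::CMPFP and CMPFPw0 simply records which of the two encodings a given fcmp predicate is allowed to use.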