From 2a0eea05ecf213f2cfffb74c4245abe6963d2957 Mon Sep 17 00:00:00 2001 From: Kristof Beyls Date: Tue, 8 Oct 2019 08:25:42 +0000 Subject: [PATCH] [ARM] Generate vcmp instead of vcmpe Based on the discussion in http://lists.llvm.org/pipermail/llvm-dev/2019-October/135574.html, the conclusion was reached that the ARM backend should produce vcmp instead of vcmpe instructions by default, i.e. not produce an Invalid Operation exception when either argument in a floating point compare is a quiet NaN. In the future, after constrained floating point intrinsics for floating point compare have been introduced, vcmpe instructions probably should be produced for those intrinsics - depending on the exact semantics they'll be defined to have. This patch logically consists of the following parts: - Revert http://llvm.org/viewvc/llvm-project?rev=294945&view=rev and http://llvm.org/viewvc/llvm-project?rev=294968&view=rev, which implemented fine-tuning for when to produce vcmpe (i.e. not do it for equality comparisons). The complexity introduced by those patches isn't needed anymore if we just always produce vcmp instead. Maybe these patches need to be reintroduced once support is needed to map potential LLVM-IR constrained floating point compare intrinsics to the ARM instruction set. - Simply select vcmp instead of vcmpe; see the simple changes in lib/Target/ARM/ARMInstrVFP.td. - Adapt lots of tests that tested for vcmpe (instead of vcmp). For all of these tests, the intent of what is tested isn't related to whether the vcmp should produce an Invalid Operation exception or not. Fixes PR43374. Differential Revision: https://reviews.llvm.org/D68463 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@374025 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/ARM/ARMFastISel.cpp | 22 +- lib/Target/ARM/ARMISelLowering.cpp | 51 ++--- lib/Target/ARM/ARMISelLowering.h | 2 +- lib/Target/ARM/ARMInstrInfo.td | 2 - lib/Target/ARM/ARMInstrVFP.td | 28 +-- test/CodeGen/ARM/2009-07-18-RewriterBug.ll | 26 +-- test/CodeGen/ARM/arm-shrink-wrapping.ll | 4 +- test/CodeGen/ARM/compare-call.ll | 2 +- test/CodeGen/ARM/fcmp-xo.ll | 12 +- test/CodeGen/ARM/float-helpers.s | 40 ++-- test/CodeGen/ARM/fp16-instructions.ll | 64 +++--- test/CodeGen/ARM/fp16-promote.ll | 2 +- test/CodeGen/ARM/fpcmp.ll | 10 +- test/CodeGen/ARM/ifcvt11.ll | 6 +- test/CodeGen/ARM/swifterror.ll | 2 +- test/CodeGen/ARM/vcmp-crash.ll | 11 - test/CodeGen/ARM/vfp.ll | 2 +- test/CodeGen/ARM/vsel-fp16.ll | 40 ++-- test/CodeGen/ARM/vsel.ll | 80 +++---- test/CodeGen/Thumb2/float-cmp.ll | 40 ++-- test/CodeGen/Thumb2/mve-vcmpf.ll | 240 ++++++++++----------- test/CodeGen/Thumb2/mve-vcmpfr.ll | 240 ++++++++++----------- test/CodeGen/Thumb2/mve-vcmpfz.ll | 240 ++++++++++----------- 23 files changed, 561 insertions(+), 605 deletions(-) delete mode 100644 test/CodeGen/ARM/vcmp-crash.ll diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index 2fd11426c5a..3e3745f129c 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -191,7 +191,7 @@ class ARMFastISel final : public FastISel { bool isTypeLegal(Type *Ty, MVT &VT); bool isLoadTypeLegal(Type *Ty, MVT &VT); bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, - bool isZExt, bool isEquality); + bool isZExt); bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr, unsigned Alignment = 0, bool isZExt = true, bool allocReg = true); @@ -1259,8 +1259,7 @@ bool ARMFastISel::SelectBranch(const Instruction *I) { if (ARMPred == ARMCC::AL) return
false; // Emit the compare. - if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(), - CI->isEquality())) + if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) return false; unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc; @@ -1349,7 +1348,7 @@ bool ARMFastISel::SelectIndirectBr(const Instruction *I) { } bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, - bool isZExt, bool isEquality) { + bool isZExt) { Type *Ty = Src1Value->getType(); EVT SrcEVT = TLI.getValueType(DL, Ty, true); if (!SrcEVT.isSimple()) return false; @@ -1397,19 +1396,11 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, // TODO: Verify compares. case MVT::f32: isICmp = false; - // Equality comparisons shouldn't raise Invalid on uordered inputs. - if (isEquality) - CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS; - else - CmpOpc = UseImm ? ARM::VCMPEZS : ARM::VCMPES; + CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS; break; case MVT::f64: isICmp = false; - // Equality comparisons shouldn't raise Invalid on uordered inputs. - if (isEquality) - CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD; - else - CmpOpc = UseImm ? ARM::VCMPEZD : ARM::VCMPED; + CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD; break; case MVT::i1: case MVT::i8: @@ -1485,8 +1476,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) { if (ARMPred == ARMCC::AL) return false; // Emit the compare. - if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned(), - CI->isEquality())) + if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned())) return false; // Now set a register based on the comparison. Explicitly set the predicates diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 45bf6763382..ec553708798 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -1793,34 +1793,22 @@ static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC. 
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, - ARMCC::CondCodes &CondCode2, bool &InvalidOnQNaN) { + ARMCC::CondCodes &CondCode2) { CondCode2 = ARMCC::AL; - InvalidOnQNaN = true; switch (CC) { default: llvm_unreachable("Unknown FP condition!"); case ISD::SETEQ: - case ISD::SETOEQ: - CondCode = ARMCC::EQ; - InvalidOnQNaN = false; - break; + case ISD::SETOEQ: CondCode = ARMCC::EQ; break; case ISD::SETGT: case ISD::SETOGT: CondCode = ARMCC::GT; break; case ISD::SETGE: case ISD::SETOGE: CondCode = ARMCC::GE; break; case ISD::SETOLT: CondCode = ARMCC::MI; break; case ISD::SETOLE: CondCode = ARMCC::LS; break; - case ISD::SETONE: - CondCode = ARMCC::MI; - CondCode2 = ARMCC::GT; - InvalidOnQNaN = false; - break; + case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break; case ISD::SETO: CondCode = ARMCC::VC; break; case ISD::SETUO: CondCode = ARMCC::VS; break; - case ISD::SETUEQ: - CondCode = ARMCC::EQ; - CondCode2 = ARMCC::VS; - InvalidOnQNaN = false; - break; + case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break; case ISD::SETUGT: CondCode = ARMCC::HI; break; case ISD::SETUGE: CondCode = ARMCC::PL; break; case ISD::SETLT: @@ -1828,10 +1816,7 @@ static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, case ISD::SETLE: case ISD::SETULE: CondCode = ARMCC::LE; break; case ISD::SETNE: - case ISD::SETUNE: - CondCode = ARMCC::NE; - InvalidOnQNaN = false; - break; + case ISD::SETUNE: CondCode = ARMCC::NE; break; } } @@ -4259,15 +4244,13 @@ SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, /// Returns a appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands. SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, - SelectionDAG &DAG, const SDLoc &dl, - bool InvalidOnQNaN) const { + SelectionDAG &DAG, const SDLoc &dl) const { assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64); SDValue Cmp; - SDValue C = DAG.getConstant(InvalidOnQNaN, dl, MVT::i32); if (!isFloatingPointZero(RHS)) - Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS, C); + Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS); else - Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS, C); + Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS); return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp); } @@ -4284,12 +4267,10 @@ ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { Cmp = Cmp.getOperand(0); Opc = Cmp.getOpcode(); if (Opc == ARMISD::CMPFP) - Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), - Cmp.getOperand(1), Cmp.getOperand(2)); + Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1)); else { assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT"); - Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0), - Cmp.getOperand(1)); + Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0)); } return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp); } @@ -4929,8 +4910,7 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { } ARMCC::CondCodes CondCode, CondCode2; - bool InvalidOnQNaN; - FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN); + FPCCToARMCC(CC, CondCode, CondCode2); // Normalize the fp compare. 
If RHS is zero we prefer to keep it there so we // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we @@ -4955,13 +4935,13 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { } SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); - SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); + SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG); if (CondCode2 != ARMCC::AL) { SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32); // FIXME: Needs another CMP because flag can have but one use. - SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); + SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl); Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG); } return Result; @@ -5188,11 +5168,10 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { } ARMCC::CondCodes CondCode, CondCode2; - bool InvalidOnQNaN; - FPCCToARMCC(CC, CondCode, CondCode2, InvalidOnQNaN); + FPCCToARMCC(CC, CondCode, CondCode2); SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32); - SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, InvalidOnQNaN); + SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl); SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32); SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue); SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp }; diff --git a/lib/Target/ARM/ARMISelLowering.h b/lib/Target/ARM/ARMISelLowering.h index b8ce4d65f75..a89ef250c0e 100644 --- a/lib/Target/ARM/ARMISelLowering.h +++ b/lib/Target/ARM/ARMISelLowering.h @@ -818,7 +818,7 @@ class VectorType; SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const; SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, - const SDLoc &dl, bool InvalidOnQNaN) const; + const SDLoc &dl) const; SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const; SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const; diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index e25260d8b47..f75343675da 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -51,8 +51,6 @@ def SDT_ARMAnd : SDTypeProfile<1, 2, SDTCisVT<2, i32>]>; def SDT_ARMCmp : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>; -def SDT_ARMFCmp : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>, - SDTCisVT<2, i32>]>; def SDT_ARMPICAdd : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>, SDTCisPtrTy<1>, SDTCisVT<2, i32>]>; diff --git a/lib/Target/ARM/ARMInstrVFP.td b/lib/Target/ARM/ARMInstrVFP.td index d3380ab7cef..fdd961bfbb2 100644 --- a/lib/Target/ARM/ARMInstrVFP.td +++ b/lib/Target/ARM/ARMInstrVFP.td @@ -10,7 +10,7 @@ // //===----------------------------------------------------------------------===// -def SDT_CMPFP0 : SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisVT<1, i32>]>; +def SDT_CMPFP0 : SDTypeProfile<0, 1, [SDTCisFP<0>]>; def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>, SDTCisSameAs<1, 2>]>; def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>, @@ -19,7 +19,7 @@ def SDT_VMOVRRD : SDTypeProfile<2, 1, [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>, def SDT_VMOVSR : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisVT<1, i32>]>; def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>; -def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMFCmp, [SDNPOutGlue]>; +def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutGlue]>; def 
arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>; def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>; def arm_fmrrd : SDNode<"ARMISD::VMOVRRD", SDT_VMOVRRD>; @@ -548,12 +548,12 @@ let Defs = [FPSCR_NZCV] in { def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins DPR:$Dd, DPR:$Dm), IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm", - [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 1))]>; + [/* For disassembly only; pattern left blank */]>; def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins SPR:$Sd, SPR:$Sm), IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm", - [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 1))]> { + [/* For disassembly only; pattern left blank */]> { // Some single precision VFP instructions may be executed on both NEON and // VFP pipelines on A8. let D = VFPNeonA8Domain; @@ -562,17 +562,17 @@ def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0, def VCMPEH : AHuI<0b11101, 0b11, 0b0100, 0b11, 0, (outs), (ins HPR:$Sd, HPR:$Sm), IIC_fpCMP16, "vcmpe", ".f16\t$Sd, $Sm", - [(arm_cmpfp HPR:$Sd, HPR:$Sm, (i32 1))]>; + [/* For disassembly only; pattern left blank */]>; def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins DPR:$Dd, DPR:$Dm), IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm", - [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm), (i32 0))]>; + [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>; def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins SPR:$Sd, SPR:$Sm), IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm", - [(arm_cmpfp SPR:$Sd, SPR:$Sm, (i32 0))]> { + [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> { // Some single precision VFP instructions may be executed on both NEON and // VFP pipelines on A8. let D = VFPNeonA8Domain; @@ -581,7 +581,7 @@ def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0, def VCMPH : AHuI<0b11101, 0b11, 0b0100, 0b01, 0, (outs), (ins HPR:$Sd, HPR:$Sm), IIC_fpCMP16, "vcmp", ".f16\t$Sd, $Sm", - [(arm_cmpfp HPR:$Sd, HPR:$Sm, (i32 0))]>; + [(arm_cmpfp HPR:$Sd, HPR:$Sm)]>; } // Defs = [FPSCR_NZCV] //===----------------------------------------------------------------------===// @@ -611,7 +611,7 @@ let Defs = [FPSCR_NZCV] in { def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins DPR:$Dd), IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0", - [(arm_cmpfp0 (f64 DPR:$Dd), (i32 1))]> { + [/* For disassembly only; pattern left blank */]> { let Inst{3-0} = 0b0000; let Inst{5} = 0; } @@ -619,7 +619,7 @@ def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0, def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins SPR:$Sd), IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0", - [(arm_cmpfp0 SPR:$Sd, (i32 1))]> { + [/* For disassembly only; pattern left blank */]> { let Inst{3-0} = 0b0000; let Inst{5} = 0; @@ -631,7 +631,7 @@ def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0, def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0, (outs), (ins HPR:$Sd), IIC_fpCMP16, "vcmpe", ".f16\t$Sd, #0", - [(arm_cmpfp0 HPR:$Sd, (i32 1))]> { + [/* For disassembly only; pattern left blank */]> { let Inst{3-0} = 0b0000; let Inst{5} = 0; } @@ -639,7 +639,7 @@ def VCMPEZH : AHuI<0b11101, 0b11, 0b0101, 0b11, 0, def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins DPR:$Dd), IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0", - [(arm_cmpfp0 (f64 DPR:$Dd), (i32 0))]> { + [(arm_cmpfp0 (f64 DPR:$Dd))]> { let Inst{3-0} = 0b0000; let Inst{5} = 0; } @@ -647,7 +647,7 @@ def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0, def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins SPR:$Sd), IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0", - [(arm_cmpfp0 SPR:$Sd, (i32 0))]> { + [(arm_cmpfp0 SPR:$Sd)]> { let Inst{3-0} = 0b0000; let Inst{5} = 0; @@ -659,7 +659,7 @@ def 
VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0, def VCMPZH : AHuI<0b11101, 0b11, 0b0101, 0b01, 0, (outs), (ins HPR:$Sd), IIC_fpCMP16, "vcmp", ".f16\t$Sd, #0", - [(arm_cmpfp0 HPR:$Sd, (i32 0))]> { + [(arm_cmpfp0 HPR:$Sd)]> { let Inst{3-0} = 0b0000; let Inst{5} = 0; } diff --git a/test/CodeGen/ARM/2009-07-18-RewriterBug.ll b/test/CodeGen/ARM/2009-07-18-RewriterBug.ll index 10b5ae4e237..9eae0d75e87 100644 --- a/test/CodeGen/ARM/2009-07-18-RewriterBug.ll +++ b/test/CodeGen/ARM/2009-07-18-RewriterBug.ll @@ -1317,19 +1317,19 @@ bb15: } ; CHECK-LABEL: _build_delaunay: -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe -; CHECK: vcmpe +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp +; CHECK: vcmp declare i32 @puts(i8* nocapture) nounwind diff --git a/test/CodeGen/ARM/arm-shrink-wrapping.ll b/test/CodeGen/ARM/arm-shrink-wrapping.ll index 4b043362afa..99936cd7eef 100644 --- a/test/CodeGen/ARM/arm-shrink-wrapping.ll +++ b/test/CodeGen/ARM/arm-shrink-wrapping.ll @@ -1781,7 +1781,7 @@ define float @debug_info(float %gamma, float %slopeLimit, i1 %or.cond, double %t ; ARM-NEXT: vmov.f32 s0, #1.000000e+00 ; ARM-NEXT: vmov.f64 d16, #1.000000e+00 ; ARM-NEXT: vadd.f64 d16, d9, d16 -; ARM-NEXT: vcmpe.f32 s16, s0 +; ARM-NEXT: vcmp.f32 s16, s0 ; ARM-NEXT: vmrs APSR_nzcv, fpscr ; ARM-NEXT: vmov d17, r0, r1 ; ARM-NEXT: vmov.f64 d18, d9 @@ -1828,7 +1828,7 @@ define float @debug_info(float %gamma, float %slopeLimit, i1 %or.cond, double %t ; THUMB-NEXT: vmov.f32 s0, #1.000000e+00 ; THUMB-NEXT: vmov.f64 d16, #1.000000e+00 ; THUMB-NEXT: vmov.f64 d18, d9 -; THUMB-NEXT: vcmpe.f32 s16, s0 +; THUMB-NEXT: vcmp.f32 s16, s0 ; THUMB-NEXT: vadd.f64 d16, d9, d16 ; THUMB-NEXT: vmrs APSR_nzcv, fpscr ; THUMB-NEXT: it gt diff --git a/test/CodeGen/ARM/compare-call.ll b/test/CodeGen/ARM/compare-call.ll index f45ed73adb7..47f20a28b8a 100644 --- a/test/CodeGen/ARM/compare-call.ll +++ b/test/CodeGen/ARM/compare-call.ll @@ -18,5 +18,5 @@ UnifiedReturnBlock: ; preds = %entry declare i32 @bar(...) 
-; CHECK: vcmpe.f32 +; CHECK: vcmp.f32 diff --git a/test/CodeGen/ARM/fcmp-xo.ll b/test/CodeGen/ARM/fcmp-xo.ll index 8ff3b9017a5..3d5972f0658 100644 --- a/test/CodeGen/ARM/fcmp-xo.ll +++ b/test/CodeGen/ARM/fcmp-xo.ll @@ -5,7 +5,7 @@ define arm_aapcs_vfpcc float @foo0(float %a0) local_unnamed_addr { ; CHECK-LABEL: foo0: ; CHECK: @ %bb.0: -; CHECK-NEXT: vcmpe.f32 s0, #0 +; CHECK-NEXT: vcmp.f32 s0, #0 ; CHECK-NEXT: vmov.f32 s2, #5.000000e-01 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vmov.f32 s4, #-5.000000e-01 @@ -24,7 +24,7 @@ define arm_aapcs_vfpcc float @float1(float %a0) local_unnamed_addr { ; CHECK-NEXT: vmov.f32 s2, #1.000000e+00 ; CHECK-NEXT: vmov.f32 s4, #5.000000e-01 ; CHECK-NEXT: vmov.f32 s6, #-5.000000e-01 -; CHECK-NEXT: vcmpe.f32 s2, s0 +; CHECK-NEXT: vcmp.f32 s2, s0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselgt.f32 s0, s6, s4 ; CHECK-NEXT: bx lr @@ -46,7 +46,7 @@ define arm_aapcs_vfpcc float @float128(float %a0) local_unnamed_addr { ; VMOVSR-NEXT: vmov.f32 s4, #5.000000e-01 ; VMOVSR-NEXT: vmov s2, r0 ; VMOVSR-NEXT: vmov.f32 s6, #-5.000000e-01 -; VMOVSR-NEXT: vcmpe.f32 s2, s0 +; VMOVSR-NEXT: vcmp.f32 s2, s0 ; VMOVSR-NEXT: vmrs APSR_nzcv, fpscr ; VMOVSR-NEXT: vselgt.f32 s0, s6, s4 ; VMOVSR-NEXT: bx lr @@ -57,7 +57,7 @@ define arm_aapcs_vfpcc float @float128(float %a0) local_unnamed_addr { ; NEON-NEXT: vmov.f32 s2, #5.000000e-01 ; NEON-NEXT: vmov d3, r0, r0 ; NEON-NEXT: vmov.f32 s4, #-5.000000e-01 -; NEON-NEXT: vcmpe.f32 s6, s0 +; NEON-NEXT: vcmp.f32 s6, s0 ; NEON-NEXT: vmrs APSR_nzcv, fpscr ; NEON-NEXT: vselgt.f32 s0, s4, s2 ; NEON-NEXT: bx lr @@ -70,7 +70,7 @@ define arm_aapcs_vfpcc double @double1(double %a0) local_unnamed_addr { ; CHECK-LABEL: double1: ; CHECK: @ %bb.0: ; CHECK-NEXT: vmov.f64 d18, #1.000000e+00 -; CHECK-NEXT: vcmpe.f64 d18, d0 +; CHECK-NEXT: vcmp.f64 d18, d0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vmov.f64 d16, #5.000000e-01 ; CHECK-NEXT: vmov.f64 d17, #-5.000000e-01 @@ -89,7 +89,7 @@ define arm_aapcs_vfpcc double @double128(double %a0) local_unnamed_addr { ; CHECK-NEXT: movt r0, #16480 ; CHECK-NEXT: vmov.f64 d16, #5.000000e-01 ; CHECK-NEXT: vmov d18, r1, r0 -; CHECK-NEXT: vcmpe.f64 d18, d0 +; CHECK-NEXT: vcmp.f64 d18, d0 ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vmov.f64 d17, #-5.000000e-01 ; CHECK-NEXT: vselgt.f64 d0, d17, d16 diff --git a/test/CodeGen/ARM/float-helpers.s b/test/CodeGen/ARM/float-helpers.s index d5388a372b8..1225b4c999f 100644 --- a/test/CodeGen/ARM/float-helpers.s +++ b/test/CodeGen/ARM/float-helpers.s @@ -174,13 +174,13 @@ define i32 @fcmplt(float %a, float %b) #0 { ; CHECK-SOFTFP: vmov s2, r0 ; CHECK-SOFTFP-NEXT: mov r0, #0 ; CHECK-SOFTFP-NEXT: vmov s0, r1 -; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0 +; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movmi r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-SP-LABEL: fcmplt: -; CHECK-HARDFP-SP: vcmpe.f32 s0, s1 +; CHECK-HARDFP-SP: vcmp.f32 s0, s1 ; CHECK-HARDFP-SP-NEXT: mov r0, #0 ; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-SP-NEXT: movmi r0, #1 @@ -205,13 +205,13 @@ define i32 @fcmple(float %a, float %b) #0 { ; CHECK-SOFTFP: vmov s2, r0 ; CHECK-SOFTFP-NEXT: mov r0, #0 ; CHECK-SOFTFP-NEXT: vmov s0, r1 -; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0 +; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movls r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-SP-LABEL: fcmple: -; CHECK-HARDFP-SP: vcmpe.f32 s0, s1 +; CHECK-HARDFP-SP: vcmp.f32 s0, s1 
; CHECK-HARDFP-SP-NEXT: mov r0, #0 ; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-SP-NEXT: movls r0, #1 @@ -236,13 +236,13 @@ define i32 @fcmpge(float %a, float %b) #0 { ; CHECK-SOFTFP: vmov s2, r0 ; CHECK-SOFTFP-NEXT: mov r0, #0 ; CHECK-SOFTFP-NEXT: vmov s0, r1 -; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0 +; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movge r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-SP-LABEL: fcmpge: -; CHECK-HARDFP-SP: vcmpe.f32 s0, s1 +; CHECK-HARDFP-SP: vcmp.f32 s0, s1 ; CHECK-HARDFP-SP-NEXT: mov r0, #0 ; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-SP-NEXT: movge r0, #1 @@ -267,13 +267,13 @@ define i32 @fcmpgt(float %a, float %b) #0 { ; CHECK-SOFTFP: vmov s2, r0 ; CHECK-SOFTFP-NEXT: mov r0, #0 ; CHECK-SOFTFP-NEXT: vmov s0, r1 -; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0 +; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movgt r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-SP-LABEL: fcmpgt: -; CHECK-HARDFP-SP: vcmpe.f32 s0, s1 +; CHECK-HARDFP-SP: vcmp.f32 s0, s1 ; CHECK-HARDFP-SP-NEXT: mov r0, #0 ; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-SP-NEXT: movgt r0, #1 @@ -298,13 +298,13 @@ define i32 @fcmpun(float %a, float %b) #0 { ; CHECK-SOFTFP: vmov s2, r0 ; CHECK-SOFTFP-NEXT: mov r0, #0 ; CHECK-SOFTFP-NEXT: vmov s0, r1 -; CHECK-SOFTFP-NEXT: vcmpe.f32 s2, s0 +; CHECK-SOFTFP-NEXT: vcmp.f32 s2, s0 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movvs r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-SP-LABEL: fcmpun: -; CHECK-HARDFP-SP: vcmpe.f32 s0, s1 +; CHECK-HARDFP-SP: vcmp.f32 s0, s1 ; CHECK-HARDFP-SP-NEXT: mov r0, #0 ; CHECK-HARDFP-SP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-SP-NEXT: movvs r0, #1 @@ -503,13 +503,13 @@ define i32 @dcmplt(double %a, double %b) #0 { ; CHECK-SOFTFP: vmov d16, r2, r3 ; CHECK-SOFTFP-NEXT: vmov d17, r0, r1 ; CHECK-SOFTFP-NEXT: mov r0, #0 -; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16 +; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movmi r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-DP-LABEL: dcmplt: -; CHECK-HARDFP-DP: vcmpe.f64 d0, d1 +; CHECK-HARDFP-DP: vcmp.f64 d0, d1 ; CHECK-HARDFP-DP-NEXT: mov r0, #0 ; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-DP-NEXT: movmi r0, #1 @@ -545,13 +545,13 @@ define i32 @dcmple(double %a, double %b) #0 { ; CHECK-SOFTFP: vmov d16, r2, r3 ; CHECK-SOFTFP-NEXT: vmov d17, r0, r1 ; CHECK-SOFTFP-NEXT: mov r0, #0 -; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16 +; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movls r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-DP-LABEL: dcmple: -; CHECK-HARDFP-DP: vcmpe.f64 d0, d1 +; CHECK-HARDFP-DP: vcmp.f64 d0, d1 ; CHECK-HARDFP-DP-NEXT: mov r0, #0 ; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-DP-NEXT: movls r0, #1 @@ -587,13 +587,13 @@ define i32 @dcmpge(double %a, double %b) #0 { ; CHECK-SOFTFP: vmov d16, r2, r3 ; CHECK-SOFTFP-NEXT: vmov d17, r0, r1 ; CHECK-SOFTFP-NEXT: mov r0, #0 -; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16 +; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movge r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-DP-LABEL: dcmpge: -; CHECK-HARDFP-DP: vcmpe.f64 d0, d1 +; CHECK-HARDFP-DP: vcmp.f64 d0, d1 ; CHECK-HARDFP-DP-NEXT: mov r0, #0 ; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr 
; CHECK-HARDFP-DP-NEXT: movge r0, #1 @@ -629,13 +629,13 @@ define i32 @dcmpgt(double %a, double %b) #0 { ; CHECK-SOFTFP: vmov d16, r2, r3 ; CHECK-SOFTFP-NEXT: vmov d17, r0, r1 ; CHECK-SOFTFP-NEXT: mov r0, #0 -; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16 +; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movgt r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-DP-LABEL: dcmpgt: -; CHECK-HARDFP-DP: vcmpe.f64 d0, d1 +; CHECK-HARDFP-DP: vcmp.f64 d0, d1 ; CHECK-HARDFP-DP-NEXT: mov r0, #0 ; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-DP-NEXT: movgt r0, #1 @@ -671,13 +671,13 @@ define i32 @dcmpun(double %a, double %b) #0 { ; CHECK-SOFTFP: vmov d16, r2, r3 ; CHECK-SOFTFP-NEXT: vmov d17, r0, r1 ; CHECK-SOFTFP-NEXT: mov r0, #0 -; CHECK-SOFTFP-NEXT: vcmpe.f64 d17, d16 +; CHECK-SOFTFP-NEXT: vcmp.f64 d17, d16 ; CHECK-SOFTFP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-NEXT: movvs r0, #1 ; CHECK-SOFTFP-NEXT: mov pc, lr ; ; CHECK-HARDFP-DP-LABEL: dcmpun: -; CHECK-HARDFP-DP: vcmpe.f64 d0, d1 +; CHECK-HARDFP-DP: vcmp.f64 d0, d1 ; CHECK-HARDFP-DP-NEXT: mov r0, #0 ; CHECK-HARDFP-DP-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-DP-NEXT: movvs r0, #1 diff --git a/test/CodeGen/ARM/fp16-instructions.ll b/test/CodeGen/ARM/fp16-instructions.ll index a8fc532070e..260dd12b3e2 100644 --- a/test/CodeGen/ARM/fp16-instructions.ll +++ b/test/CodeGen/ARM/fp16-instructions.ll @@ -164,9 +164,9 @@ entry: ; CHECK-LABEL: VCMPE1: ; CHECK-SOFT: bl __aeabi_fcmplt -; CHECK-SOFTFP-FP16: vcmpe.f32 s0, #0 -; CHECK-SOFTFP-FULLFP16: vcmpe.f16 s0, #0 -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, #0 +; CHECK-SOFTFP-FP16: vcmp.f32 s0, #0 +; CHECK-SOFTFP-FULLFP16: vcmp.f16 s0, #0 +; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, #0 } define i32 @VCMPE2(float %F.coerce, float %G.coerce) { @@ -184,9 +184,9 @@ entry: ; CHECK-LABEL: VCMPE2: ; CHECK-SOFT: bl __aeabi_fcmplt -; CHECK-SOFTFP-FP16: vcmpe.f32 s{{.}}, s{{.}} -; CHECK-SOFTFP-FULLFP16: vcmpe.f16 s{{.}}, s{{.}} -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s{{.}}, s{{.}} +; CHECK-SOFTFP-FP16: vcmp.f32 s{{.}}, s{{.}} +; CHECK-SOFTFP-FULLFP16: vcmp.f16 s{{.}}, s{{.}} +; CHECK-HARDFP-FULLFP16: vcmp.f16 s{{.}}, s{{.}} } ; Test lowering of BR_CC @@ -212,10 +212,10 @@ for.end: ; CHECK-SOFT: cmp r0, #{{0|1}} ; CHECK-SOFTFP-FP16: vcvtb.f32.f16 [[S2:s[0-9]]], [[S2]] -; CHECK-SOFTFP-FP16: vcmpe.f32 [[S2]], s0 +; CHECK-SOFTFP-FP16: vcmp.f32 [[S2]], s0 ; CHECK-SOFTFP-FP16: vmrs APSR_nzcv, fpscr -; CHECK-SOFTFP-FULLFP16: vcmpe.f16 s{{.}}, s{{.}} +; CHECK-SOFTFP-FULLFP16: vcmp.f16 s{{.}}, s{{.}} ; CHECK-SOFTFP-FULLFP16: vmrs APSR_nzcv, fpscr } @@ -727,15 +727,15 @@ define half @select_cc_ge1(half* %a0) { ; CHECK-LABEL: select_cc_ge1: -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s6, s0 +; CHECK-HARDFP-FULLFP16: vcmp.f16 s6, s0 ; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-FULLFP16-NEXT: vselge.f16 s0, s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-A32-NEXT: vmovge.f32 s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-T32-NEXT: it ge ; CHECK-SOFTFP-FP16-T32-NEXT: vmovge.f32 s{{.}}, s{{.}} @@ -749,15 +749,15 @@ define half @select_cc_ge2(half* %a0) { ; CHECK-LABEL: select_cc_ge2: -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, s6 +; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, s6 ; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr 
; CHECK-HARDFP-FULLFP16-NEXT: vselge.f16 s0, s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-A32-NEXT: vmovls.f32 s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-T32-NEXT: it ls ; CHECK-SOFTFP-FP16-T32-NEXT: vmovls.f32 s{{.}}, s{{.}} @@ -771,15 +771,15 @@ define half @select_cc_ge3(half* %a0) { ; CHECK-LABEL: select_cc_ge3: -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, s6 +; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, s6 ; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-FULLFP16-NEXT: vselge.f16 s0, s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-A32-NEXT: vmovhi.f32 s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-T32-NEXT: it hi ; CHECK-SOFTFP-FP16-T32-NEXT: vmovhi.f32 s{{.}}, s{{.}} @@ -793,15 +793,15 @@ define half @select_cc_ge4(half* %a0) { ; CHECK-LABEL: select_cc_ge4: -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s6, s0 +; CHECK-HARDFP-FULLFP16: vcmp.f16 s6, s0 ; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-FULLFP16-NEXT: vselge.f16 s0, s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-A32-NEXT: vmovlt.f32 s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-T32-NEXT: it lt ; CHECK-SOFTFP-FP16-T32-NEXT: vmovlt.f32 s{{.}}, s{{.}} @@ -816,15 +816,15 @@ define half @select_cc_gt1(half* %a0) { ; CHECK-LABEL: select_cc_gt1: -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s6, s0 +; CHECK-HARDFP-FULLFP16: vcmp.f16 s6, s0 ; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-FULLFP16-NEXT: vselgt.f16 s0, s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-A32-NEXT: vmovgt.f32 s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-T32-NEXT: it gt ; CHECK-SOFTFP-FP16-T32-NEXT: vmovgt.f32 s{{.}}, s{{.}} @@ -838,15 +838,15 @@ define half @select_cc_gt2(half* %a0) { ; CHECK-LABEL: select_cc_gt2: -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, s6 +; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, s6 ; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-FULLFP16-NEXT: vselgt.f16 s0, s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-A32-NEXT: vmovpl.f32 s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-T32-NEXT: it pl ; CHECK-SOFTFP-FP16-T32-NEXT: vmovpl.f32 s{{.}}, s{{.}} @@ -860,15 +860,15 @@ define half @select_cc_gt3(half* %a0) { ; CHECK-LABEL: select_cc_gt3: -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s6, s0 +; CHECK-HARDFP-FULLFP16: vcmp.f16 s6, s0 ; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr 
; CHECK-HARDFP-FULLFP16-NEXT: vselgt.f16 s0, s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-A32-NEXT: vmovle.f32 s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-T32-NEXT: it le ; CHECK-SOFTFP-FP16-T32-NEXT: vmovle.f32 s{{.}}, s{{.}} @@ -882,15 +882,15 @@ define half @select_cc_gt4(half* %a0) { ; CHECK-LABEL: select_cc_gt4: -; CHECK-HARDFP-FULLFP16: vcmpe.f16 s0, s6 +; CHECK-HARDFP-FULLFP16: vcmp.f16 s0, s6 ; CHECK-HARDFP-FULLFP16-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-HARDFP-FULLFP16-NEXT: vselgt.f16 s0, s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-A32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-A32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-A32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-A32-NEXT: vmovmi.f32 s{{.}}, s{{.}} -; CHECK-SOFTFP-FP16-T32: vcmpe.f32 s6, s0 +; CHECK-SOFTFP-FP16-T32: vcmp.f32 s6, s0 ; CHECK-SOFTFP-FP16-T32-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-SOFTFP-FP16-T32-NEXT: it mi ; CHECK-SOFTFP-FP16-T32-NEXT: vmovmi.f32 s{{.}}, s{{.}} diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll index f382144cf95..183653036f3 100644 --- a/test/CodeGen/ARM/fp16-promote.ll +++ b/test/CodeGen/ARM/fp16-promote.ll @@ -202,7 +202,7 @@ define i1 @test_fcmp_ueq(half* %p, half* %q) #0 { ; CHECK-FP16: vcvtb.f32.f16 ; CHECK-LIBCALL: bl __aeabi_h2f ; CHECK-LIBCALL: bl __aeabi_h2f -; CHECK-VFP: vcmpe.f32 +; CHECK-VFP: vcmp.f32 ; CHECK-NOVFP: bl __aeabi_fcmplt ; CHECK-FP16: vmrs APSR_nzcv, fpscr ; CHECK-VFP: strmi diff --git a/test/CodeGen/ARM/fpcmp.ll b/test/CodeGen/ARM/fpcmp.ll index 67326e00016..b8fc21f8146 100644 --- a/test/CodeGen/ARM/fpcmp.ll +++ b/test/CodeGen/ARM/fpcmp.ll @@ -2,7 +2,7 @@ define i32 @f1(float %a) { ;CHECK-LABEL: f1: -;CHECK: vcmpe.f32 +;CHECK: vcmp.f32 ;CHECK: movmi entry: %tmp = fcmp olt float %a, 1.000000e+00 ; [#uses=1] @@ -22,7 +22,7 @@ entry: define i32 @f3(float %a) { ;CHECK-LABEL: f3: -;CHECK: vcmpe.f32 +;CHECK: vcmp.f32 ;CHECK: movgt entry: %tmp = fcmp ogt float %a, 1.000000e+00 ; [#uses=1] @@ -32,7 +32,7 @@ entry: define i32 @f4(float %a) { ;CHECK-LABEL: f4: -;CHECK: vcmpe.f32 +;CHECK: vcmp.f32 ;CHECK: movge entry: %tmp = fcmp oge float %a, 1.000000e+00 ; [#uses=1] @@ -42,7 +42,7 @@ entry: define i32 @f5(float %a) { ;CHECK-LABEL: f5: -;CHECK: vcmpe.f32 +;CHECK: vcmp.f32 ;CHECK: movls entry: %tmp = fcmp ole float %a, 1.000000e+00 ; [#uses=1] @@ -62,7 +62,7 @@ entry: define i32 @g1(double %a) { ;CHECK-LABEL: g1: -;CHECK: vcmpe.f64 +;CHECK: vcmp.f64 ;CHECK: movmi entry: %tmp = fcmp olt double %a, 1.000000e+00 ; [#uses=1] diff --git a/test/CodeGen/ARM/ifcvt11.ll b/test/CodeGen/ARM/ifcvt11.ll index eae41e21c61..7d577065a6d 100644 --- a/test/CodeGen/ARM/ifcvt11.ll +++ b/test/CodeGen/ARM/ifcvt11.ll @@ -17,7 +17,7 @@ bb.nph: ; preds = %entry br label %bb bb: ; preds = %bb4, %bb.nph -; CHECK: vcmpe.f64 +; CHECK: vcmp.f64 ; CHECK: vmrs APSR_nzcv, fpscr %r.19 = phi i32 [ 0, %bb.nph ], [ %r.0, %bb4 ] %n.08 = phi i32 [ 0, %bb.nph ], [ %10, %bb4 ] @@ -30,9 +30,9 @@ bb: ; preds = %bb4, %bb.nph bb1: ; preds = %bb ; CHECK-NOT: it -; CHECK-NOT: vcmpemi +; CHECK-NOT: vcmpmi ; CHECK-NOT: vmrsmi -; CHECK: vcmpe.f64 +; CHECK: vcmp.f64 ; CHECK: vmrs APSR_nzcv, fpscr %scevgep12 = getelementptr %struct.xyz_t, %struct.xyz_t* %p, i32 %n.08, i32 2 %6 = load double, double* %scevgep12, align 4 diff --git a/test/CodeGen/ARM/swifterror.ll 
b/test/CodeGen/ARM/swifterror.ll index 6424754a982..d96bc0249b4 100644 --- a/test/CodeGen/ARM/swifterror.ll +++ b/test/CodeGen/ARM/swifterror.ll @@ -194,7 +194,7 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float ; CHECK-O0: strb [[ID2]], [{{.*}}[[ID]], #8] ; spill r0 ; CHECK-O0: str r0, [sp{{.*}}] -; CHECK-O0: vcmpe +; CHECK-O0: vcmp ; CHECK-O0: ble ; reload from stack ; CHECK-O0: ldr r8 diff --git a/test/CodeGen/ARM/vcmp-crash.ll b/test/CodeGen/ARM/vcmp-crash.ll deleted file mode 100644 index 2d3262be584..00000000000 --- a/test/CodeGen/ARM/vcmp-crash.ll +++ /dev/null @@ -1,11 +0,0 @@ -; RUN: llc -mcpu=cortex-m4 < %s | FileCheck %s - -target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64" -target triple = "thumbv7em-none--eabi" - -; CHECK: vcmp.f32 -define double @f(double %a, double %b, double %c, float %d) { - %1 = fcmp oeq float %d, 0.0 - %2 = select i1 %1, double %a, double %c - ret double %2 -} diff --git a/test/CodeGen/ARM/vfp.ll b/test/CodeGen/ARM/vfp.ll index 8fa5113d8a3..c18855abd87 100644 --- a/test/CodeGen/ARM/vfp.ll +++ b/test/CodeGen/ARM/vfp.ll @@ -142,7 +142,7 @@ define void @test_cmpfp0(float* %glob, i32 %X) { ;CHECK-LABEL: test_cmpfp0: entry: %tmp = load float, float* %glob ; [#uses=1] -;CHECK: vcmpe.f32 +;CHECK: vcmp.f32 %tmp.upgrd.3 = fcmp ogt float %tmp, 0.000000e+00 ; [#uses=1] br i1 %tmp.upgrd.3, label %cond_true, label %cond_false diff --git a/test/CodeGen/ARM/vsel-fp16.ll b/test/CodeGen/ARM/vsel-fp16.ll index 9ccc6f42728..fda1fcb5f87 100644 --- a/test/CodeGen/ARM/vsel-fp16.ll +++ b/test/CodeGen/ARM/vsel-fp16.ll @@ -106,7 +106,7 @@ define void @test_vsel32ogt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselgt.f16 s0, s0, s2 @@ -130,7 +130,7 @@ define void @test_vsel32oge(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselge.f16 s0, s0, s2 @@ -178,7 +178,7 @@ define void @test_vsel32ugt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s6, s4 +; CHECK-NEXT: vcmp.f16 s6, s4 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselge.f16 s0, s2, s0 @@ -202,7 +202,7 @@ define void @test_vsel32uge(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s6, s4 +; CHECK-NEXT: vcmp.f16 s6, s4 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselgt.f16 s0, s2, s0 @@ -226,7 +226,7 @@ define void @test_vsel32olt(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s6, s4 +; CHECK-NEXT: vcmp.f16 s6, s4 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselgt.f16 s0, s0, s2 @@ -250,7 +250,7 @@ define void @test_vsel32ult(half* 
%lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselge.f16 s0, s2, s0 @@ -274,7 +274,7 @@ define void @test_vsel32ole(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s6, s4 +; CHECK-NEXT: vcmp.f16 s6, s4 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselge.f16 s0, s0, s2 @@ -298,7 +298,7 @@ define void @test_vsel32ule(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselgt.f16 s0, s2, s0 @@ -322,7 +322,7 @@ define void @test_vsel32ord(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselvs.f16 s0, s2, s0 @@ -370,7 +370,7 @@ define void @test_vsel32uno(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, half* ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselvs.f16 s0, s0, s2 @@ -395,7 +395,7 @@ define void @test_vsel32ogt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselgt.f16 s0, s0, s2 @@ -419,7 +419,7 @@ define void @test_vsel32oge_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselge.f16 s0, s0, s2 @@ -467,7 +467,7 @@ define void @test_vsel32ugt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselgt.f16 s0, s0, s2 @@ -491,7 +491,7 @@ define void @test_vsel32uge_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselge.f16 s0, s0, s2 @@ -515,7 +515,7 @@ define void @test_vsel32olt_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; 
CHECK-NEXT: vcmpe.f16 s6, s4 +; CHECK-NEXT: vcmp.f16 s6, s4 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselgt.f16 s0, s0, s2 @@ -539,7 +539,7 @@ define void @test_vsel32ult_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s6, s4 +; CHECK-NEXT: vcmp.f16 s6, s4 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselgt.f16 s0, s0, s2 @@ -563,7 +563,7 @@ define void @test_vsel32ole_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s6, s4 +; CHECK-NEXT: vcmp.f16 s6, s4 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselge.f16 s0, s0, s2 @@ -587,7 +587,7 @@ define void @test_vsel32ule_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s6, s4 +; CHECK-NEXT: vcmp.f16 s6, s4 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselge.f16 s0, s0, s2 @@ -611,7 +611,7 @@ define void @test_vsel32ord_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselvs.f16 s0, s2, s0 @@ -659,7 +659,7 @@ define void @test_vsel32uno_nnan(half* %lhs_ptr, half* %rhs_ptr, half* %a_ptr, h ; CHECK-NEXT: vldr.16 s4, [r0] ; CHECK-NEXT: vldr.16 s6, [r1] ; CHECK-NEXT: movw r0, :lower16:varhalf -; CHECK-NEXT: vcmpe.f16 s4, s6 +; CHECK-NEXT: vcmp.f16 s4, s6 ; CHECK-NEXT: movt r0, :upper16:varhalf ; CHECK-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-NEXT: vselvs.f16 s0, s0, s2 diff --git a/test/CodeGen/ARM/vsel.ll b/test/CodeGen/ARM/vsel.ll index 9408424e3a6..33d16ad45e2 100644 --- a/test/CodeGen/ARM/vsel.ll +++ b/test/CodeGen/ARM/vsel.ll @@ -96,7 +96,7 @@ define void @test_vsel32ogt(float %lhs32, float %rhs32, float %a, float %b) { %tst1 = fcmp ogt float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselgt.f32 s0, s2, s3 ret void } @@ -105,7 +105,7 @@ define void @test_vsel64ogt(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp ogt float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselgt.f64 d16, d1, d2 ret void } @@ -114,7 +114,7 @@ define void @test_vsel32oge(float %lhs32, float %rhs32, float %a, float %b) { %tst1 = fcmp oge float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselge.f32 s0, s2, s3 ret void } @@ -123,7 +123,7 @@ define void @test_vsel64oge(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp oge float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselge.f64 d16, d1, d2 ret void } @@ -150,7 +150,7 @@ define void @test_vsel32ugt(float %lhs32, float 
%rhs32, float %a, float %b) { %tst1 = fcmp ugt float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselge.f32 s0, s3, s2 ret void } @@ -159,7 +159,7 @@ define void @test_vsel64ugt(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp ugt float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselge.f64 d16, d2, d1 ret void } @@ -168,7 +168,7 @@ define void @test_vsel32uge(float %lhs32, float %rhs32, float %a, float %b) { %tst1 = fcmp uge float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselgt.f32 s0, s3, s2 ret void } @@ -177,7 +177,7 @@ define void @test_vsel64uge(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp uge float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselgt.f64 d16, d2, d1 ret void } @@ -186,7 +186,7 @@ define void @test_vsel32olt(float %lhs32, float %rhs32, float %a, float %b) { %tst1 = fcmp olt float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselgt.f32 s0, s2, s3 ret void } @@ -195,7 +195,7 @@ define void @test_vsel64olt(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp olt float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselgt.f64 d16, d1, d2 ret void } @@ -204,7 +204,7 @@ define void @test_vsel32ult(float %lhs32, float %rhs32, float %a, float %b) { %tst1 = fcmp ult float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselge.f32 s0, s3, s2 ret void } @@ -213,7 +213,7 @@ define void @test_vsel64ult(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp ult float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselge.f64 d16, d2, d1 ret void } @@ -222,7 +222,7 @@ define void @test_vsel32ole(float %lhs32, float %rhs32, float %a, float %b) { %tst1 = fcmp ole float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselge.f32 s0, s2, s3 ret void } @@ -231,7 +231,7 @@ define void @test_vsel64ole(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp ole float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselge.f64 d16, d1, d2 ret void } @@ -240,7 +240,7 @@ define void @test_vsel32ule(float %lhs32, float %rhs32, float %a, float %b) { %tst1 = fcmp ule float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselgt.f32 s0, s3, s2 ret void } @@ -249,7 +249,7 @@ define void @test_vsel64ule(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp ule float %lhs32, %rhs32 %val1 = select i1 
%tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselgt.f64 d16, d2, d1 ret void } @@ -258,7 +258,7 @@ define void @test_vsel32ord(float %lhs32, float %rhs32, float %a, float %b) { %tst1 = fcmp ord float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselvs.f32 s0, s3, s2 ret void } @@ -267,7 +267,7 @@ define void @test_vsel64ord(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp ord float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselvs.f64 d16, d2, d1 ret void } @@ -294,7 +294,7 @@ define void @test_vsel32uno(float %lhs32, float %rhs32, float %a, float %b) { %tst1 = fcmp uno float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselvs.f32 s0, s2, s3 ret void } @@ -303,7 +303,7 @@ define void @test_vsel64uno(float %lhs32, float %rhs32, double %a, double %b) { %tst1 = fcmp uno float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselvs.f64 d16, d1, d2 ret void } @@ -313,7 +313,7 @@ define void @test_vsel32ogt_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan ogt float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselgt.f32 s0, s2, s3 ret void } @@ -322,7 +322,7 @@ define void @test_vsel64ogt_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan ogt float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselgt.f64 d16, d1, d2 ret void } @@ -331,7 +331,7 @@ define void @test_vsel32oge_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan oge float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselge.f32 s0, s2, s3 ret void } @@ -340,7 +340,7 @@ define void @test_vsel64oge_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan oge float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselge.f64 d16, d1, d2 ret void } @@ -367,7 +367,7 @@ define void @test_vsel32ugt_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan ugt float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselgt.f32 s0, s2, s3 ret void } @@ -376,7 +376,7 @@ define void @test_vsel64ugt_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan ugt float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselgt.f64 d16, d1, d2 ret void } @@ -385,7 +385,7 @@ define void @test_vsel32uge_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan uge float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float 
%val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselge.f32 s0, s2, s3 ret void } @@ -394,7 +394,7 @@ define void @test_vsel64uge_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan uge float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselge.f64 d16, d1, d2 ret void } @@ -403,7 +403,7 @@ define void @test_vsel32olt_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan olt float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselgt.f32 s0, s2, s3 ret void } @@ -412,7 +412,7 @@ define void @test_vsel64olt_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan olt float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselgt.f64 d16, d1, d2 ret void } @@ -421,7 +421,7 @@ define void @test_vsel32ult_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan ult float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselgt.f32 s0, s2, s3 ret void } @@ -430,7 +430,7 @@ define void @test_vsel64ult_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan ult float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselgt.f64 d16, d1, d2 ret void } @@ -439,7 +439,7 @@ define void @test_vsel32ole_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan ole float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselge.f32 s0, s2, s3 ret void } @@ -448,7 +448,7 @@ define void @test_vsel64ole_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan ole float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselge.f64 d16, d1, d2 ret void } @@ -457,7 +457,7 @@ define void @test_vsel32ule_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan ule float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselge.f32 s0, s2, s3 ret void } @@ -466,7 +466,7 @@ define void @test_vsel64ule_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan ule float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s1, s0 +; CHECK: vcmp.f32 s1, s0 ; CHECK: vselge.f64 d16, d1, d2 ret void } @@ -475,7 +475,7 @@ define void @test_vsel32ord_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan ord float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselvs.f32 s0, s3, s2 ret void } @@ -484,7 +484,7 @@ define void @test_vsel64ord_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan ord float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, 
double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselvs.f64 d16, d2, d1 ret void } @@ -511,7 +511,7 @@ define void @test_vsel32uno_nnan(float %lhs32, float %rhs32, float %a, float %b) %tst1 = fcmp nnan uno float %lhs32, %rhs32 %val1 = select i1 %tst1, float %a, float %b store float %val1, float* @varfloat -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselvs.f32 s0, s2, s3 ret void } @@ -520,7 +520,7 @@ define void @test_vsel64uno_nnan(float %lhs32, float %rhs32, double %a, double % %tst1 = fcmp nnan uno float %lhs32, %rhs32 %val1 = select i1 %tst1, double %a, double %b store double %val1, double* @vardouble -; CHECK: vcmpe.f32 s0, s1 +; CHECK: vcmp.f32 s0, s1 ; CHECK: vselvs.f64 d16, d1, d2 ret void } diff --git a/test/CodeGen/Thumb2/float-cmp.ll b/test/CodeGen/Thumb2/float-cmp.ll index 87d6ad36531..ca9326ad66a 100644 --- a/test/CodeGen/Thumb2/float-cmp.ll +++ b/test/CodeGen/Thumb2/float-cmp.ll @@ -23,7 +23,7 @@ define i1 @cmp_f_oeq(float %a, float %b) { define i1 @cmp_f_ogt(float %a, float %b) { ; CHECK-LABEL: cmp_f_ogt: ; NONE: bl __aeabi_fcmpgt -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movgt r0, #1 %1 = fcmp ogt float %a, %b ret i1 %1 @@ -31,7 +31,7 @@ define i1 @cmp_f_ogt(float %a, float %b) { define i1 @cmp_f_oge(float %a, float %b) { ; CHECK-LABEL: cmp_f_oge: ; NONE: bl __aeabi_fcmpge -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movge r0, #1 %1 = fcmp oge float %a, %b ret i1 %1 @@ -39,7 +39,7 @@ define i1 @cmp_f_oge(float %a, float %b) { define i1 @cmp_f_olt(float %a, float %b) { ; CHECK-LABEL: cmp_f_olt: ; NONE: bl __aeabi_fcmplt -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movmi r0, #1 %1 = fcmp olt float %a, %b ret i1 %1 @@ -47,7 +47,7 @@ define i1 @cmp_f_olt(float %a, float %b) { define i1 @cmp_f_ole(float %a, float %b) { ; CHECK-LABEL: cmp_f_ole: ; NONE: bl __aeabi_fcmple -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movls r0, #1 %1 = fcmp ole float %a, %b ret i1 %1 @@ -65,7 +65,7 @@ define i1 @cmp_f_one(float %a, float %b) { define i1 @cmp_f_ord(float %a, float %b) { ; CHECK-LABEL: cmp_f_ord: ; NONE: bl __aeabi_fcmpun -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movvc r0, #1 %1 = fcmp ord float %a, %b ret i1 %1 @@ -85,7 +85,7 @@ define i1 @cmp_f_ugt(float %a, float %b) { ; NONE: bl __aeabi_fcmple ; NONE-NEXT: clz r0, r0 ; NONE-NEXT: lsrs r0, r0, #5 -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movhi r0, #1 %1 = fcmp ugt float %a, %b ret i1 %1 @@ -95,7 +95,7 @@ define i1 @cmp_f_uge(float %a, float %b) { ; NONE: bl __aeabi_fcmplt ; NONE-NEXT: clz r0, r0 ; NONE-NEXT: lsrs r0, r0, #5 -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movpl r0, #1 %1 = fcmp uge float %a, %b ret i1 %1 @@ -105,7 +105,7 @@ define i1 @cmp_f_ult(float %a, float %b) { ; NONE: bl __aeabi_fcmpge ; NONE-NEXT: clz r0, r0 ; NONE-NEXT: lsrs r0, r0, #5 -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movlt r0, #1 %1 = fcmp ult float %a, %b ret i1 %1 @@ -115,7 +115,7 @@ define i1 @cmp_f_ule(float %a, float %b) { ; NONE: bl __aeabi_fcmpgt ; NONE-NEXT: clz r0, r0 ; NONE-NEXT: lsrs r0, r0, #5 -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movle r0, #1 %1 = fcmp ule float %a, %b ret i1 %1 @@ -131,7 +131,7 @@ define i1 @cmp_f_une(float %a, float %b) { define i1 @cmp_f_uno(float %a, float %b) { ; CHECK-LABEL: cmp_f_uno: ; NONE: bl __aeabi_fcmpun -; HARD: vcmpe.f32 +; HARD: vcmp.f32 ; HARD: movvs r0, #1 %1 = fcmp uno float %a, %b ret i1 %1 @@ -164,7 +164,7 @@ define i1 @cmp_d_ogt(double %a, double %b) { ; CHECK-LABEL: cmp_d_ogt: ; NONE: bl __aeabi_dcmpgt ; SP: bl __aeabi_dcmpgt -; 
DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movgt r0, #1 %1 = fcmp ogt double %a, %b ret i1 %1 @@ -173,7 +173,7 @@ define i1 @cmp_d_oge(double %a, double %b) { ; CHECK-LABEL: cmp_d_oge: ; NONE: bl __aeabi_dcmpge ; SP: bl __aeabi_dcmpge -; DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movge r0, #1 %1 = fcmp oge double %a, %b ret i1 %1 @@ -182,7 +182,7 @@ define i1 @cmp_d_olt(double %a, double %b) { ; CHECK-LABEL: cmp_d_olt: ; NONE: bl __aeabi_dcmplt ; SP: bl __aeabi_dcmplt -; DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movmi r0, #1 %1 = fcmp olt double %a, %b ret i1 %1 @@ -191,7 +191,7 @@ define i1 @cmp_d_ole(double %a, double %b) { ; CHECK-LABEL: cmp_d_ole: ; NONE: bl __aeabi_dcmple ; SP: bl __aeabi_dcmple -; DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movls r0, #1 %1 = fcmp ole double %a, %b ret i1 %1 @@ -212,7 +212,7 @@ define i1 @cmp_d_ord(double %a, double %b) { ; CHECK-LABEL: cmp_d_ord: ; NONE: bl __aeabi_dcmpun ; SP: bl __aeabi_dcmpun -; DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movvc r0, #1 %1 = fcmp ord double %a, %b ret i1 %1 @@ -221,7 +221,7 @@ define i1 @cmp_d_ugt(double %a, double %b) { ; CHECK-LABEL: cmp_d_ugt: ; NONE: bl __aeabi_dcmple ; SP: bl __aeabi_dcmple -; DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movhi r0, #1 %1 = fcmp ugt double %a, %b ret i1 %1 @@ -231,7 +231,7 @@ define i1 @cmp_d_ult(double %a, double %b) { ; CHECK-LABEL: cmp_d_ult: ; NONE: bl __aeabi_dcmpge ; SP: bl __aeabi_dcmpge -; DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movlt r0, #1 %1 = fcmp ult double %a, %b ret i1 %1 @@ -242,7 +242,7 @@ define i1 @cmp_d_uno(double %a, double %b) { ; CHECK-LABEL: cmp_d_uno: ; NONE: bl __aeabi_dcmpun ; SP: bl __aeabi_dcmpun -; DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movvs r0, #1 %1 = fcmp uno double %a, %b ret i1 %1 @@ -271,7 +271,7 @@ define i1 @cmp_d_uge(double %a, double %b) { ; CHECK-LABEL: cmp_d_uge: ; NONE: bl __aeabi_dcmplt ; SP: bl __aeabi_dcmplt -; DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movpl r0, #1 %1 = fcmp uge double %a, %b ret i1 %1 @@ -281,7 +281,7 @@ define i1 @cmp_d_ule(double %a, double %b) { ; CHECK-LABEL: cmp_d_ule: ; NONE: bl __aeabi_dcmpgt ; SP: bl __aeabi_dcmpgt -; DP: vcmpe.f64 +; DP: vcmp.f64 ; DP: movle r0, #1 %1 = fcmp ule double %a, %b ret i1 %1 diff --git a/test/CodeGen/Thumb2/mve-vcmpf.ll b/test/CodeGen/Thumb2/mve-vcmpf.ll index 0786849dae2..9e793caac3d 100644 --- a/test/CodeGen/Thumb2/mve-vcmpf.ll +++ b/test/CodeGen/Thumb2/mve-vcmpf.ll @@ -121,24 +121,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ogt_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ogt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -173,24 +173,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_oge_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; 
CHECK-MVE-LABEL: vcmp_oge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -225,24 +225,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_olt_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_olt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -277,24 +277,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ole_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ole_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -444,24 +444,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ugt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: 
vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -497,24 +497,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_uge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -550,24 +550,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ult_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -603,24 +603,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ule_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -656,24 +656,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x 
float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ord_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -710,24 +710,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_uno_v4f32(<4 x float> %src, <4 x float> %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_uno_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s5 +; CHECK-MVE-NEXT: vcmp.f32 s1, s5 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s6 +; CHECK-MVE-NEXT: vcmp.f32 s2, s6 ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s7 +; CHECK-MVE-NEXT: vcmp.f32 s3, s7 ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -1035,13 +1035,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -1054,7 +1054,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movgt r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ -1074,7 +1074,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1086,7 +1086,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: 
vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -1101,7 +1101,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1113,7 +1113,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -1122,7 +1122,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 @@ -1159,13 +1159,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -1178,7 +1178,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movge r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ -1198,7 +1198,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1210,7 +1210,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -1225,7 +1225,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, 
#0 @@ -1237,7 +1237,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -1246,7 +1246,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movge r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 @@ -1283,13 +1283,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -1302,7 +1302,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movmi r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ -1322,7 +1322,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1334,7 +1334,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -1349,7 +1349,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1361,7 +1361,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -1370,7 +1370,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; 
CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 @@ -1407,13 +1407,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -1426,7 +1426,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movls r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ -1446,7 +1446,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1458,7 +1458,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -1473,7 +1473,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1485,7 +1485,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -1494,7 +1494,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movls r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 @@ -1796,13 +1796,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi 
r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -1815,7 +1815,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movhi r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ -1835,7 +1835,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1847,7 +1847,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -1862,7 +1862,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1874,7 +1874,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -1883,7 +1883,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movhi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 @@ -1921,13 +1921,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -1940,7 +1940,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movpl r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ 
-1960,7 +1960,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1972,7 +1972,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -1987,7 +1987,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1999,7 +1999,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -2008,7 +2008,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movpl r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 @@ -2046,13 +2046,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -2065,7 +2065,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movlt r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ -2085,7 +2085,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2097,7 +2097,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 
s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -2112,7 +2112,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2124,7 +2124,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -2133,7 +2133,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movlt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 @@ -2171,13 +2171,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -2190,7 +2190,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movle r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ -2210,7 +2210,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2222,7 +2222,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -2237,7 +2237,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ 
-2249,7 +2249,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -2258,7 +2258,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movle r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 @@ -2296,13 +2296,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -2315,7 +2315,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movvc r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ -2335,7 +2335,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2347,7 +2347,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -2362,7 +2362,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2374,7 +2374,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -2383,7 +2383,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movvc r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; 
CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 @@ -2422,13 +2422,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vpush {d8, d9, d10, d11} ; CHECK-MVE-NEXT: vmovx.f16 s16, s4 ; CHECK-MVE-NEXT: vmovx.f16 s18, s0 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s8 ; CHECK-MVE-NEXT: vmovx.f16 s18, s12 @@ -2441,7 +2441,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movvs r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne -; CHECK-MVE-NEXT: vcmpe.f16 s1, s5 +; CHECK-MVE-NEXT: vcmp.f16 s1, s5 ; CHECK-MVE-NEXT: lsls r2, r2, #31 ; CHECK-MVE-NEXT: vmovx.f16 s22, s1 ; CHECK-MVE-NEXT: vseleq.f16 s16, s12, s8 @@ -2461,7 +2461,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s13, s9 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s5 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2473,7 +2473,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s13 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s6 +; CHECK-MVE-NEXT: vcmp.f16 s2, s6 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[3], r1 @@ -2488,7 +2488,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vseleq.f16 s20, s14, s10 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmovx.f16 s20, s6 -; CHECK-MVE-NEXT: vcmpe.f16 s22, s20 +; CHECK-MVE-NEXT: vcmp.f16 s22, s20 ; CHECK-MVE-NEXT: vmov.16 q4[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2500,7 +2500,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: vmovx.f16 s22, s14 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vseleq.f16 s20, s22, s20 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s7 +; CHECK-MVE-NEXT: vcmp.f16 s3, s7 ; CHECK-MVE-NEXT: vmov r1, s20 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov.16 q4[5], r1 @@ -2509,7 +2509,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %s ; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 ; CHECK-MVE-NEXT: cset r1, ne -; CHECK-MVE-NEXT: vcmpe.f16 s0, s4 +; CHECK-MVE-NEXT: vcmp.f16 s0, s4 ; CHECK-MVE-NEXT: lsls r1, r1, #31 ; CHECK-MVE-NEXT: vmovx.f16 s0, s11 ; CHECK-MVE-NEXT: vseleq.f16 s20, s15, s11 diff --git a/test/CodeGen/Thumb2/mve-vcmpfr.ll b/test/CodeGen/Thumb2/mve-vcmpfr.ll index 608689dc465..66d90c892e5 100644 --- a/test/CodeGen/Thumb2/mve-vcmpfr.ll +++ b/test/CodeGen/Thumb2/mve-vcmpfr.ll @@ -128,24 +128,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ogt_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ogt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry 
-; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -183,24 +183,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_oge_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_oge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -238,24 +238,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_olt_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_olt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -294,24 +294,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ole_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ole_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it ls ; 
CHECK-MVE-NEXT: movls r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -472,24 +472,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ugt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -529,24 +529,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_uge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -586,24 +586,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ult_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -642,24 +642,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ule_v4f32: ; 
CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -698,24 +698,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ord_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -756,24 +756,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_uno_v4f32(<4 x float> %src, float %src2, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_uno_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s4 +; CHECK-MVE-NEXT: vcmp.f32 s0, s4 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s4 +; CHECK-MVE-NEXT: vcmp.f32 s1, s4 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s4 +; CHECK-MVE-NEXT: vcmp.f32 s2, s4 ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s4 +; CHECK-MVE-NEXT: vcmp.f32 s3, s4 ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -1092,13 +1092,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -1111,7 +1111,7 @@ define arm_aapcs_vfpcc <8 x half> 
@vcmp_ogt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1128,7 +1128,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1138,7 +1138,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -1153,7 +1153,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1163,11 +1163,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it gt @@ -1218,13 +1218,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -1237,7 +1237,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1254,7 +1254,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1264,7 +1264,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, half* %src2p, ; 
CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -1279,7 +1279,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1289,11 +1289,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it ge @@ -1344,13 +1344,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -1363,7 +1363,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1380,7 +1380,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1390,7 +1390,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -1405,7 +1405,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1415,11 +1415,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; 
CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it mi @@ -1471,13 +1471,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -1490,7 +1490,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1507,7 +1507,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1517,7 +1517,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -1532,7 +1532,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1542,11 +1542,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it ls @@ -1868,13 +1868,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi 
r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -1887,7 +1887,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1904,7 +1904,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1914,7 +1914,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -1929,7 +1929,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -1939,11 +1939,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it hi @@ -1996,13 +1996,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -2015,7 +2015,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -2032,7 +2032,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: 
vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2042,7 +2042,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -2057,7 +2057,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2067,11 +2067,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it pl @@ -2124,13 +2124,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -2143,7 +2143,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -2160,7 +2160,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2170,7 +2170,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -2185,7 +2185,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: 
vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2195,11 +2195,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it lt @@ -2251,13 +2251,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -2270,7 +2270,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -2287,7 +2287,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2297,7 +2297,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -2312,7 +2312,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2322,11 +2322,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it le @@ -2378,13 +2378,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; 
CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -2397,7 +2397,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -2414,7 +2414,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2424,7 +2424,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -2439,7 +2439,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2449,11 +2449,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it vc @@ -2507,13 +2507,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r0, #0 ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s16 +; CHECK-MVE-NEXT: vcmp.f16 s12, s16 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r0, #1 ; CHECK-MVE-NEXT: cmp r0, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: movs r2, #0 ; CHECK-MVE-NEXT: lsls r0, r0, #31 @@ -2526,7 +2526,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r0, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s16 +; CHECK-MVE-NEXT: vcmp.f16 s1, s16 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; 
CHECK-MVE-NEXT: vmov r2, s12 @@ -2543,7 +2543,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s9, s5 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2553,7 +2553,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s5 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s16 +; CHECK-MVE-NEXT: vcmp.f16 s2, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 @@ -2568,7 +2568,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: vseleq.f16 s18, s10, s6 ; CHECK-MVE-NEXT: vmov r0, s18 ; CHECK-MVE-NEXT: vmovx.f16 s18, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s18, s16 +; CHECK-MVE-NEXT: vcmp.f16 s18, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r0, #0 @@ -2578,11 +2578,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, half* %src2p, ; CHECK-MVE-NEXT: cset r0, ne ; CHECK-MVE-NEXT: vmovx.f16 s18, s6 ; CHECK-MVE-NEXT: lsls r0, r0, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s16 +; CHECK-MVE-NEXT: vcmp.f16 s3, s16 ; CHECK-MVE-NEXT: vseleq.f16 s18, s20, s18 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r0, s18 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s16 +; CHECK-MVE-NEXT: vcmp.f16 s0, s16 ; CHECK-MVE-NEXT: vmov.16 q3[5], r0 ; CHECK-MVE-NEXT: mov.w r0, #0 ; CHECK-MVE-NEXT: it vs diff --git a/test/CodeGen/Thumb2/mve-vcmpfz.ll b/test/CodeGen/Thumb2/mve-vcmpfz.ll index 126e00a31a8..6aae7e7665a 100644 --- a/test/CodeGen/Thumb2/mve-vcmpfz.ll +++ b/test/CodeGen/Thumb2/mve-vcmpfz.ll @@ -122,24 +122,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ogt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ogt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, #0 +; CHECK-MVE-NEXT: vcmp.f32 s0, #0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, #0 +; CHECK-MVE-NEXT: vcmp.f32 s1, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, #0 +; CHECK-MVE-NEXT: vcmp.f32 s2, #0 ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, #0 +; CHECK-MVE-NEXT: vcmp.f32 s3, #0 ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -174,24 +174,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_oge_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_oge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, #0 +; CHECK-MVE-NEXT: vcmp.f32 s0, #0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, #0 +; CHECK-MVE-NEXT: vcmp.f32 s1, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: 
mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, #0 +; CHECK-MVE-NEXT: vcmp.f32 s2, #0 ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, #0 +; CHECK-MVE-NEXT: vcmp.f32 s3, #0 ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -226,24 +226,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_olt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_olt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, #0 +; CHECK-MVE-NEXT: vcmp.f32 s0, #0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, #0 +; CHECK-MVE-NEXT: vcmp.f32 s1, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, #0 +; CHECK-MVE-NEXT: vcmp.f32 s2, #0 ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, #0 +; CHECK-MVE-NEXT: vcmp.f32 s3, #0 ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -278,24 +278,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ole_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ole_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, #0 +; CHECK-MVE-NEXT: vcmp.f32 s0, #0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, #0 +; CHECK-MVE-NEXT: vcmp.f32 s1, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, #0 +; CHECK-MVE-NEXT: vcmp.f32 s2, #0 ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, #0 +; CHECK-MVE-NEXT: vcmp.f32 s3, #0 ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -446,24 +446,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ugt_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ugt_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, #0 +; CHECK-MVE-NEXT: vcmp.f32 s0, #0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, #0 +; CHECK-MVE-NEXT: vcmp.f32 s1, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, #0 +; CHECK-MVE-NEXT: vcmp.f32 s2, #0 ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, #0 +; CHECK-MVE-NEXT: vcmp.f32 s3, #0 ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -499,24 +499,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_uge_v4f32(<4 x float> %src, <4 x float> 
%a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_uge_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, #0 +; CHECK-MVE-NEXT: vcmp.f32 s0, #0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, #0 +; CHECK-MVE-NEXT: vcmp.f32 s1, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, #0 +; CHECK-MVE-NEXT: vcmp.f32 s2, #0 ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, #0 +; CHECK-MVE-NEXT: vcmp.f32 s3, #0 ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -552,24 +552,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ult_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ult_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, #0 +; CHECK-MVE-NEXT: vcmp.f32 s0, #0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, #0 +; CHECK-MVE-NEXT: vcmp.f32 s1, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, #0 +; CHECK-MVE-NEXT: vcmp.f32 s2, #0 ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, #0 +; CHECK-MVE-NEXT: vcmp.f32 s3, #0 ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -605,24 +605,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ule_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ule_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, #0 +; CHECK-MVE-NEXT: vcmp.f32 s0, #0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, #0 +; CHECK-MVE-NEXT: vcmp.f32 s1, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, #0 +; CHECK-MVE-NEXT: vcmp.f32 s2, #0 ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, #0 +; CHECK-MVE-NEXT: vcmp.f32 s3, #0 ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -658,24 +658,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_ord_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_ord_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s0 +; CHECK-MVE-NEXT: vcmp.f32 s0, s0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s1 +; CHECK-MVE-NEXT: vcmp.f32 s1, s1 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s2 +; 
CHECK-MVE-NEXT: vcmp.f32 s2, s2 ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s3 +; CHECK-MVE-NEXT: vcmp.f32 s3, s3 ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -713,24 +713,24 @@ entry: define arm_aapcs_vfpcc <4 x float> @vcmp_uno_v4f32(<4 x float> %src, <4 x float> %a, <4 x float> %b) { ; CHECK-MVE-LABEL: vcmp_uno_v4f32: ; CHECK-MVE: @ %bb.0: @ %entry -; CHECK-MVE-NEXT: vcmpe.f32 s0, s0 +; CHECK-MVE-NEXT: vcmp.f32 s0, s0 ; CHECK-MVE-NEXT: movs r1, #0 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s1, s1 +; CHECK-MVE-NEXT: vcmp.f32 s1, s1 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r2, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s2, s2 +; CHECK-MVE-NEXT: vcmp.f32 s2, s2 ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r2, #1 ; CHECK-MVE-NEXT: cmp r2, #0 ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r3, #0 -; CHECK-MVE-NEXT: vcmpe.f32 s3, s3 +; CHECK-MVE-NEXT: vcmp.f32 s3, s3 ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r3, #1 ; CHECK-MVE-NEXT: cmp r3, #0 @@ -1032,13 +1032,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, #0 +; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it gt ; CHECK-MVE-NEXT: movgt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -1051,7 +1051,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s1, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1069,7 +1069,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1079,7 +1079,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, #0 +; CHECK-MVE-NEXT: vcmp.f16 s2, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 @@ -1094,7 +1094,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1104,11 
+1104,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ogt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, #0 +; CHECK-MVE-NEXT: vcmp.f16 s3, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it gt @@ -1152,13 +1152,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, #0 +; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ge ; CHECK-MVE-NEXT: movge r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -1171,7 +1171,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s1, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1189,7 +1189,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1199,7 +1199,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, #0 +; CHECK-MVE-NEXT: vcmp.f16 s2, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 @@ -1214,7 +1214,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1224,11 +1224,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_oge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, #0 +; CHECK-MVE-NEXT: vcmp.f16 s3, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it ge @@ -1272,13 +1272,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, #0 +; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: 
vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it mi ; CHECK-MVE-NEXT: movmi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -1291,7 +1291,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s1, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1309,7 +1309,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1319,7 +1319,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, #0 +; CHECK-MVE-NEXT: vcmp.f16 s2, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 @@ -1334,7 +1334,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1344,11 +1344,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_olt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, #0 +; CHECK-MVE-NEXT: vcmp.f16 s3, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it mi @@ -1392,13 +1392,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, #0 +; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it ls ; CHECK-MVE-NEXT: movls r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -1411,7 +1411,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s1, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1429,7 +1429,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, 
s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1439,7 +1439,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, #0 +; CHECK-MVE-NEXT: vcmp.f16 s2, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 @@ -1454,7 +1454,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1464,11 +1464,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ole_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, #0 +; CHECK-MVE-NEXT: vcmp.f16 s3, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it ls @@ -1770,13 +1770,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, #0 +; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it hi ; CHECK-MVE-NEXT: movhi r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -1789,7 +1789,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s1, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1807,7 +1807,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1817,7 +1817,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, #0 +; CHECK-MVE-NEXT: vcmp.f16 s2, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 @@ -1832,7 +1832,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: 
vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1842,11 +1842,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ugt_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, #0 +; CHECK-MVE-NEXT: vcmp.f16 s3, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it hi @@ -1891,13 +1891,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, #0 +; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it pl ; CHECK-MVE-NEXT: movpl r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -1910,7 +1910,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s1, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -1928,7 +1928,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1938,7 +1938,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, #0 +; CHECK-MVE-NEXT: vcmp.f16 s2, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 @@ -1953,7 +1953,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -1963,11 +1963,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uge_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, #0 +; CHECK-MVE-NEXT: vcmp.f16 s3, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it pl @@ -2012,13 +2012,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> 
%src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, #0 +; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it lt ; CHECK-MVE-NEXT: movlt r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -2031,7 +2031,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s1, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -2049,7 +2049,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2059,7 +2059,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, #0 +; CHECK-MVE-NEXT: vcmp.f16 s2, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 @@ -2074,7 +2074,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2084,11 +2084,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ult_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, #0 +; CHECK-MVE-NEXT: vcmp.f16 s3, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it lt @@ -2133,13 +2133,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, #0 +; CHECK-MVE-NEXT: vcmp.f16 s12, #0 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it le ; CHECK-MVE-NEXT: movle r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -2152,7 +2152,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, #0 +; CHECK-MVE-NEXT: vcmp.f16 s1, #0 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; 
CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -2170,7 +2170,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2180,7 +2180,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, #0 +; CHECK-MVE-NEXT: vcmp.f16 s2, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 @@ -2195,7 +2195,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, #0 +; CHECK-MVE-NEXT: vcmp.f16 s16, #0 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2205,11 +2205,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ule_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, #0 +; CHECK-MVE-NEXT: vcmp.f16 s3, #0 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: vcmpe.f16 s0, #0 +; CHECK-MVE-NEXT: vcmp.f16 s0, #0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it le @@ -2254,13 +2254,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s12 +; CHECK-MVE-NEXT: vcmp.f16 s12, s12 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vc ; CHECK-MVE-NEXT: movvc r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s0 +; CHECK-MVE-NEXT: vcmp.f16 s0, s0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -2273,7 +2273,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s1 +; CHECK-MVE-NEXT: vcmp.f16 s1, s1 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -2291,7 +2291,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, s16 +; CHECK-MVE-NEXT: vcmp.f16 s16, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2301,7 +2301,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s2 +; CHECK-MVE-NEXT: vcmp.f16 s2, s2 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; 
CHECK-MVE-NEXT: vmov r1, s16 @@ -2316,7 +2316,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, s16 +; CHECK-MVE-NEXT: vcmp.f16 s16, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2326,11 +2326,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_ord_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s3 +; CHECK-MVE-NEXT: vcmp.f16 s3, s3 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s0 +; CHECK-MVE-NEXT: vcmp.f16 s0, s0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it vc @@ -2377,13 +2377,13 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vpush {d8, d9} ; CHECK-MVE-NEXT: vmovx.f16 s12, s0 ; CHECK-MVE-NEXT: movs r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s12, s12 +; CHECK-MVE-NEXT: vcmp.f16 s12, s12 ; CHECK-MVE-NEXT: vmovx.f16 s12, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: it vs ; CHECK-MVE-NEXT: movvs r1, #1 ; CHECK-MVE-NEXT: cmp r1, #0 -; CHECK-MVE-NEXT: vcmpe.f16 s0, s0 +; CHECK-MVE-NEXT: vcmp.f16 s0, s0 ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s14, s8 ; CHECK-MVE-NEXT: lsls r1, r1, #31 @@ -2396,7 +2396,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r2, ne ; CHECK-MVE-NEXT: vmov r1, s12 ; CHECK-MVE-NEXT: lsls r2, r2, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s1, s1 +; CHECK-MVE-NEXT: vcmp.f16 s1, s1 ; CHECK-MVE-NEXT: vseleq.f16 s12, s8, s4 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r2, s12 @@ -2414,7 +2414,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s9, s5 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s1 -; CHECK-MVE-NEXT: vcmpe.f16 s16, s16 +; CHECK-MVE-NEXT: vcmp.f16 s16, s16 ; CHECK-MVE-NEXT: vmov.16 q3[2], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2424,7 +2424,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s5 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s2, s2 +; CHECK-MVE-NEXT: vcmp.f16 s2, s2 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 @@ -2439,7 +2439,7 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: vseleq.f16 s16, s10, s6 ; CHECK-MVE-NEXT: vmov r1, s16 ; CHECK-MVE-NEXT: vmovx.f16 s16, s2 -; CHECK-MVE-NEXT: vcmpe.f16 s16, s16 +; CHECK-MVE-NEXT: vcmp.f16 s16, s16 ; CHECK-MVE-NEXT: vmov.16 q3[4], r1 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: mov.w r1, #0 @@ -2449,11 +2449,11 @@ define arm_aapcs_vfpcc <8 x half> @vcmp_uno_v8f16(<8 x half> %src, <8 x half> %a ; CHECK-MVE-NEXT: cset r1, ne ; CHECK-MVE-NEXT: vmovx.f16 s16, s6 ; CHECK-MVE-NEXT: lsls r1, r1, #31 -; CHECK-MVE-NEXT: vcmpe.f16 s3, s3 +; CHECK-MVE-NEXT: vcmp.f16 s3, s3 ; CHECK-MVE-NEXT: vseleq.f16 s16, s18, s16 ; CHECK-MVE-NEXT: vmrs APSR_nzcv, fpscr ; CHECK-MVE-NEXT: vmov r1, s16 -; CHECK-MVE-NEXT: 
vcmpe.f16 s0, s0 +; CHECK-MVE-NEXT: vcmp.f16 s0, s0 ; CHECK-MVE-NEXT: vmov.16 q3[5], r1 ; CHECK-MVE-NEXT: mov.w r1, #0 ; CHECK-MVE-NEXT: it vs -- 2.40.0