From 2edda5220da6d627835088ddd969b20b563cae88 Mon Sep 17 00:00:00 2001
From: Simon Tatham
Date: Tue, 28 May 2019 16:13:20 +0000
Subject: [PATCH] [ARM] Replace fp-only-sp and d16 with fp64 and d32.

Those two subtarget features were awkward because their semantics are
reversed: each one indicates the _lack_ of support for something in the
architecture, rather than the presence. As a consequence, you don't get
the behavior you want if you combine two sets of feature bits.

Each SubtargetFeature for an FP architecture version now comes in four
versions, one for each combination of those options. So you can still
say (for example) '+vfp2' in a feature string and it will mean what it's
always meant, but there's a new string '+vfp2d16sp' meaning the version
without those extra options.

A lot of this change is just mechanically replacing positive checks for
the old features with negative checks for the new ones. One more
interesting change is that I've rearranged getFPUFeatures() so that the
main FPU feature is appended to the output list *before* rather than
after the features derived from the Restriction field, so that -fp64
and -d32 can override defaults added by the main feature.

Reviewers: dmgreen, samparker, SjoerdMeijer

Subscribers: srhines, javed.absar, eraman, kristof.beyls, hiraditya,
zzheng, Petar.Avramovic, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D60691

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@361845 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/MC/MCSubtargetInfo.h              |  4 +
 lib/MC/MCSubtargetInfo.cpp                     | 17 ++++
 lib/Object/ELFObjectFile.cpp                   |  6 +-
 lib/Support/ARMTargetParser.cpp                | 39 +++++----
 lib/Target/ARM/ARM.td                          | 86 ++++++++++---
 lib/Target/ARM/ARMAsmPrinter.cpp               |  4 +-
 lib/Target/ARM/ARMBaseInstrInfo.cpp            |  9 +-
 lib/Target/ARM/ARMBaseRegisterInfo.cpp         |  4 +-
 lib/Target/ARM/ARMFastISel.cpp                 | 40 +++++----
 lib/Target/ARM/ARMISelDAGToDAG.cpp             |  6 +-
 lib/Target/ARM/ARMISelLowering.cpp             | 70 +++++++--------
 lib/Target/ARM/ARMInstrInfo.td                 | 24 +++---
 lib/Target/ARM/ARMInstructionSelector.cpp      | 11 +--
 lib/Target/ARM/ARMLegalizerInfo.cpp            |  4 +-
 lib/Target/ARM/ARMRegisterBankInfo.cpp         |  2 +-
 lib/Target/ARM/ARMSubtarget.h                  | 35 +++++---
 lib/Target/ARM/ARMTargetTransformInfo.h        |  2 +-
 lib/Target/ARM/AsmParser/ARMAsmParser.cpp      | 31 +++----
 .../ARM/Disassembler/ARMDisassembler.cpp       |  4 +-
 .../ARM/MCTargetDesc/ARMTargetStreamer.cpp     | 46 +++++-----
 .../GlobalISel/arm-legalize-load-store.mir     |  2 +-
 test/CodeGen/ARM/arm-storebytesmerge.ll        |  2 +-
 test/CodeGen/ARM/arm32-rounding.ll             |  4 +-
 test/CodeGen/ARM/build-attributes.ll           | 38 ++++----
 test/CodeGen/ARM/fast-isel-call.ll             |  6 +-
 test/CodeGen/ARM/float-helpers.s               |  4 +-
 test/CodeGen/ARM/fp-only-sp.ll                 |  2 +-
 test/CodeGen/ARM/fp16-instructions.ll          | 16 ++--
 test/CodeGen/ARM/fp16-promote.ll               |  2 +-
 test/CodeGen/ARM/fpconv.ll                     |  2 +-
 test/CodeGen/ARM/half.ll                       |  2 +-
 test/CodeGen/ARM/inlineasm-X-allocation.ll     |  2 +-
 .../ARM/inlineasm-operand-implicit-cast.ll     |  4 +-
 test/CodeGen/ARM/no-fpu.ll                     |  4 +-
 test/CodeGen/Thumb2/aapcs.ll                   |  2 +-
 .../CodeGen/Thumb2/float-intrinsics-double.ll  |  2 +-
 test/CodeGen/Thumb2/float-intrinsics-float.ll  |  2 +-
 test/CodeGen/Thumb2/t2sizereduction.mir        |  2 +-
 test/MC/ARM/armv8.3a-js.s                      |  4 +-
 test/MC/ARM/d16.s                              |  4 +-
 test/MC/ARM/invalid-neon-v8.s                  |  2 +-
 test/MC/ARM/single-precision-fp.s              |  2 +-
 test/MC/ARM/vldm-vstm-diags.s                  |  2 +-
 test/Transforms/Inline/ARM/inline-fp.ll        |  2 +-
 .../LoopUnroll/runtime-epilog-debuginfo.ll     |  2 +-
 45 files changed, 307 insertions(+), 253 deletions(-)
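To see the intended semantics end to end, here is a minimal sketch (an
editorial illustration, not part of the patch) that builds two ARM
subtargets through the usual TargetRegistry entry points and queries them
with MCSubtargetInfo::checkFeatures(), whose contract is quoted in the
header hunk below. The triple and CPU name are arbitrary choices; the
feature strings are the ones this patch defines.

    // Sketch only: assumes an LLVM tree of this vintage, built with the
    // ARM target enabled. Error handling is minimal.
    #include "llvm/MC/MCSubtargetInfo.h"
    #include "llvm/Support/TargetRegistry.h"
    #include "llvm/Support/TargetSelect.h"
    #include <memory>
    #include <string>

    int main() {
      llvm::InitializeAllTargetInfos();
      llvm::InitializeAllTargetMCs();

      std::string Err;
      const llvm::Target *T =
          llvm::TargetRegistry::lookupTarget("armv7-linux-gnueabi", Err);
      if (!T)
        return 1;

      // '+vfp2' means what it has always meant: full VFPv2. Under the new
      // scheme it transitively implies the positive features fp64 and d32
      // on top of the vfp2d16sp base feature.
      std::unique_ptr<llvm::MCSubtargetInfo> Full(
          T->createMCSubtargetInfo("armv7-linux-gnueabi", "generic", "+vfp2"));
      bool FullOK = Full->checkFeatures("+vfp2d16sp,+fp64,+d32");

      // '+vfp2d16sp' is the new restricted spelling: single precision only
      // and 16 d-registers, so fp64 and d32 stay clear.
      std::unique_ptr<llvm::MCSubtargetInfo> Restricted(
          T->createMCSubtargetInfo("armv7-linux-gnueabi", "generic",
                                   "+vfp2d16sp"));
      bool RestrictedOK = Restricted->checkFeatures("-fp64,-d32");

      return (FullOK && RestrictedOK) ? 0 : 2;
    }

If both checks hold, feature sets compose the way the first paragraph of
the commit message asks for, because every implication now points from
"more capable" to "less capable" rather than the other way around.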
diff --git a/include/llvm/MC/MCSubtargetInfo.h b/include/llvm/MC/MCSubtargetInfo.h
index 767a7abcbaf..9490a6eceda 100644
--- a/include/llvm/MC/MCSubtargetInfo.h
+++ b/include/llvm/MC/MCSubtargetInfo.h
@@ -141,6 +141,10 @@ public:
   /// all feature bits implied by the flag.
   FeatureBitset ApplyFeatureFlag(StringRef FS);
 
+  /// Set/clear additional feature bits, including all other bits they imply.
+  FeatureBitset SetFeatureBitsTransitively(const FeatureBitset& FB);
+  FeatureBitset ClearFeatureBitsTransitively(const FeatureBitset &FB);
+
   /// Check whether the subtarget features are enabled/disabled as per
   /// the provided string, ignoring all other features.
   bool checkFeatures(StringRef FS) const;
diff --git a/lib/MC/MCSubtargetInfo.cpp b/lib/MC/MCSubtargetInfo.cpp
index 2af8a5559b5..9b73800978c 100644
--- a/lib/MC/MCSubtargetInfo.cpp
+++ b/lib/MC/MCSubtargetInfo.cpp
@@ -195,6 +195,23 @@ FeatureBitset MCSubtargetInfo::ToggleFeature(const FeatureBitset &FB) {
   return FeatureBits;
 }
 
+FeatureBitset MCSubtargetInfo::SetFeatureBitsTransitively(
+    const FeatureBitset &FB) {
+  SetImpliedBits(FeatureBits, FB, ProcFeatures);
+  return FeatureBits;
+}
+
+FeatureBitset MCSubtargetInfo::ClearFeatureBitsTransitively(
+    const FeatureBitset &FB) {
+  for (unsigned I = 0, E = FB.size(); I < E; I++) {
+    if (FB[I]) {
+      FeatureBits.reset(I);
+      ClearImpliedBits(FeatureBits, I, ProcFeatures);
+    }
+  }
+  return FeatureBits;
+}
+
 FeatureBitset MCSubtargetInfo::ToggleFeature(StringRef Feature) {
   // Find feature in table.
   const SubtargetFeatureKV *FeatureEntry =
diff --git a/lib/Object/ELFObjectFile.cpp b/lib/Object/ELFObjectFile.cpp
index c0ac7a357f8..1c3469b5971 100644
--- a/lib/Object/ELFObjectFile.cpp
+++ b/lib/Object/ELFObjectFile.cpp
@@ -194,9 +194,9 @@ SubtargetFeatures ELFObjectFileBase::getARMFeatures() const {
     default:
       break;
     case ARMBuildAttrs::Not_Allowed:
-      Features.AddFeature("vfp2", false);
-      Features.AddFeature("vfp3", false);
-      Features.AddFeature("vfp4", false);
+      Features.AddFeature("vfp2d16sp", false);
+      Features.AddFeature("vfp3d16sp", false);
+      Features.AddFeature("vfp4d16sp", false);
       break;
     case ARMBuildAttrs::AllowFPv2:
       Features.AddFeature("vfp2");
diff --git a/lib/Support/ARMTargetParser.cpp b/lib/Support/ARMTargetParser.cpp
index c57da4cb202..02f0d95ff27 100644
--- a/lib/Support/ARMTargetParser.cpp
+++ b/lib/Support/ARMTargetParser.cpp
@@ -159,23 +159,6 @@ bool ARM::getFPUFeatures(unsigned FPUKind, std::vector &Features) {
   if (FPUKind >= FK_LAST || FPUKind == FK_INVALID)
     return false;
 
-  // fp-only-sp and d16 subtarget features are independent of each other, so we
-  // must enable/disable both.
-  switch (FPUNames[FPUKind].Restriction) {
-  case FPURestriction::SP_D16:
-    Features.push_back("+fp-only-sp");
-    Features.push_back("+d16");
-    break;
-  case FPURestriction::D16:
-    Features.push_back("-fp-only-sp");
-    Features.push_back("+d16");
-    break;
-  case FPURestriction::None:
-    Features.push_back("-fp-only-sp");
-    Features.push_back("-d16");
-    break;
-  }
-
   // FPU version subtarget features are inclusive of lower-numbered ones, so
   // enable the one corresponding to this version and disable all that are
   // higher. We also have to make sure to disable fp16 when vfp4 is disabled,
@@ -216,6 +199,28 @@ bool ARM::getFPUFeatures(unsigned FPUKind, std::vector &Features) {
     break;
   }
 
+  // fp64 and d32 subtarget features are independent of each other, so we
+  // must disable/enable both.
+  if (FPUKind == FK_NONE) {
+    Features.push_back("-fp64");
+    Features.push_back("-d32");
+  } else {
+    switch (FPUNames[FPUKind].Restriction) {
+    case FPURestriction::SP_D16:
+      Features.push_back("-fp64");
+      Features.push_back("-d32");
+      break;
+    case FPURestriction::D16:
+      Features.push_back("+fp64");
+      Features.push_back("-d32");
+      break;
+    case FPURestriction::None:
+      Features.push_back("+fp64");
+      Features.push_back("+d32");
+      break;
+    }
+  }
+
   // crypto includes neon, so we handle this similarly to FPU version.
   switch (FPUNames[FPUKind].NeonSupport) {
   case NeonSupportLevel::Crypto:
diff --git a/lib/Target/ARM/ARM.td b/lib/Target/ARM/ARM.td
index 48eba2246c5..20a61d343b3 100644
--- a/lib/Target/ARM/ARM.td
+++ b/lib/Target/ARM/ARM.td
@@ -32,12 +32,40 @@ def ModeSoftFloat : SubtargetFeature<"soft-float","UseSoftFloat",
 //
 // Floating Point, HW Division and Neon Support
 
-def FeatureVFP2 : SubtargetFeature<"vfp2", "HasVFPv2", "true",
-                                   "Enable VFP2 instructions">;
+def FeatureFP64 : SubtargetFeature<"fp64", "HasFP64", "true",
+                                   "Floating point unit supports "
+                                   "double precision">;
+
+def FeatureD32 : SubtargetFeature<"d32", "HasD32", "true",
+                                  "Extend FP to 32 double registers">;
+
+multiclass VFPver<string name, string query, string description,
+                  list<SubtargetFeature> prev = [],
+                  list<SubtargetFeature> otherimplies = []> {
+  def _D16_SP: SubtargetFeature<
+    name#"d16sp", query#"D16SP", "true",
+    description#" with only 16 d-registers and no double precision",
+    !foreach(v, prev, !cast<SubtargetFeature>(v # "_D16_SP")) # otherimplies>;
+  def _SP: SubtargetFeature<
+    name#"sp", query#"SP", "true",
+    description#" with no double precision",
+    !foreach(v, prev, !cast<SubtargetFeature>(v # "_SP")) #
+    otherimplies # [FeatureD32, !cast<SubtargetFeature>(NAME # "_D16_SP")]>;
+  def _D16: SubtargetFeature<
+    name#"d16", query#"D16", "true",
+    description#" with only 16 d-registers",
+    !foreach(v, prev, !cast<SubtargetFeature>(v # "_D16")) #
+    otherimplies # [FeatureFP64, !cast<SubtargetFeature>(NAME # "_D16_SP")]>;
+  def "": SubtargetFeature<
+    name, query, "true", description,
+    prev # otherimplies # [
+    !cast<SubtargetFeature>(NAME # "_D16"),
+    !cast<SubtargetFeature>(NAME # "_SP")]>;
+}
 
-def FeatureVFP3 : SubtargetFeature<"vfp3", "HasVFPv3", "true",
-                                   "Enable VFP3 instructions",
-                                   [FeatureVFP2]>;
+defm FeatureVFP2: VFPver<"vfp2", "HasVFPv2", "Enable VFP2 instructions">;
+defm FeatureVFP3: VFPver<"vfp3", "HasVFPv3", "Enable VFP3 instructions",
+                         [FeatureVFP2]>;
 
 def FeatureNEON : SubtargetFeature<"neon", "HasNEON", "true",
                                    "Enable NEON instructions",
@@ -47,31 +75,22 @@ def FeatureFP16 : SubtargetFeature<"fp16", "HasFP16", "true",
                                    "Enable half-precision "
                                    "floating point">;
 
-def FeatureVFP4 : SubtargetFeature<"vfp4", "HasVFPv4", "true",
-                                   "Enable VFP4 instructions",
-                                   [FeatureVFP3, FeatureFP16]>;
+defm FeatureVFP4: VFPver<"vfp4", "HasVFPv4", "Enable VFP4 instructions",
+                         [FeatureVFP3], [FeatureFP16]>;
 
-def FeatureFPARMv8 : SubtargetFeature<"fp-armv8", "HasFPARMv8",
-                                      "true", "Enable ARMv8 FP",
-                                      [FeatureVFP4]>;
+defm FeatureFPARMv8: VFPver<"fp-armv8", "HasFPARMv8", "Enable ARMv8 FP",
+                            [FeatureVFP4]>;
 
 def FeatureFullFP16 : SubtargetFeature<"fullfp16", "HasFullFP16", "true",
                                        "Enable full half-precision "
                                        "floating point",
-                                       [FeatureFPARMv8]>;
+                                       [FeatureFPARMv8_D16_SP]>;
 
 def FeatureFP16FML : SubtargetFeature<"fp16fml", "HasFP16FML", "true",
                                       "Enable full half-precision "
                                       "floating point fml instructions",
                                       [FeatureFullFP16]>;
 
-def FeatureVFPOnlySP : SubtargetFeature<"fp-only-sp", "FPOnlySP", "true",
-                                        "Floating point unit supports "
-                                        "single precision only">;
-
-def FeatureD16 : SubtargetFeature<"d16", "HasD16", "true",
-                                  "Restrict FP to 16 double registers">;
-
 def FeatureHWDivThumb :
SubtargetFeature<"hwdiv", "HasHardwareDivideInThumb", "true", "Enable divide instructions in Thumb">; @@ -943,14 +962,12 @@ def : ProcessorModel<"cortex-r4f", CortexA8Model, [ARMv7r, ProcR4, FeatureHasRetAddrStack, FeatureSlowFPBrcc, FeatureHasSlowFPVMLx, - FeatureVFP3, - FeatureD16, + FeatureVFP3_D16, FeatureAvoidPartialCPSR]>; def : ProcessorModel<"cortex-r5", CortexA8Model, [ARMv7r, ProcR5, FeatureHasRetAddrStack, - FeatureVFP3, - FeatureD16, + FeatureVFP3_D16, FeatureSlowFPBrcc, FeatureHWDivARM, FeatureHasSlowFPVMLx, @@ -958,8 +975,7 @@ def : ProcessorModel<"cortex-r5", CortexA8Model, [ARMv7r, ProcR5, def : ProcessorModel<"cortex-r7", CortexA8Model, [ARMv7r, ProcR7, FeatureHasRetAddrStack, - FeatureVFP3, - FeatureD16, + FeatureVFP3_D16, FeatureFP16, FeatureMP, FeatureSlowFPBrcc, @@ -969,8 +985,7 @@ def : ProcessorModel<"cortex-r7", CortexA8Model, [ARMv7r, ProcR7, def : ProcessorModel<"cortex-r8", CortexA8Model, [ARMv7r, FeatureHasRetAddrStack, - FeatureVFP3, - FeatureD16, + FeatureVFP3_D16, FeatureFP16, FeatureMP, FeatureSlowFPBrcc, @@ -991,10 +1006,8 @@ def : ProcessorModel<"sc300", CortexM4Model, [ARMv7m, FeatureUseAA, FeatureHasNoBranchPredictor]>; -def : ProcessorModel<"cortex-m4", CortexM4Model, [ARMv7em, - FeatureVFP4, - FeatureVFPOnlySP, - FeatureD16, +def : ProcessorModel<"cortex-m4", CortexM4Model, [ARMv7em, + FeatureVFP4_D16_SP, FeaturePrefLoopAlign32, FeatureHasSlowFPVMLx, FeatureUseMISched, @@ -1002,17 +1015,14 @@ def : ProcessorModel<"cortex-m4", CortexM4Model, [ARMv7em, FeatureHasNoBranchPredictor]>; def : ProcNoItin<"cortex-m7", [ARMv7em, - FeatureFPARMv8, - FeatureD16]>; + FeatureFPARMv8_D16]>; def : ProcNoItin<"cortex-m23", [ARMv8mBaseline, FeatureNoMovt]>; def : ProcessorModel<"cortex-m33", CortexM4Model, [ARMv8mMainline, FeatureDSP, - FeatureFPARMv8, - FeatureD16, - FeatureVFPOnlySP, + FeatureFPARMv8_D16_SP, FeaturePrefLoopAlign32, FeatureHasSlowFPVMLx, FeatureUseMISched, @@ -1021,9 +1031,7 @@ def : ProcessorModel<"cortex-m33", CortexM4Model, [ARMv8mMainline, def : ProcessorModel<"cortex-m35p", CortexM4Model, [ARMv8mMainline, FeatureDSP, - FeatureFPARMv8, - FeatureD16, - FeatureVFPOnlySP, + FeatureFPARMv8_D16_SP, FeaturePrefLoopAlign32, FeatureHasSlowFPVMLx, FeatureUseMISched, diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index 239b95ffb8e..6bede80adaa 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -657,7 +657,7 @@ void ARMAsmPrinter::emitAttributes() { ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::IEEEDenormals); else { - if (!STI.hasVFP2()) { + if (!STI.hasVFP2Base()) { // When the target doesn't have an FPU (by design or // intention), the assumptions made on the software support // mirror that of the equivalent hardware support *if it @@ -667,7 +667,7 @@ void ARMAsmPrinter::emitAttributes() { if (STI.hasV7Ops()) ATS.emitAttribute(ARMBuildAttrs::ABI_FP_denormal, ARMBuildAttrs::PreserveFPSign); - } else if (STI.hasVFP3()) { + } else if (STI.hasVFP3Base()) { // In VFPv4, VFPv4U, VFPv3, or VFPv3U, it is preserved. That is, // the sign bit of the zero matches the sign bit of the input or // result that is being flushed to zero. 
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index 22c53d9e26c..fbef5d790a4 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -133,7 +133,7 @@ ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, ScheduleHazardRecognizer *ARMBaseInstrInfo:: CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const { - if (Subtarget.isThumb2() || Subtarget.hasVFP2()) + if (Subtarget.isThumb2() || Subtarget.hasVFP2Base()) return (ScheduleHazardRecognizer *)new ARMHazardRecognizer(II, DAG); return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG); } @@ -830,7 +830,7 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, Opc = ARM::VMOVRS; else if (SPRDest && GPRSrc) Opc = ARM::VMOVSR; - else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && !Subtarget.isFPOnlySP()) + else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.hasFP64()) Opc = ARM::VMOVD; else if (ARM::QPRRegClass.contains(DestReg, SrcReg)) Opc = ARM::VORRq; @@ -890,7 +890,8 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB, BeginIdx = ARM::dsub_0; SubRegs = 4; Spacing = 2; - } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && Subtarget.isFPOnlySP()) { + } else if (ARM::DPRRegClass.contains(DestReg, SrcReg) && + !Subtarget.hasFP64()) { Opc = ARM::VMOVS; BeginIdx = ARM::ssub_0; SubRegs = 2; @@ -1481,7 +1482,7 @@ bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { // copyPhysReg() calls. Look for VMOVS instructions that can legally be // widened to VMOVD. We prefer the VMOVD when possible because it may be // changed into a VORR that can go down the NEON pipeline. - if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || Subtarget.isFPOnlySP()) + if (!MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64()) return false; // Look for a copy between even S-registers. That is where we keep floats diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 1870e4c0b7f..96200a09109 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -149,7 +149,7 @@ ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const { const uint32_t * ARMBaseRegisterInfo::getSjLjDispatchPreservedMask(const MachineFunction &MF) const { const ARMSubtarget &STI = MF.getSubtarget(); - if (!STI.useSoftFloat() && STI.hasVFP2() && !STI.isThumb1Only()) + if (!STI.useSoftFloat() && STI.hasVFP2Base() && !STI.isThumb1Only()) return CSR_NoRegs_RegMask; else return CSR_FPRegs_RegMask; @@ -193,7 +193,7 @@ getReservedRegs(const MachineFunction &MF) const { if (STI.isR9Reserved()) markSuperRegs(Reserved, ARM::R9); // Reserve D16-D31 if the subtarget doesn't support them. - if (!STI.hasVFP3() || STI.hasD16()) { + if (!STI.hasD32()) { static_assert(ARM::D31 == ARM::D16 + 15, "Register list not consecutive!"); for (unsigned R = 0; R < 16; ++R) markSuperRegs(Reserved, ARM::D16 + R); diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp index cd01b70b378..6e274d269bf 100644 --- a/lib/Target/ARM/ARMFastISel.cpp +++ b/lib/Target/ARM/ARMFastISel.cpp @@ -441,7 +441,7 @@ unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) { } // Require VFP2 for loading fp constants. - if (!Subtarget->hasVFP2()) return false; + if (!Subtarget->hasVFP2Base()) return false; // MachineConstantPool wants an explicit alignment. 
unsigned Align = DL.getPrefTypeAlignment(CFP->getType()); @@ -969,7 +969,7 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass; break; case MVT::f32: - if (!Subtarget->hasVFP2()) return false; + if (!Subtarget->hasVFP2Base()) return false; // Unaligned loads need special handling. Floats require word-alignment. if (Alignment && Alignment < 4) { needVMOV = true; @@ -982,7 +982,8 @@ bool ARMFastISel::ARMEmitLoad(MVT VT, unsigned &ResultReg, Address &Addr, } break; case MVT::f64: - if (!Subtarget->hasVFP2()) return false; + // Can load and store double precision even without FeatureFP64 + if (!Subtarget->hasVFP2Base()) return false; // FIXME: Unaligned loads need special handling. Doublewords require // word-alignment. if (Alignment && Alignment < 4) @@ -1107,7 +1108,7 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr, } break; case MVT::f32: - if (!Subtarget->hasVFP2()) return false; + if (!Subtarget->hasVFP2Base()) return false; // Unaligned stores need special handling. Floats require word-alignment. if (Alignment && Alignment < 4) { unsigned MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32)); @@ -1122,7 +1123,8 @@ bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr, } break; case MVT::f64: - if (!Subtarget->hasVFP2()) return false; + // Can load and store double precision even without FeatureFP64 + if (!Subtarget->hasVFP2Base()) return false; // FIXME: Unaligned stores need special handling. Doublewords require // word-alignment. if (Alignment && Alignment < 4) @@ -1353,10 +1355,10 @@ bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value, if (!SrcEVT.isSimple()) return false; MVT SrcVT = SrcEVT.getSimpleVT(); - if (Ty->isFloatTy() && !Subtarget->hasVFP2()) + if (Ty->isFloatTy() && !Subtarget->hasVFP2Base()) return false; - if (Ty->isDoubleTy() && (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP())) + if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())) return false; // Check to see if the 2nd operand is a constant that we can encode directly @@ -1506,7 +1508,7 @@ bool ARMFastISel::SelectCmp(const Instruction *I) { bool ARMFastISel::SelectFPExt(const Instruction *I) { // Make sure we have VFP and that we're extending float to double. - if (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()) return false; + if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false; Value *V = I->getOperand(0); if (!I->getType()->isDoubleTy() || @@ -1525,7 +1527,7 @@ bool ARMFastISel::SelectFPExt(const Instruction *I) { bool ARMFastISel::SelectFPTrunc(const Instruction *I) { // Make sure we have VFP and that we're truncating double to float. - if (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP()) return false; + if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false; Value *V = I->getOperand(0); if (!(I->getType()->isFloatTy() && @@ -1544,7 +1546,7 @@ bool ARMFastISel::SelectFPTrunc(const Instruction *I) { bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { // Make sure we have VFP. - if (!Subtarget->hasVFP2()) return false; + if (!Subtarget->hasVFP2Base()) return false; MVT DstVT; Type *Ty = I->getType(); @@ -1576,7 +1578,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { unsigned Opc; if (Ty->isFloatTy()) Opc = isSigned ? 
ARM::VSITOS : ARM::VUITOS; - else if (Ty->isDoubleTy() && !Subtarget->isFPOnlySP()) + else if (Ty->isDoubleTy() && Subtarget->hasFP64()) Opc = isSigned ? ARM::VSITOD : ARM::VUITOD; else return false; @@ -1589,7 +1591,7 @@ bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) { bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { // Make sure we have VFP. - if (!Subtarget->hasVFP2()) return false; + if (!Subtarget->hasVFP2Base()) return false; MVT DstVT; Type *RetTy = I->getType(); @@ -1602,7 +1604,7 @@ bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) { unsigned Opc; Type *OpTy = I->getOperand(0)->getType(); if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS; - else if (OpTy->isDoubleTy() && !Subtarget->isFPOnlySP()) + else if (OpTy->isDoubleTy() && Subtarget->hasFP64()) Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD; else return false; @@ -1808,9 +1810,9 @@ bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) { // if we have them. // FIXME: It'd be nice to use NEON instructions. Type *Ty = I->getType(); - if (Ty->isFloatTy() && !Subtarget->hasVFP2()) + if (Ty->isFloatTy() && !Subtarget->hasVFP2Base()) return false; - if (Ty->isDoubleTy() && (!Subtarget->hasVFP2() || Subtarget->isFPOnlySP())) + if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64())) return false; unsigned Opc; @@ -1852,7 +1854,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, default: report_fatal_error("Unsupported calling convention"); case CallingConv::Fast: - if (Subtarget->hasVFP2() && !isVarArg) { + if (Subtarget->hasVFP2Base() && !isVarArg) { if (!Subtarget->isAAPCS_ABI()) return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS); // For AAPCS ABI targets, just use VFP variant of the calling convention. @@ -1863,7 +1865,7 @@ CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC, case CallingConv::CXX_FAST_TLS: // Use target triple & subtarget features to do actual dispatch. if (Subtarget->isAAPCS_ABI()) { - if (Subtarget->hasVFP2() && + if (Subtarget->hasVFP2Base() && TM.Options.FloatABIType == FloatABI::Hard && !isVarArg) return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP); else @@ -1932,11 +1934,11 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl &Args, case MVT::i32: break; case MVT::f32: - if (!Subtarget->hasVFP2()) + if (!Subtarget->hasVFP2Base()) return false; break; case MVT::f64: - if (!Subtarget->hasVFP2()) + if (!Subtarget->hasVFP2Base()) return false; break; } diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index cb66d16a194..492c83c2bf7 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -4043,9 +4043,9 @@ bool ARMDAGToDAGISel::tryReadRegister(SDNode *N){ // If an opcode was found then we can lower the read to a VFP instruction. 
if (Opcode) { - if (!Subtarget->hasVFP2()) + if (!Subtarget->hasVFP2Base()) return false; - if (Opcode == ARM::VMRS_MVFR2 && !Subtarget->hasFPARMv8()) + if (Opcode == ARM::VMRS_MVFR2 && !Subtarget->hasFPARMv8Base()) return false; Ops = { getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32), @@ -4154,7 +4154,7 @@ bool ARMDAGToDAGISel::tryWriteRegister(SDNode *N){ .Default(0); if (Opcode) { - if (!Subtarget->hasVFP2()) + if (!Subtarget->hasVFP2Base()) return false; Ops = { N->getOperand(2), getAL(CurDAG, DL), CurDAG->getRegister(0, MVT::i32), N->getOperand(0) }; diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 88d318e7bb3..7dd2fef89ee 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -241,7 +241,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, if (Subtarget->isTargetMachO()) { // Uses VFP for Thumb libfuncs if available. - if (Subtarget->isThumb() && Subtarget->hasVFP2() && + if (Subtarget->isThumb() && Subtarget->hasVFP2Base() && Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) { static const struct { const RTLIB::Libcall Op; @@ -510,7 +510,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, else addRegisterClass(MVT::i32, &ARM::GPRRegClass); - if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && + if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only()) { addRegisterClass(MVT::f32, &ARM::SPRRegClass); addRegisterClass(MVT::f64, &ARM::DPRRegClass); @@ -698,7 +698,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom); // NEON only has FMA instructions as of VFP4. - if (!Subtarget->hasVFP4()) { + if (!Subtarget->hasVFP4Base()) { setOperationAction(ISD::FMA, MVT::v2f32, Expand); setOperationAction(ISD::FMA, MVT::v4f32, Expand); } @@ -732,7 +732,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, } } - if (Subtarget->isFPOnlySP()) { + if (!Subtarget->hasFP64()) { // When targeting a floating-point unit with only single-precision // operations, f64 is legal for the few double-precision instructions which // are present However, no double-precision operations other than moves, @@ -1030,7 +1030,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, } setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); - if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && + if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only()) { // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR // iff target supports vfp2. 
@@ -1080,7 +1080,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, setOperationAction(ISD::FSINCOS, MVT::f32, Expand); setOperationAction(ISD::FREM, MVT::f64, Expand); setOperationAction(ISD::FREM, MVT::f32, Expand); - if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() && + if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only()) { setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); @@ -1088,7 +1088,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, setOperationAction(ISD::FPOW, MVT::f64, Expand); setOperationAction(ISD::FPOW, MVT::f32, Expand); - if (!Subtarget->hasVFP4()) { + if (!Subtarget->hasVFP4Base()) { setOperationAction(ISD::FMA, MVT::f64, Expand); setOperationAction(ISD::FMA, MVT::f32, Expand); } @@ -1096,7 +1096,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, // Various VFP goodness if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) { // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded. - if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) { + if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) { setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand); } @@ -1116,7 +1116,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, } // FP-ARMv8 implements a lot of rounding-like FP operations. - if (Subtarget->hasFPARMv8()) { + if (Subtarget->hasFPARMv8Base()) { setOperationAction(ISD::FFLOOR, MVT::f32, Legal); setOperationAction(ISD::FCEIL, MVT::f32, Legal); setOperationAction(ISD::FROUND, MVT::f32, Legal); @@ -1130,7 +1130,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal); setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal); - if (!Subtarget->isFPOnlySP()) { + if (Subtarget->hasFP64()) { setOperationAction(ISD::FFLOOR, MVT::f64, Legal); setOperationAction(ISD::FCEIL, MVT::f64, Legal); setOperationAction(ISD::FROUND, MVT::f64, Legal); @@ -1202,7 +1202,7 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM, setStackPointerRegisterToSaveRestore(ARM::SP); if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() || - !Subtarget->hasVFP2() || Subtarget->hasMinSize()) + !Subtarget->hasVFP2Base() || Subtarget->hasMinSize()) setSchedulingPreference(Sched::RegPressure); else setSchedulingPreference(Sched::Hybrid); @@ -1637,7 +1637,7 @@ ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, case CallingConv::C: if (!Subtarget->isAAPCS_ABI()) return CallingConv::ARM_APCS; - else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && + else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && getTargetMachine().Options.FloatABIType == FloatABI::Hard && !isVarArg) return CallingConv::ARM_AAPCS_VFP; @@ -1646,10 +1646,11 @@ ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC, case CallingConv::Fast: case CallingConv::CXX_FAST_TLS: if (!Subtarget->isAAPCS_ABI()) { - if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) + if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg) return CallingConv::Fast; return CallingConv::ARM_APCS; - } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg) + } else if (Subtarget->hasVFP2Base() && + !Subtarget->isThumb1Only() && !isVarArg) return CallingConv::ARM_AAPCS_VFP; else return CallingConv::ARM_AAPCS; @@ -3912,7 +3913,7 @@ SDValue 
ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl, bool InvalidOnQNaN) const { - assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64); + assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64); SDValue Cmp; SDValue C = DAG.getConstant(InvalidOnQNaN, dl, MVT::i32); if (!isFloatingPointZero(RHS)) @@ -4225,7 +4226,7 @@ static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal, SDValue ARMcc, SDValue CCR, SDValue Cmp, SelectionDAG &DAG) const { - if (Subtarget->isFPOnlySP() && VT == MVT::f64) { + if (!Subtarget->hasFP64() && VT == MVT::f64) { FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32), FalseVal); TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl, @@ -4474,7 +4475,7 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { SDValue TrueVal = Op.getOperand(2); SDValue FalseVal = Op.getOperand(3); - if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { + if (!Subtarget->hasFP64() && LHS.getValueType() == MVT::f64) { DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, dl); @@ -4497,9 +4498,9 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { // inverting the compare condition, swapping 'less' and 'greater') and // sometimes need to swap the operands to the VSEL (which inverts the // condition in the sense of firing whenever the previous condition didn't) - if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f16 || - TrueVal.getValueType() == MVT::f32 || - TrueVal.getValueType() == MVT::f64)) { + if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 || + TrueVal.getValueType() == MVT::f32 || + TrueVal.getValueType() == MVT::f64)) { ARMCC::CondCodes CondCode = IntCCToARMCC(CC); if (CondCode == ARMCC::LT || CondCode == ARMCC::LE || CondCode == ARMCC::VC || CondCode == ARMCC::NE) { @@ -4522,7 +4523,7 @@ SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we // must use VSEL (limited condition codes), due to not having conditional f16 // moves. 
- if (Subtarget->hasFPARMv8() && + if (Subtarget->hasFPARMv8Base() && !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) && (TrueVal.getValueType() == MVT::f16 || TrueVal.getValueType() == MVT::f32 || @@ -4715,7 +4716,7 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { SDValue Dest = Op.getOperand(4); SDLoc dl(Op); - if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) { + if (!Subtarget->hasFP64() && LHS.getValueType() == MVT::f64) { DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC, dl); @@ -4862,7 +4863,7 @@ SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); if (VT.isVector()) return LowerVectorFP_TO_INT(Op, DAG); - if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) { + if (!Subtarget->hasFP64() && Op.getOperand(0).getValueType() == MVT::f64) { RTLIB::Libcall LC; if (Op.getOpcode() == ISD::FP_TO_SINT) LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), @@ -4926,7 +4927,7 @@ SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { EVT VT = Op.getValueType(); if (VT.isVector()) return LowerVectorINT_TO_FP(Op, DAG); - if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) { + if (!Subtarget->hasFP64() && Op.getValueType() == MVT::f64) { RTLIB::Libcall LC; if (Op.getOpcode() == ISD::SINT_TO_FP) LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), @@ -5909,12 +5910,12 @@ SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG, } } - if (!ST->hasVFP3()) + if (!ST->hasVFP3Base()) return SDValue(); // Use the default (constant pool) lowering for double constants when we have // an SP-only FPU - if (IsDouble && Subtarget->isFPOnlySP()) + if (IsDouble && !Subtarget->hasFP64()) return SDValue(); // Try splatting with a VMOV.f32... @@ -11356,7 +11357,7 @@ static SDValue PerformVMOVRRDCombine(SDNode *N, const ARMSubtarget *Subtarget) { // vmovrrd(vmovdrr x, y) -> x,y SDValue InDouble = N->getOperand(0); - if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP()) + if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64()) return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1)); // vmovrrd(load f64) -> (load i32), (load i32) @@ -13303,7 +13304,7 @@ static bool isLegalT2AddressImmediate(int64_t V, EVT VT, unsigned NumBytes = std::max(VT.getSizeInBits() / 8, 1U); // VLDR and LDRD: 4 * imm8 - if ((VT.isFloatingPoint() && Subtarget->hasVFP2()) || NumBytes == 8) + if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8) return isShiftedUInt<8, 2>(V); if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) { @@ -13347,7 +13348,7 @@ static bool isLegalAddressImmediate(int64_t V, EVT VT, return isUInt<8>(V); case MVT::f32: case MVT::f64: - if (!Subtarget->hasVFP2()) // FIXME: NEON? + if (!Subtarget->hasVFP2Base()) // FIXME: NEON? return false; return isShiftedUInt<8, 2>(V); } @@ -13910,7 +13911,7 @@ const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { // Although we are correct (we are free to emit anything, without // constraints), we might break use cases that would expect us to be more // efficient and emit something else. 
- if (!Subtarget->hasVFP2()) + if (!Subtarget->hasVFP2Base()) return "r"; if (ConstraintVT.isFloatingPoint()) return "w"; @@ -14392,7 +14393,7 @@ ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const } SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { - assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() && + assert(Op.getValueType() == MVT::f64 && !Subtarget->hasFP64() && "Unexpected type for custom-lowering FP_EXTEND"); RTLIB::Libcall LC; @@ -14404,8 +14405,7 @@ SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { } SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { - assert(Op.getOperand(0).getValueType() == MVT::f64 && - Subtarget->isFPOnlySP() && + assert(Op.getOperand(0).getValueType() == MVT::f64 && !Subtarget->hasFP64() && "Unexpected type for custom-lowering FP_ROUND"); RTLIB::Libcall LC; @@ -14468,13 +14468,13 @@ bool ARM::isBitFieldInvertedMask(unsigned v) { /// materialize the FP immediate as a load from a constant pool. bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const { - if (!Subtarget->hasVFP3()) + if (!Subtarget->hasVFP3Base()) return false; if (VT == MVT::f16 && Subtarget->hasFullFP16()) return ARM_AM::getFP16Imm(Imm) != -1; if (VT == MVT::f32) return ARM_AM::getFP32Imm(Imm) != -1; - if (VT == MVT::f64 && !Subtarget->isFPOnlySP()) + if (VT == MVT::f64 && Subtarget->hasFP64()) return ARM_AM::getFP64Imm(Imm) != -1; return false; } diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index f55e73abbd7..d0821b94477 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -258,18 +258,18 @@ def HasV8_4a : Predicate<"Subtarget->hasV8_4aOps()">, AssemblerPredicate<"HasV8_4aOps", "armv8.4a">; def HasV8_5a : Predicate<"Subtarget->hasV8_5aOps()">, AssemblerPredicate<"HasV8_5aOps", "armv8.5a">; -def NoVFP : Predicate<"!Subtarget->hasVFP2()">; -def HasVFP2 : Predicate<"Subtarget->hasVFP2()">, - AssemblerPredicate<"FeatureVFP2", "VFP2">; -def HasVFP3 : Predicate<"Subtarget->hasVFP3()">, - AssemblerPredicate<"FeatureVFP3", "VFP3">; -def HasVFP4 : Predicate<"Subtarget->hasVFP4()">, - AssemblerPredicate<"FeatureVFP4", "VFP4">; -def HasDPVFP : Predicate<"!Subtarget->isFPOnlySP()">, - AssemblerPredicate<"!FeatureVFPOnlySP", +def NoVFP : Predicate<"!Subtarget->hasVFP2Base()">; +def HasVFP2 : Predicate<"Subtarget->hasVFP2Base()">, + AssemblerPredicate<"FeatureVFP2_D16_SP", "VFP2">; +def HasVFP3 : Predicate<"Subtarget->hasVFP3Base()">, + AssemblerPredicate<"FeatureVFP3_D16_SP", "VFP3">; +def HasVFP4 : Predicate<"Subtarget->hasVFP4Base()">, + AssemblerPredicate<"FeatureVFP4_D16_SP", "VFP4">; +def HasDPVFP : Predicate<"Subtarget->hasFP64()">, + AssemblerPredicate<"FeatureFP64", "double precision VFP">; -def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">, - AssemblerPredicate<"FeatureFPARMv8", "FPARMv8">; +def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8Base()">, + AssemblerPredicate<"FeatureFPARMv8_D16_SP", "FPARMv8">; def HasNEON : Predicate<"Subtarget->hasNEON()">, AssemblerPredicate<"FeatureNEON", "NEON">; def HasSHA2 : Predicate<"Subtarget->hasSHA2()">, @@ -371,7 +371,7 @@ def UseMulOps : Predicate<"Subtarget->useMulOps()">; // Do not use them for Darwin platforms. 
def UseFusedMAC : Predicate<"(TM.Options.AllowFPOpFusion ==" " FPOpFusion::Fast && " - " Subtarget->hasVFP4()) && " + " Subtarget->hasVFP4Base()) && " "!Subtarget->isTargetDarwin() &&" "Subtarget->useFPVMLx()">; diff --git a/lib/Target/ARM/ARMInstructionSelector.cpp b/lib/Target/ARM/ARMInstructionSelector.cpp index b97924cf975..4485a474a6d 100644 --- a/lib/Target/ARM/ARMInstructionSelector.cpp +++ b/lib/Target/ARM/ARMInstructionSelector.cpp @@ -232,7 +232,7 @@ static bool selectMergeValues(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) { - assert(TII.getSubtarget().hasVFP2() && "Can't select merge without VFP"); + assert(TII.getSubtarget().hasVFP2Base() && "Can't select merge without VFP"); // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs // into one DPR. @@ -263,7 +263,8 @@ static bool selectUnmergeValues(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) { - assert(TII.getSubtarget().hasVFP2() && "Can't select unmerge without VFP"); + assert(TII.getSubtarget().hasVFP2Base() && + "Can't select unmerge without VFP"); // We only support G_UNMERGE_VALUES as a way to break up one DPR into two // GPRs. @@ -1036,12 +1037,12 @@ bool ARMInstructionSelector::select(MachineInstr &I, return selectCmp(Helper, MIB, MRI); } case G_FCMP: { - assert(STI.hasVFP2() && "Can't select fcmp without VFP"); + assert(STI.hasVFP2Base() && "Can't select fcmp without VFP"); unsigned OpReg = I.getOperand(2).getReg(); unsigned Size = MRI.getType(OpReg).getSizeInBits(); - if (Size == 64 && STI.isFPOnlySP()) { + if (Size == 64 && !STI.hasFP64()) { LLVM_DEBUG(dbgs() << "Subtarget only supports single precision"); return false; } @@ -1087,7 +1088,7 @@ bool ARMInstructionSelector::select(MachineInstr &I, LLT ValTy = MRI.getType(Reg); const auto ValSize = ValTy.getSizeInBits(); - assert((ValSize != 64 || STI.hasVFP2()) && + assert((ValSize != 64 || STI.hasVFP2Base()) && "Don't know how to load/store 64-bit value without VFP"); const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize); diff --git a/lib/Target/ARM/ARMLegalizerInfo.cpp b/lib/Target/ARM/ARMLegalizerInfo.cpp index 8f2029312d2..458cafdc7a5 100644 --- a/lib/Target/ARM/ARMLegalizerInfo.cpp +++ b/lib/Target/ARM/ARMLegalizerInfo.cpp @@ -157,7 +157,7 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) { getActionDefinitionsBuilder(G_BRCOND).legalFor({s1}); - if (!ST.useSoftFloat() && ST.hasVFP2()) { + if (!ST.useSoftFloat() && ST.hasVFP2Base()) { getActionDefinitionsBuilder( {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FCONSTANT, G_FNEG}) .legalFor({s32, s64}); @@ -208,7 +208,7 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) { .libcallForCartesianProduct({s32, s64}, {s32}); } - if (!ST.useSoftFloat() && ST.hasVFP4()) + if (!ST.useSoftFloat() && ST.hasVFP4Base()) getActionDefinitionsBuilder(G_FMA).legalFor({s32, s64}); else getActionDefinitionsBuilder(G_FMA).libcallFor({s32, s64}); diff --git a/lib/Target/ARM/ARMRegisterBankInfo.cpp b/lib/Target/ARM/ARMRegisterBankInfo.cpp index d03b482043e..4566ac2c9dd 100644 --- a/lib/Target/ARM/ARMRegisterBankInfo.cpp +++ b/lib/Target/ARM/ARMRegisterBankInfo.cpp @@ -453,7 +453,7 @@ ARMRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { for (const auto &Mapping : OperandsMapping[i]) { assert( (Mapping.RegBank->getID() != ARM::FPRRegBankID || - MF.getSubtarget().hasVFP2()) && + MF.getSubtarget().hasVFP2Base()) && "Trying to use floating 
point register bank on target without vfp"); } } diff --git a/lib/Target/ARM/ARMSubtarget.h b/lib/Target/ARM/ARMSubtarget.h index 9500a9faf4e..abedc6f6d81 100644 --- a/lib/Target/ARM/ARMSubtarget.h +++ b/lib/Target/ARM/ARMSubtarget.h @@ -166,6 +166,21 @@ protected: bool HasFPARMv8 = false; bool HasNEON = false; + /// Versions of the VFP flags restricted to single precision, or to + /// 16 d-registers, or both. + bool HasVFPv2SP = false; + bool HasVFPv3SP = false; + bool HasVFPv4SP = false; + bool HasFPARMv8SP = false; + bool HasVFPv2D16 = false; + bool HasVFPv3D16 = false; + bool HasVFPv4D16 = false; + bool HasFPARMv8D16 = false; + bool HasVFPv2D16SP = false; + bool HasVFPv3D16SP = false; + bool HasVFPv4D16SP = false; + bool HasFPARMv8D16SP = false; + /// HasDotProd - True if the ARMv8.2A dot product instructions are supported. bool HasDotProd = false; @@ -232,9 +247,9 @@ protected: /// HasFP16FML - True if subtarget supports half-precision FP fml operations bool HasFP16FML = false; - /// HasD16 - True if subtarget is limited to 16 double precision + /// HasD32 - True if subtarget has the full 32 double precision /// FP registers for VFPv3. - bool HasD16 = false; + bool HasD32 = false; /// HasHardwareDivide - True if subtarget supports [su]div in Thumb mode bool HasHardwareDivideInThumb = false; @@ -291,9 +306,9 @@ protected: /// extension. bool HasVirtualization = false; - /// FPOnlySP - If true, the floating point unit only supports single + /// HasFP64 - If true, the floating point unit supports double /// precision. - bool FPOnlySP = false; + bool HasFP64 = false; /// If true, the processor supports the Performance Monitor Extensions. These /// include a generic cycle-counter as well as more fine-grained (often @@ -569,10 +584,10 @@ public: bool hasARMOps() const { return !NoARM; } - bool hasVFP2() const { return HasVFPv2; } - bool hasVFP3() const { return HasVFPv3; } - bool hasVFP4() const { return HasVFPv4; } - bool hasFPARMv8() const { return HasFPARMv8; } + bool hasVFP2Base() const { return HasVFPv2D16SP; } + bool hasVFP3Base() const { return HasVFPv3D16SP; } + bool hasVFP4Base() const { return HasVFPv4D16SP; } + bool hasFPARMv8Base() const { return HasFPARMv8D16SP; } bool hasNEON() const { return HasNEON; } bool hasSHA2() const { return HasSHA2; } bool hasAES() const { return HasAES; } @@ -601,7 +616,7 @@ public: bool useFPVMLx() const { return !SlowFPVMLx; } bool hasVMLxForwarding() const { return HasVMLxForwarding; } bool isFPBrccSlow() const { return SlowFPBrcc; } - bool isFPOnlySP() const { return FPOnlySP; } + bool hasFP64() const { return HasFP64; } bool hasPerfMon() const { return HasPerfMon; } bool hasTrustZone() const { return HasTrustZone; } bool has8MSecExt() const { return Has8MSecExt; } @@ -638,7 +653,7 @@ public: bool genExecuteOnly() const { return GenExecuteOnly; } bool hasFP16() const { return HasFP16; } - bool hasD16() const { return HasD16; } + bool hasD32() const { return HasD32; } bool hasFullFP16() const { return HasFullFP16; } bool hasFP16FML() const { return HasFP16FML; } diff --git a/lib/Target/ARM/ARMTargetTransformInfo.h b/lib/Target/ARM/ARMTargetTransformInfo.h index 2fbcd8b2ba6..882a63c33a5 100644 --- a/lib/Target/ARM/ARMTargetTransformInfo.h +++ b/lib/Target/ARM/ARMTargetTransformInfo.h @@ -48,7 +48,7 @@ class ARMTTIImpl : public BasicTTIImplBase { const ARMTargetLowering *TLI; // Currently the following features are excluded from InlineFeatureWhitelist. 
- // ModeThumb, FeatureNoARM, ModeSoftFloat, FeatureVFPOnlySP, FeatureD16 + // ModeThumb, FeatureNoARM, ModeSoftFloat, FeatureFP64, FeatureD32 // Depending on whether they are set or unset, different // instructions/registers are available. For example, inlining a callee with // -thumb-mode in a caller with +thumb-mode, may cause the assembler to diff --git a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp index f4af747f3ee..f8a00f713e4 100644 --- a/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -492,8 +492,8 @@ class ARMAsmParser : public MCTargetAsmParser { return getSTI().getFeatureBits()[ARM::FeatureDSP]; } - bool hasD16() const { - return getSTI().getFeatureBits()[ARM::FeatureD16]; + bool hasD32() const { + return getSTI().getFeatureBits()[ARM::FeatureD32]; } bool hasV8_1aOps() const { @@ -3424,7 +3424,7 @@ int ARMAsmParser::tryParseRegister() { } // Some FPUs only have 16 D registers, so D16-D31 are invalid - if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31) + if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31) return -1; Parser.Lex(); // Eat identifier token. @@ -10415,11 +10415,11 @@ ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) { : "operand must be a register in range [r0, r12] or r14"; // DPR contains 16 registers for some FPUs, and 32 for others. case Match_DPR: - return hasD16() ? "operand must be a register in range [d0, d15]" - : "operand must be a register in range [d0, d31]"; + return hasD32() ? "operand must be a register in range [d0, d31]" + : "operand must be a register in range [d0, d15]"; case Match_DPR_RegList: - return hasD16() ? "operand must be a list of registers in range [d0, d15]" - : "operand must be a list of registers in range [d0, d31]"; + return hasD32() ? "operand must be a list of registers in range [d0, d31]" + : "operand must be a list of registers in range [d0, d15]"; // For all other diags, use the static string from tablegen. default: @@ -10621,14 +10621,15 @@ bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) { { ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC} }, { ARM::AEK_CRYPTO, {Feature_HasV8Bit}, {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} }, - { ARM::AEK_FP, {Feature_HasV8Bit}, {ARM::FeatureFPARMv8} }, + { ARM::AEK_FP, {Feature_HasV8Bit}, + {ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} }, { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM), {Feature_HasV7Bit, Feature_IsNotMClassBit}, {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} }, { ARM::AEK_MP, {Feature_HasV7Bit, Feature_IsNotMClassBit}, {ARM::FeatureMP} }, { ARM::AEK_SIMD, {Feature_HasV8Bit}, - {ARM::FeatureNEON, ARM::FeatureFPARMv8} }, + {ARM::FeatureNEON, ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} }, { ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone} }, // FIXME: Only available in A-class, isel not predicated { ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization} }, @@ -10678,12 +10679,12 @@ bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) { "allowed for the current base architecture"); MCSubtargetInfo &STI = copySTI(); - FeatureBitset ToggleFeatures = EnableFeature - ? 
(~STI.getFeatureBits() & Extension.Features) - : ( STI.getFeatureBits() & Extension.Features); - - FeatureBitset Features = - ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures)); + if (EnableFeature) { + STI.SetFeatureBitsTransitively(Extension.Features); + } else { + STI.ClearFeatureBitsTransitively(Extension.Features); + } + FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits()); setAvailableFeatures(Features); return false; } diff --git a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp index d4b2be7d381..6948f7af469 100644 --- a/lib/Target/ARM/Disassembler/ARMDisassembler.cpp +++ b/lib/Target/ARM/Disassembler/ARMDisassembler.cpp @@ -1043,9 +1043,9 @@ static DecodeStatus DecodeDPRRegisterClass(MCInst &Inst, unsigned RegNo, const FeatureBitset &featureBits = ((const MCDisassembler*)Decoder)->getSubtargetInfo().getFeatureBits(); - bool hasD16 = featureBits[ARM::FeatureD16]; + bool hasD32 = featureBits[ARM::FeatureD32]; - if (RegNo > 31 || (hasD16 && RegNo > 15)) + if (RegNo > 31 || (!hasD32 && RegNo > 15)) return MCDisassembler::Fail; unsigned Register = DPRDecoderTable[RegNo]; diff --git a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp index 8f9c66507a4..9502a5d7c39 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp @@ -222,37 +222,37 @@ void ARMTargetStreamer::emitTargetAttributes(const MCSubtargetInfo &STI) { ? ARMBuildAttrs::AllowNeonARMv8_1a : ARMBuildAttrs::AllowNeonARMv8); } else { - if (STI.hasFeature(ARM::FeatureFPARMv8)) + if (STI.hasFeature(ARM::FeatureFPARMv8_D16_SP)) // FPv5 and FP-ARMv8 have the same instructions, so are modeled as one // FPU, but there are two different names for it depending on the CPU. - emitFPU(STI.hasFeature(ARM::FeatureD16) - ? (STI.hasFeature(ARM::FeatureVFPOnlySP) ? ARM::FK_FPV5_SP_D16 - : ARM::FK_FPV5_D16) - : ARM::FK_FP_ARMV8); - else if (STI.hasFeature(ARM::FeatureVFP4)) - emitFPU(STI.hasFeature(ARM::FeatureD16) - ? (STI.hasFeature(ARM::FeatureVFPOnlySP) ? ARM::FK_FPV4_SP_D16 - : ARM::FK_VFPV4_D16) - : ARM::FK_VFPV4); - else if (STI.hasFeature(ARM::FeatureVFP3)) + emitFPU(STI.hasFeature(ARM::FeatureD32) + ? ARM::FK_FP_ARMV8 + : (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_FPV5_D16 + : ARM::FK_FPV5_SP_D16)); + else if (STI.hasFeature(ARM::FeatureVFP4_D16_SP)) + emitFPU(STI.hasFeature(ARM::FeatureD32) + ? ARM::FK_VFPV4 + : (STI.hasFeature(ARM::FeatureFP64) ? ARM::FK_VFPV4_D16 + : ARM::FK_FPV4_SP_D16)); + else if (STI.hasFeature(ARM::FeatureVFP3_D16_SP)) emitFPU( - STI.hasFeature(ARM::FeatureD16) - // +d16 - ? (STI.hasFeature(ARM::FeatureVFPOnlySP) - ? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16 - : ARM::FK_VFPV3XD) - : (STI.hasFeature(ARM::FeatureFP16) + STI.hasFeature(ARM::FeatureD32) + // +d32 + ? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16 + : ARM::FK_VFPV3) + // -d32 + : (STI.hasFeature(ARM::FeatureFP64) + ? (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_D16_FP16 - : ARM::FK_VFPV3_D16)) - // -d16 - : (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3_FP16 - : ARM::FK_VFPV3)); - else if (STI.hasFeature(ARM::FeatureVFP2)) + : ARM::FK_VFPV3_D16) + : (STI.hasFeature(ARM::FeatureFP16) ? ARM::FK_VFPV3XD_FP16 + : ARM::FK_VFPV3XD))); + else if (STI.hasFeature(ARM::FeatureVFP2_D16_SP)) emitFPU(ARM::FK_VFPV2); } // ABI_HardFP_use attribute to indicate single precision FP. 
- if (STI.hasFeature(ARM::FeatureVFPOnlySP)) + if (STI.hasFeature(ARM::FeatureVFP2_D16_SP) && !STI.hasFeature(ARM::FeatureFP64)) emitAttribute(ARMBuildAttrs::ABI_HardFP_use, ARMBuildAttrs::HardFPSinglePrecision); diff --git a/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir b/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir index fd3e20c40c0..63137071ed6 100644 --- a/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir +++ b/test/CodeGen/ARM/GlobalISel/arm-legalize-load-store.mir @@ -11,7 +11,7 @@ define void @test_gep_s16() { ret void } attributes #0 = { "target-features"="+vfp2" } - attributes #1 = { "target-features"="-vfp2" } + attributes #1 = { "target-features"="-vfp2d16sp" } ... --- name: test_legal_loads_stores diff --git a/test/CodeGen/ARM/arm-storebytesmerge.ll b/test/CodeGen/ARM/arm-storebytesmerge.ll index 00c5914b34b..c159ca49c44 100644 --- a/test/CodeGen/ARM/arm-storebytesmerge.ll +++ b/test/CodeGen/ARM/arm-storebytesmerge.ll @@ -337,5 +337,5 @@ define arm_aapcs_vfpcc void @test(i8* %v50) #0 { ret void } -attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m7" "target-features"="+d16,+dsp,+fp-armv8,+hwdiv,+thumb-mode,-crc,-crypto,-dotprod,-fp-only-sp,-fullfp16,-hwdiv-arm,-neon,-ras" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "denormal-fp-math"="preserve-sign" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m7" "target-features"="-d32,+dsp,+fp-armv8,+hwdiv,+thumb-mode,-crc,-crypto,-dotprod,-fullfp16,-hwdiv-arm,-neon,-ras" "unsafe-fp-math"="false" "use-soft-float"="false" } diff --git a/test/CodeGen/ARM/arm32-rounding.ll b/test/CodeGen/ARM/arm32-rounding.ll index f247648d814..b0a9f54e424 100644 --- a/test/CodeGen/ARM/arm32-rounding.ll +++ b/test/CodeGen/ARM/arm32-rounding.ll @@ -1,6 +1,6 @@ ; RUN: llc < %s -mtriple=armv8-linux-gnueabihf -mattr=+fp-armv8 | FileCheck --check-prefix=CHECK --check-prefix=DP %s -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabihf -mattr=+fp-armv8,+d16,+fp-only-sp | FileCheck --check-prefix=SP %s -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabihf -mattr=+fp-armv8,+d16 | FileCheck --check-prefix=DP %s +; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabihf -mattr=+fp-armv8,-d32,-fp64 | FileCheck --check-prefix=SP %s +; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabihf -mattr=+fp-armv8,-d32 | FileCheck --check-prefix=DP %s ; CHECK-LABEL: test1 ; CHECK: vrintm.f32 diff --git a/test/CodeGen/ARM/build-attributes.ll b/test/CodeGen/ARM/build-attributes.ll index 32ffa457388..f349530fb48 100644 --- a/test/CodeGen/ARM/build-attributes.ll +++ b/test/CodeGen/ARM/build-attributes.ll @@ -33,9 +33,9 @@ ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 | FileCheck %s --check-prefix=CORTEX-A5-DEFAULT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s 
--check-prefix=CORTEX-A5-DEFAULT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,+d16 | FileCheck %s --check-prefix=CORTEX-A5-NONEON -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A5-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-neon,-d32 | FileCheck %s --check-prefix=CORTEX-A5-NONEON +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2d16sp | FileCheck %s --check-prefix=CORTEX-A5-NOFPU +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a5 -mattr=-vfp2d16sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A5-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A8-SOFT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=soft -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A8-SOFT-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a8 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-A8-HARD @@ -50,16 +50,16 @@ ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a9 -float-abi=soft | FileCheck %s --check-prefix=CORTEX-A9-SOFT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-DEFAULT-FAST -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A12-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2d16sp | FileCheck %s --check-prefix=CORTEX-A12-NOFPU +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -mattr=-vfp2d16sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A12-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a12 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 | FileCheck %s --check-prefix=CORTEX-A15 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A15-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 | FileCheck %s 
--check-prefix=CORTEX-A17-DEFAULT ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-FAST -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-A17-NOFPU -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2d16sp | FileCheck %s --check-prefix=CORTEX-A17-NOFPU +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -mattr=-vfp2d16sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A17-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -enable-no-trapping-fp-math | FileCheck %s --check-prefix=NO-TRAPPING-MATH ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -denormal-fp-math=ieee | FileCheck %s --check-prefix=DENORMAL-IEEE @@ -67,9 +67,9 @@ ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a15 -denormal-fp-math=positive-zero | FileCheck %s --check-prefix=DENORMAL-POSITIVE-ZERO ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3-FP16 -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,+d16,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3-D16-FP16 -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,+fp-only-sp,+d16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3XD -; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,+fp-only-sp,+d16,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3XD-FP16 +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,-d32,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3-D16-FP16 +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,-fp64,-d32 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3XD +; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=-neon,+vfp3,-fp64,-d32,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-VFPV3XD-FP16 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mattr=+neon,+fp16 | FileCheck %s --check-prefix=GENERIC-FPU-NEON-FP16 ; RUN: llc < %s -mtriple=armv7-linux-gnueabi -mcpu=cortex-a17 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING @@ -96,10 +96,10 @@ ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard | FileCheck %s --check-prefix=CORTEX-M4-HARD ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -float-abi=hard -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M4-HARD-FAST ; RUN: llc < %s -mtriple=thumbv7m-linux-gnueabi -mcpu=cortex-m4 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SOFT -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST -; RUN: llc < %s 
-mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SINGLE -; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=+fp-only-sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST +; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2d16sp | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SOFT +; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-vfp2d16sp -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-NOFPU-FAST +; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s --check-prefix=CORTEX-M7 --check-prefix=CORTEX-M7-SINGLE +; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -mattr=-fp64 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-M7-FAST ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 | FileCheck %s --check-prefix=CORTEX-M7-DOUBLE ; RUN: llc < %s -mtriple=thumbv7em-linux-gnueabi -mcpu=cortex-m7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=thumbv8-linux-gnueabi -mcpu=cortex-m23 | FileCheck %s --check-prefix=CORTEX-M23 @@ -157,12 +157,12 @@ ; RUN: llc < %s -mtriple=armv8.1a-linux-gnueabi -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 | FileCheck %s --check-prefix=CORTEX-A7-CHECK ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-CHECK-FAST -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon,-fp16 | FileCheck %s --check-prefix=CORTEX-A7-NOFPU -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2,-vfp3,-vfp4,-neon,-fp16 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST +; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2d16sp,-vfp3,-vfp4,-neon,-fp16 | FileCheck %s --check-prefix=CORTEX-A7-NOFPU +; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=-vfp2d16sp,-vfp3,-vfp4,-neon,-fp16 -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-NOFPU-FAST ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -enable-sign-dependent-rounding-fp-math | FileCheck %s --check-prefix=DYN-ROUNDING ; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,-neon -enable-unsafe-fp-math -frame-pointer=all -enable-no-infs-fp-math -enable-no-nans-fp-math -fp-contract=fast | FileCheck %s --check-prefix=CORTEX-A7-FPUV4-FAST -; RUN: llc < %s -mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,,+d16,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 +; RUN: llc < %s 
-mtriple=armv7-none-linux-gnueabi -mcpu=cortex-a7 -mattr=+vfp4,,-d32,-neon | FileCheck %s --check-prefix=CORTEX-A7-FPUV4 ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=pic | FileCheck %s --check-prefix=RELOC-PIC ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=static | FileCheck %s --check-prefix=RELOC-OTHER ; RUN: llc < %s -mtriple=arm-none-linux-gnueabi -mattr=+strict-align -relocation-model=dynamic-no-pic | FileCheck %s --check-prefix=RELOC-OTHER @@ -230,8 +230,8 @@ ; RUN: llc < %s -mtriple=armv5-none-linux-gnueabi -mcpu=arm1022e -mattr=+strict-align | FileCheck %s --check-prefix=STRICT-ALIGN ; ARMv8-R -; RUN: llc < %s -mtriple=arm-none-none-eabi -mcpu=cortex-r52 -mattr=-vfp2,-fp16 | FileCheck %s --check-prefix=ARMv8R --check-prefix=ARMv8R-NOFPU -; RUN: llc < %s -mtriple=arm-none-none-eabi -mcpu=cortex-r52 -mattr=-neon,+fp-only-sp,+d16 | FileCheck %s --check-prefix=ARMv8R --check-prefix=ARMv8R-SP +; RUN: llc < %s -mtriple=arm-none-none-eabi -mcpu=cortex-r52 -mattr=-vfp2d16sp,-fp16 | FileCheck %s --check-prefix=ARMv8R --check-prefix=ARMv8R-NOFPU +; RUN: llc < %s -mtriple=arm-none-none-eabi -mcpu=cortex-r52 -mattr=-neon,-fp64,-d32 | FileCheck %s --check-prefix=ARMv8R --check-prefix=ARMv8R-SP ; RUN: llc < %s -mtriple=arm-none-none-eabi -mcpu=cortex-r52 | FileCheck %s --check-prefix=ARMv8R --check-prefix=ARMv8R-NEON ; ARMv8-M diff --git a/test/CodeGen/ARM/fast-isel-call.ll b/test/CodeGen/ARM/fast-isel-call.ll index e6094cb63a1..3e5c79dc633 100644 --- a/test/CodeGen/ARM/fast-isel-call.ll +++ b/test/CodeGen/ARM/fast-isel-call.ll @@ -4,9 +4,9 @@ ; RUN: llc -fast-isel-sink-local-values < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -mattr=+long-calls | FileCheck %s --check-prefix=ARM-LONG --check-prefix=ARM-LONG-MACHO ; RUN: llc -fast-isel-sink-local-values < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -mattr=+long-calls | FileCheck %s --check-prefix=ARM-LONG --check-prefix=ARM-LONG-ELF ; RUN: llc -fast-isel-sink-local-values < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -mattr=+long-calls | FileCheck %s --check-prefix=THUMB-LONG -; RUN: llc -fast-isel-sink-local-values < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -mattr=-vfp2 | FileCheck %s --check-prefix=ARM-NOVFP -; RUN: llc -fast-isel-sink-local-values < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -mattr=-vfp2 | FileCheck %s --check-prefix=ARM-NOVFP -; RUN: llc -fast-isel-sink-local-values < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -mattr=-vfp2 | FileCheck %s --check-prefix=THUMB-NOVFP +; RUN: llc -fast-isel-sink-local-values < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -mattr=-vfp2d16sp | FileCheck %s --check-prefix=ARM-NOVFP +; RUN: llc -fast-isel-sink-local-values < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -mattr=-vfp2d16sp | FileCheck %s --check-prefix=ARM-NOVFP +; RUN: llc -fast-isel-sink-local-values < %s -O0 -verify-machineinstrs -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -mattr=-vfp2d16sp | FileCheck %s 
--check-prefix=THUMB-NOVFP ; Note that some of these tests assume that relocations are either ; movw/movt or constant pool loads. Different platforms will select diff --git a/test/CodeGen/ARM/float-helpers.s b/test/CodeGen/ARM/float-helpers.s index 42ab56084d4..d5388a372b8 100644 --- a/test/CodeGen/ARM/float-helpers.s +++ b/test/CodeGen/ARM/float-helpers.s @@ -5,8 +5,8 @@ ; RUN: llc -asm-verbose=false -mattr=+vfp3 -meabi=gnu -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-SOFTFP ; RUN: llc -asm-verbose=false -mattr=+vfp3 -float-abi=hard -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-DP ; RUN: llc -asm-verbose=false -mattr=+vfp3 -float-abi=hard -meabi=gnu -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-DP -; RUN: llc -asm-verbose=false -mattr=+vfp3,+fp-only-sp -float-abi=hard -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-SPONLY -; RUN: llc -asm-verbose=false -mattr=+vfp3,+fp-only-sp -float-abi=hard -mtriple=arm-eabi -meabi=gnu < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-SPONLY +; RUN: llc -asm-verbose=false -mattr=+vfp3,-fp64 -float-abi=hard -mtriple=arm-eabi < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-SPONLY +; RUN: llc -asm-verbose=false -mattr=+vfp3,-fp64 -float-abi=hard -mtriple=arm-eabi -meabi=gnu < %s | FileCheck %s -check-prefix=CHECK-HARDFP-SP -check-prefix=CHECK-HARDFP-SPONLY ; The Runtime ABI for the ARM Architecture IHI0043 section 4.1.2 The ; floating-point helper functions to always use the base AAPCS (soft-float) diff --git a/test/CodeGen/ARM/fp-only-sp.ll b/test/CodeGen/ARM/fp-only-sp.ll index 2c7b2acbde9..ebfa41a8294 100644 --- a/test/CodeGen/ARM/fp-only-sp.ll +++ b/test/CodeGen/ARM/fp-only-sp.ll @@ -1,7 +1,7 @@ ; RUN: llc -mtriple=thumbv7em-apple-macho -mcpu=cortex-m4 %s -o - -O0 | FileCheck %s ; RUN: llc -mtriple=thumbv7em-apple-macho -mcpu=cortex-m4 %s -o - | FileCheck %s -; Note: vldr and vstr really do have 64-bit variants even with fp-only-sp +; Note: vldr and vstr really do have 64-bit variants even with -fp64 define void @test_load_store(double* %addr) { ; CHECK-LABEL: test_load_store: ; CHECK: vldr [[TMP:d[0-9]+]], [r0] diff --git a/test/CodeGen/ARM/fp16-instructions.ll b/test/CodeGen/ARM/fp16-instructions.ll index 514d3c7ae0a..e27631ce164 100644 --- a/test/CodeGen/ARM/fp16-instructions.ll +++ b/test/CodeGen/ARM/fp16-instructions.ll @@ -5,28 +5,28 @@ ; SOFTFP: ; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+vfp3 | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP-VFP3 ; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+vfp4 | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP-FP16,CHECK-SOFTFP-FP16-A32 -; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP-FULLFP16 +; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+fullfp16,+fp64 | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP-FULLFP16 ; RUN: llc < %s -mtriple=thumbv7-none-eabi -mattr=+vfp3 | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP-VFP3 ; RUN: llc < %s -mtriple=thumbv7-none-eabi -mattr=+vfp4 | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP-FP16,CHECK-SOFTFP-FP16-T32 -; RUN: llc < %s -mtriple=thumbv7-none-eabi -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP-FULLFP16 +; RUN: llc < %s -mtriple=thumbv7-none-eabi -mattr=+fullfp16,+fp64 | FileCheck %s --check-prefixes=CHECK,CHECK-SOFTFP-FULLFP16 ; Test fast-isel -; RUN: llc < %s 
-mtriple=arm-none-eabi -mattr=+fullfp16 -O0 | FileCheck %s --check-prefixes=CHECK-SPILL-RELOAD -; RUN: llc < %s -mtriple=thumbv7-none-eabi -mattr=+fullfp16 -O0 | FileCheck %s --check-prefixes=CHECK-SPILL-RELOAD +; RUN: llc < %s -mtriple=arm-none-eabi -mattr=+fullfp16,+fp64 -O0 | FileCheck %s --check-prefixes=CHECK-SPILL-RELOAD +; RUN: llc < %s -mtriple=thumbv7-none-eabi -mattr=+fullfp16,+fp64 -O0 | FileCheck %s --check-prefixes=CHECK-SPILL-RELOAD ; HARD: ; RUN: llc < %s -mtriple=arm-none-eabihf -mattr=+vfp3 | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-VFP3 ; RUN: llc < %s -mtriple=arm-none-eabihf -mattr=+vfp4 | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FP16 -; RUN: llc < %s -mtriple=arm-none-eabihf -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16 +; RUN: llc < %s -mtriple=arm-none-eabihf -mattr=+fullfp16,+fp64 | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16 ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mattr=+vfp3 | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-VFP3 ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mattr=+vfp4 | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FP16 -; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mattr=+fullfp16 | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16 +; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mattr=+fullfp16,+fp64 | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16 ; FP-CONTRACT=FAST -; RUN: llc < %s -mtriple=arm-none-eabihf -mattr=+fullfp16 -fp-contract=fast | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16-FAST -; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mattr=+fullfp16 -fp-contract=fast | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16-FAST +; RUN: llc < %s -mtriple=arm-none-eabihf -mattr=+fullfp16,+fp64 -fp-contract=fast | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16-FAST +; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mattr=+fullfp16,+fp64 -fp-contract=fast | FileCheck %s --check-prefixes=CHECK,CHECK-HARDFP-FULLFP16-FAST ; TODO: we can't pass half-precision arguments as "half" types yet.
We do ; that for the time being by passing "float %f.coerce" and the necessary diff --git a/test/CodeGen/ARM/fp16-promote.ll b/test/CodeGen/ARM/fp16-promote.ll index d7eaddc9e40..855f8d55dcb 100644 --- a/test/CodeGen/ARM/fp16-promote.ll +++ b/test/CodeGen/ARM/fp16-promote.ll @@ -1,6 +1,6 @@ ; RUN: llc -asm-verbose=false < %s -mattr=+vfp3,+fp16 | FileCheck -allow-deprecated-dag-overlap %s -check-prefix=CHECK-FP16 --check-prefix=CHECK-VFP -check-prefix=CHECK-ALL ; RUN: llc -asm-verbose=false < %s | FileCheck -allow-deprecated-dag-overlap %s -check-prefix=CHECK-LIBCALL --check-prefix=CHECK-VFP -check-prefix=CHECK-ALL --check-prefix=CHECK-LIBCALL-VFP -; RUN: llc -asm-verbose=false < %s -mattr=-vfp2 | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK-LIBCALL -check-prefix=CHECK-NOVFP -check-prefix=CHECK-ALL +; RUN: llc -asm-verbose=false < %s -mattr=-vfp2d16sp | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK-LIBCALL -check-prefix=CHECK-NOVFP -check-prefix=CHECK-ALL target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n32" target triple = "armv7---eabihf" diff --git a/test/CodeGen/ARM/fpconv.ll b/test/CodeGen/ARM/fpconv.ll index 8d740d88fc4..929da5f18c8 100644 --- a/test/CodeGen/ARM/fpconv.ll +++ b/test/CodeGen/ARM/fpconv.ll @@ -1,7 +1,7 @@ ; RUN: llc -mtriple=arm-eabi -mattr=+vfp2 %s -o - | FileCheck %s --check-prefix=CHECK-VFP ; RUN: llc -mtriple=arm-apple-darwin %s -o - | FileCheck %s ; RUN: llc -mtriple=armv8r-none-none-eabi %s -o - | FileCheck %s --check-prefix=CHECK-VFP -; RUN: llc -mtriple=armv8r-none-none-eabi -mattr=+fp-only-sp %s -o - | FileCheck %s --check-prefix=CHECK-VFP-SP +; RUN: llc -mtriple=armv8r-none-none-eabi -mattr=-fp64 %s -o - | FileCheck %s --check-prefix=CHECK-VFP-SP define float @f1(double %x) { ;CHECK-VFP-LABEL: f1: diff --git a/test/CodeGen/ARM/half.ll b/test/CodeGen/ARM/half.ll index a334adc3791..6759a0576e7 100644 --- a/test/CodeGen/ARM/half.ll +++ b/test/CodeGen/ARM/half.ll @@ -2,7 +2,7 @@ ; RUN: llc < %s -mtriple=thumbv7s-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-F16 ; RUN: llc < %s -mtriple=thumbv8-apple-ios7.0 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8 ; RUN: llc < %s -mtriple=armv8r-none-none-eabi | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8 -; RUN: llc < %s -mtriple=armv8r-none-none-eabi -mattr=+fp-only-sp | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8-SP +; RUN: llc < %s -mtriple=armv8r-none-none-eabi -mattr=-fp64 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-V8-SP define void @test_load_store(half* %in, half* %out) { ; CHECK-LABEL: test_load_store: diff --git a/test/CodeGen/ARM/inlineasm-X-allocation.ll b/test/CodeGen/ARM/inlineasm-X-allocation.ll index b2cb932f905..ff8dba6e38c 100644 --- a/test/CodeGen/ARM/inlineasm-X-allocation.ll +++ b/test/CodeGen/ARM/inlineasm-X-allocation.ll @@ -1,4 +1,4 @@ -; RUN: llc -mtriple=armv7-none-eabi -mattr=-neon,-vfp2 %s -o - | FileCheck %s -check-prefixes=COMMON,NOVFP +; RUN: llc -mtriple=armv7-none-eabi -mattr=-neon,-vfp2d16sp %s -o - | FileCheck %s -check-prefixes=COMMON,NOVFP ; RUN: llc -mtriple=armv7-none-eabi -mattr=+neon %s -float-abi=hard -o - | FileCheck %s -check-prefixes=COMMON,VFP ; The intent here is to test "X", which says that any operand whatsoever is allowed. 
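A note on the fp16-instructions.ll TODO above: passing half-precision values as "float %f.coerce" means the fp16 value travels in the low 16 bits of a float argument. Below is a minimal, hypothetical sketch of that coercion pattern; the function name add_coerce and the value names are illustrative and not taken from the test itself:

define float @add_coerce(float %a.coerce, float %b.coerce) {
entry:
  ; reinterpret the low 16 bits of each coerced float as a half value
  %0 = bitcast float %a.coerce to i32
  %a.trunc = trunc i32 %0 to i16
  %a.half = bitcast i16 %a.trunc to half
  %1 = bitcast float %b.coerce to i32
  %b.trunc = trunc i32 %1 to i16
  %b.half = bitcast i16 %b.trunc to half
  ; the actual fp16 operation
  %sum = fadd half %a.half, %b.half
  ; widen the result back into the low bits of a float for the return
  %2 = bitcast half %sum to i16
  %3 = zext i16 %2 to i32
  %4 = bitcast i32 %3 to float
  ret float %4
}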
diff --git a/test/CodeGen/ARM/inlineasm-operand-implicit-cast.ll b/test/CodeGen/ARM/inlineasm-operand-implicit-cast.ll index 7b98f0f0de3..8ae9f704fb9 100644 --- a/test/CodeGen/ARM/inlineasm-operand-implicit-cast.ll +++ b/test/CodeGen/ARM/inlineasm-operand-implicit-cast.ll @@ -134,7 +134,7 @@ define arm_aapcscc double @dbl_gprs_matching_spec_reg_in_op_soft(double %d1, dou ret double %add } -attributes #0 = { nounwind "target-features"="+d16,+vfp2,+vfp3,-fp-only-sp" "use-soft-float"="true" } +attributes #0 = { nounwind "target-features"="-d32,+vfp2,+vfp3" "use-soft-float"="true" } ; Check support for returning a float in GPR with hard float ABI @@ -304,4 +304,4 @@ define %struct.twodouble @dbl_gprs_matching_spec_reg_in_op_hard(double %d1, doub ret %struct.twodouble %res } -attributes #1 = { nounwind "target-features"="+d16,+vfp2,+vfp3,-fp-only-sp" "use-soft-float"="false" } +attributes #1 = { nounwind "target-features"="-d32,+vfp2,+vfp3" "use-soft-float"="false" } diff --git a/test/CodeGen/ARM/no-fpu.ll b/test/CodeGen/ARM/no-fpu.ll index c5d1f1951d7..13da7190a9f 100644 --- a/test/CodeGen/ARM/no-fpu.ll +++ b/test/CodeGen/ARM/no-fpu.ll @@ -1,6 +1,6 @@ -; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-neon,-vfp2 | FileCheck --check-prefix=NONEON-NOVFP %s +; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-neon,-vfp2d16sp | FileCheck --check-prefix=NONEON-NOVFP %s ; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-neon | FileCheck --check-prefix=NONEON %s -; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-vfp2 | FileCheck --check-prefix=NOVFP %s +; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-vfp2d16sp | FileCheck --check-prefix=NOVFP %s ; RUN: llc < %s -mtriple=armv7-none-gnueabi -mattr=-neon,+vfp2 | FileCheck --check-prefix=NONEON-VFP %s ; Check no NEON instructions are selected when feature is disabled. 
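The attribute rewrites in inlineasm-operand-implicit-cast.ll above show the per-function form of the same renaming: a restriction that used to be stated positively (+d16) becomes a negative feature (-d32), and the -fp-only-sp entry simply disappears because full vfp2/vfp3 already implies double precision. A minimal self-contained sketch, reusing the feature string from the hunks above with a hypothetical function:

define double @dadd(double %a, double %b) #0 {
  %sum = fadd double %a, %b
  ret double %sum
}
attributes #0 = { nounwind "target-features"="-d32,+vfp2,+vfp3" "use-soft-float"="false" }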
diff --git a/test/CodeGen/Thumb2/aapcs.ll b/test/CodeGen/Thumb2/aapcs.ll index 179c35c052a..651b9945825 100644 --- a/test/CodeGen/Thumb2/aapcs.ll +++ b/test/CodeGen/Thumb2/aapcs.ll @@ -1,5 +1,5 @@ ; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m4 -mattr=-vfp2 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 -mattr=+vfp4,+fp-only-sp | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP +; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 -mattr=+vfp4,-fp64 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a8 -mattr=+vfp3 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP define float @float_in_reg(float %a, float %b) { diff --git a/test/CodeGen/Thumb2/float-intrinsics-double.ll b/test/CodeGen/Thumb2/float-intrinsics-double.ll index 657d1b172da..05d303adb55 100644 --- a/test/CodeGen/Thumb2/float-intrinsics-double.ll +++ b/test/CodeGen/Thumb2/float-intrinsics-double.ll @@ -1,7 +1,7 @@ ; RUN: llc < %s -mtriple=thumbv7-none-eabi -mcpu=cortex-m3 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=NONE ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP -check-prefix=FP-ARMv8 -; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP +; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s -check-prefix=CHECK -check-prefix=SOFT -check-prefix=SP ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4 ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a57 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8 diff --git a/test/CodeGen/Thumb2/float-intrinsics-float.ll b/test/CodeGen/Thumb2/float-intrinsics-float.ll index 8ee2af03eca..ec81164b422 100644 --- a/test/CodeGen/Thumb2/float-intrinsics-float.ll +++ b/test/CodeGen/Thumb2/float-intrinsics-float.ll @@ -2,7 +2,7 @@ ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m4 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=NO-VMLA ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m33 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=NO-VMLA ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=VFP -check-prefix=FP-ARMv8 -check-prefix=VMLA -; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=+fp-only-sp | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=FP-ARMv8 -check-prefix=VMLA +; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-m7 -mattr=-fp64 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=SP -check-prefix=FP-ARMv8 -check-prefix=VMLA ; RUN: llc < %s -mtriple=thumbv7-none-eabihf -mcpu=cortex-a7 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=VFP4 -check-prefix=NO-VMLA ; RUN: llc < %s 
-mtriple=thumbv7-none-eabihf -mcpu=cortex-a57 | FileCheck %s -check-prefix=CHECK -check-prefix=HARD -check-prefix=DP -check-prefix=NEON -check-prefix=FP-ARMv8 -check-prefix=VMLA diff --git a/test/CodeGen/Thumb2/t2sizereduction.mir b/test/CodeGen/Thumb2/t2sizereduction.mir index 6b05f7f42a5..aa92b19dbdc 100644 --- a/test/CodeGen/Thumb2/t2sizereduction.mir +++ b/test/CodeGen/Thumb2/t2sizereduction.mir @@ -29,7 +29,7 @@ br i1 %exitcond, label %for.cond.cleanup, label %for.body } - attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m7" "target-features"="+d16,+dsp,+fp-armv8,+fp-only-sp,+hwdiv,+strict-align,+thumb-mode,-crc,-dotprod,-hwdiv-arm,-ras" "unsafe-fp-math"="false" "use-soft-float"="false" } + attributes #0 = { norecurse nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="cortex-m7" "target-features"="-d32,+dsp,+fp-armv8,-fp64,+hwdiv,+strict-align,+thumb-mode,-crc,-dotprod,-hwdiv-arm,-ras" "unsafe-fp-math"="false" "use-soft-float"="false" } ... --- diff --git a/test/MC/ARM/armv8.3a-js.s b/test/MC/ARM/armv8.3a-js.s index fbbdd981864..9a2ab9cfe52 100644 --- a/test/MC/ARM/armv8.3a-js.s +++ b/test/MC/ARM/armv8.3a-js.s @@ -1,7 +1,7 @@ // RUN: llvm-mc -triple arm-none-none-eabi -show-encoding -mattr=+v8.3a,+fp-armv8 < %s 2>&1 | FileCheck %s --check-prefix=ARM // RUN: llvm-mc -triple thumb-none-none-eabi -show-encoding -mattr=+v8.3a,+fp-armv8 < %s 2>&1 | FileCheck %s --check-prefix=THUMB // RUN: not llvm-mc -triple arm-none-none-eabi -show-encoding -mattr=+v8.2a,+fp-armv8 < %s 2>&1 | FileCheck --check-prefix=REQ-V83 %s -// RUN: not llvm-mc -triple arm-none-none-eabi -show-encoding -mattr=+v8.3a,-fp-armv8 < %s 2>&1 | FileCheck --check-prefix=REQ-FP %s +// RUN: not llvm-mc -triple arm-none-none-eabi -show-encoding -mattr=+v8.3a,-fp-armv8d16sp < %s 2>&1 | FileCheck --check-prefix=REQ-FP %s vjcvt.s32.f64 s1, d2 // ARM: vjcvt.s32.f64 s1, d2 @ encoding: [0xc2,0x0b,0xf9,0xee] @@ -13,4 +13,4 @@ // ARM: vjcvt.s32.f64 s17, d18 @ encoding: [0xe2,0x8b,0xf9,0xee] // THUMB: vjcvt.s32.f64 s17, d18 @ encoding: [0xf9,0xee,0xe2,0x8b] // REQ-V83: error: instruction requires: armv8.3a -// REQ-FP: error: instruction requires: FPARMv8 +// REQ-FP: error: invalid instruction diff --git a/test/MC/ARM/d16.s b/test/MC/ARM/d16.s index 648992e9a7b..67b5095a132 100644 --- a/test/MC/ARM/d16.s +++ b/test/MC/ARM/d16.s @@ -1,5 +1,5 @@ -@ RUN: llvm-mc < %s -triple thumbv7-unknown-unknown -show-encoding -mattr=+vfp4,-d16 2>&1 | FileCheck %s --check-prefix=D32 -@ RUN: not llvm-mc < %s -triple thumbv7-unknown-unknown -show-encoding -mattr=+vfp4,+d16 2>&1 | FileCheck %s --check-prefix=D16 +@ RUN: llvm-mc < %s -triple thumbv7-unknown-unknown -show-encoding -mattr=+vfp4,+d32 2>&1 | FileCheck %s --check-prefix=D32 +@ RUN: not llvm-mc < %s -triple thumbv7-unknown-unknown -show-encoding -mattr=+vfp4,-d32 2>&1 | FileCheck %s --check-prefix=D16 @ D32-NOT: error: diff --git a/test/MC/ARM/invalid-neon-v8.s
b/test/MC/ARM/invalid-neon-v8.s index cae1fb331cf..ff087399595 100644 --- a/test/MC/ARM/invalid-neon-v8.s +++ b/test/MC/ARM/invalid-neon-v8.s @@ -1,4 +1,4 @@ -@ RUN: not llvm-mc -triple armv8 -mattr=-fp-armv8 -show-encoding < %s 2>&1 | FileCheck %s +@ RUN: not llvm-mc -triple armv8 -mattr=-fp-armv8d16sp -show-encoding < %s 2>&1 | FileCheck %s vmaxnm.f32 s4, d5, q1 @ CHECK: error: invalid instruction diff --git a/test/MC/ARM/single-precision-fp.s b/test/MC/ARM/single-precision-fp.s index f658e712319..9de4b101837 100644 --- a/test/MC/ARM/single-precision-fp.s +++ b/test/MC/ARM/single-precision-fp.s @@ -1,4 +1,4 @@ -@ RUN: not llvm-mc < %s -triple thumbv8-unknown-unknown -show-encoding -mattr=+fp-only-sp,-neon 2> %t > %t2 +@ RUN: not llvm-mc < %s -triple thumbv8-unknown-unknown -show-encoding -mattr=-fp64,-neon 2> %t > %t2 @ RUN: FileCheck %s < %t --check-prefix=CHECK-ERRORS @ RUN: FileCheck %s < %t2 diff --git a/test/MC/ARM/vldm-vstm-diags.s b/test/MC/ARM/vldm-vstm-diags.s index 854d5c55f2a..acefa71ccaf 100644 --- a/test/MC/ARM/vldm-vstm-diags.s +++ b/test/MC/ARM/vldm-vstm-diags.s @@ -1,5 +1,5 @@ @ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-D32 -@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null -mattr=+d16 %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-D16 +@ RUN: not llvm-mc -triple armv7-eabi -filetype asm -o /dev/null -mattr=-d32 %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-D16 // First operand must be a GPR vldm s0, {s1, s2} diff --git a/test/Transforms/Inline/ARM/inline-fp.ll b/test/Transforms/Inline/ARM/inline-fp.ll index be3dd2a93fd..fdc066c9ba6 100644 --- a/test/Transforms/Inline/ARM/inline-fp.ll +++ b/test/Transforms/Inline/ARM/inline-fp.ll @@ -1,6 +1,6 @@ ; RUN: opt -S -inline -mtriple=arm-eabi -pass-remarks=.* -pass-remarks-missed=.* < %s 2>&1 | FileCheck %s -check-prefix=NOFP ; RUN: opt -S -inline -mtriple=arm-eabi -mattr=+vfp2 -pass-remarks=.* -pass-remarks-missed=.* < %s 2>&1 | FileCheck %s -check-prefix=FULLFP -; RUN: opt -S -inline -mtriple=arm-eabi -mattr=+vfp2,+fp-only-sp -pass-remarks=.* -pass-remarks-missed=.* < %s 2>&1 | FileCheck %s -check-prefix=SINGLEFP +; RUN: opt -S -inline -mtriple=arm-eabi -mattr=+vfp2,-fp64 -pass-remarks=.* -pass-remarks-missed=.* < %s 2>&1 | FileCheck %s -check-prefix=SINGLEFP ; Make sure that soft float implementations are calculated as being more expensive ; to the inliner. 
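The RUN-line rewrites throughout these tests all follow one mapping: the restricted FPU that was previously requested positively with +fp-only-sp and +d16 is now obtained by starting from the full FPU and subtracting -fp64 and -d32. A minimal sketch of a test exercising both spellings of the same single-precision, 16-D-register configuration (hypothetical file; the old spelling is only meaningful before this patch, and the vadd.f32 expectation assumes the FPU is actually used for the body):

; Old spelling (pre-patch): request the restricted FPU positively.
; RUN: llc < %s -mtriple=armv7-eabi -mattr=+vfp3,+fp-only-sp,+d16 | FileCheck %s
; New spelling (this patch): start from full vfp3 and subtract capabilities.
; RUN: llc < %s -mtriple=armv7-eabi -mattr=+vfp3,-fp64,-d32 | FileCheck %s

define float @f32add(float %a, float %b) {
  %sum = fadd float %a, %b
  ret float %sum
}
; CHECK: vadd.f32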
diff --git a/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll b/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll index dc6adfb8e24..7bbfb7ef17a 100644 --- a/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll +++ b/test/Transforms/LoopUnroll/runtime-epilog-debuginfo.ll @@ -66,7 +66,7 @@ lee1.exit: ; preds = %lee1.exit.loopexit, ; Function Attrs: nounwind readnone declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1 -attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="arm7tdmi" "target-features"="+neon,+strict-align,+vfp3,-crypto,-d16,-fp-armv8,-fp-only-sp,-fp16,-vfp4" "unsafe-fp-math"="false" "use-soft-float"="false" } +attributes #0 = { nounwind readnone "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="arm7tdmi" "target-features"="+neon,+strict-align,+vfp3,-crypto,-fp-armv8,-fp16,-vfp4" "unsafe-fp-math"="false" "use-soft-float"="false" } attributes #1 = { nounwind readnone } !llvm.dbg.cu = !{!0} -- 2.40.0