From: Thomas Raoux
Date: Thu, 26 Sep 2019 00:16:01 +0000 (+0000)
Subject: [TargetLowering] Make allowsMemoryAccess method virtual.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=3a2a144e7e6cb53770929fb131995401301c3e35;p=llvm

[TargetLowering] Make allowsMemoryAccess method virtual.

Rename the old function to make explicit that it only checks alignment.
The new allowsMemoryAccess calls the alignment-based check by default and
can be overridden by a target to indicate whether the memory access is
legal or not.

Differential Revision: https://reviews.llvm.org/D67121

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@372935 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/CodeGen/TargetLowering.h b/include/llvm/CodeGen/TargetLowering.h
index 42d9341210d..8c94456e86a 100644
--- a/include/llvm/CodeGen/TargetLowering.h
+++ b/include/llvm/CodeGen/TargetLowering.h
@@ -1472,11 +1472,30 @@ public:
     return false;
   }
 
+  /// This function returns true if the memory access is aligned or if the
+  /// target allows this specific unaligned memory access. If the access is
+  /// allowed, the optional final parameter returns if the access is also fast
+  /// (as defined by the target).
+  bool allowsMemoryAccessForAlignment(
+      LLVMContext &Context, const DataLayout &DL, EVT VT,
+      unsigned AddrSpace = 0, unsigned Alignment = 1,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+      bool *Fast = nullptr) const;
+
+  /// Return true if the memory access of this type is aligned or if the target
+  /// allows this specific unaligned access for the given MachineMemOperand.
+  /// If the access is allowed, the optional final parameter returns if the
+  /// access is also fast (as defined by the target).
+  bool allowsMemoryAccessForAlignment(LLVMContext &Context,
+                                      const DataLayout &DL, EVT VT,
+                                      const MachineMemOperand &MMO,
+                                      bool *Fast = nullptr) const;
+
   /// Return true if the target supports a memory access of this type for the
   /// given address space and alignment. If the access is allowed, the optional
   /// final parameter returns if the access is also fast (as defined by the
   /// target).
-  bool
+  virtual bool
   allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                      unsigned AddrSpace = 0, unsigned Alignment = 1,
                      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 80afbbee1b1..36523f4d809 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4952,7 +4952,7 @@ bool DAGCombiner::isLegalNarrowLdSt(LSBaseSDNode *LDST,
   if (LDST->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits())
     return false;
 
-  // Ensure that this isn't going to produce an unsupported unaligned access.
+  // Ensure that this isn't going to produce an unsupported memory access.
   if (ShAmt &&
       !TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                               LDST->getAddressSpace(), ShAmt / 8,
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 3d43295d7df..0efcaaa19cb 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -503,8 +503,8 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
       // expand it.
       EVT MemVT = ST->getMemoryVT();
       const DataLayout &DL = DAG.getDataLayout();
-      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
-                                  *ST->getMemOperand())) {
+      if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
+                                              *ST->getMemOperand())) {
         LLVM_DEBUG(dbgs() << "Expanding unsupported unaligned store\n");
         SDValue Result = TLI.expandUnalignedStore(ST, DAG);
         ReplaceNode(SDValue(ST, 0), Result);
@@ -618,8 +618,8 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
       EVT MemVT = ST->getMemoryVT();
       // If this is an unaligned store and the target doesn't support it,
       // expand it.
-      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
-                                  *ST->getMemOperand())) {
+      if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
+                                              *ST->getMemOperand())) {
         SDValue Result = TLI.expandUnalignedStore(ST, DAG);
         ReplaceNode(SDValue(ST, 0), Result);
       }
@@ -679,8 +679,8 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
     const DataLayout &DL = DAG.getDataLayout();
     // If this is an unaligned load and the target doesn't support it,
     // expand it.
-    if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
-                                *LD->getMemOperand())) {
+    if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
+                                            *LD->getMemOperand())) {
       std::tie(RVal, RChain) = TLI.expandUnalignedLoad(LD, DAG);
     }
     break;
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 557df03784d..7ac5c94a154 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -1505,12 +1505,9 @@ unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
   return DL.getABITypeAlignment(Ty);
 }
 
-bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
-                                            const DataLayout &DL, EVT VT,
-                                            unsigned AddrSpace,
-                                            unsigned Alignment,
-                                            MachineMemOperand::Flags Flags,
-                                            bool *Fast) const {
+bool TargetLoweringBase::allowsMemoryAccessForAlignment(
+    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
+    unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
   // Check if the specified alignment is sufficient based on the data layout.
   // TODO: While using the data layout works in practice, a better solution
   // would be to implement this check directly (make this a virtual function).
@@ -1528,6 +1525,21 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
   return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
 }
 
+bool TargetLoweringBase::allowsMemoryAccessForAlignment(
+    LLVMContext &Context, const DataLayout &DL, EVT VT,
+    const MachineMemOperand &MMO, bool *Fast) const {
+  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
+                                        MMO.getAlignment(), MMO.getFlags(),
+                                        Fast);
+}
+
+bool TargetLoweringBase::allowsMemoryAccess(
+    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
+    unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
+  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
+                                        Flags, Fast);
+}
+
 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                             const DataLayout &DL, EVT VT,
                                             const MachineMemOperand &MMO,
diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 203cc672b62..65fa2720b51 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -681,8 +681,9 @@ bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
     return false;
 
   bool Fast = false;
-  return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), CastTy,
-                            MMO, &Fast) && Fast;
+  return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                        CastTy, MMO, &Fast) &&
+         Fast;
 }
 
 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 690114297d6..385984a51f2 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -7316,8 +7316,8 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
          "Custom lowering for non-i32 vectors hasn't been implemented.");
 
-  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
-                          *Load->getMemOperand())) {
+  if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                      MemVT, *Load->getMemOperand())) {
     SDValue Ops[2];
     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
     return DAG.getMergeValues(Ops, DL);
@@ -7818,8 +7818,8 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   assert(VT.isVector() &&
          Store->getValue().getValueType().getScalarType() == MVT::i32);
 
-  if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
-                          *Store->getMemOperand())) {
+  if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                      VT, *Store->getMemOperand())) {
     return expandUnalignedStore(Store, DAG);
   }
 
diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 66b6e11bb07..7cbec61c3ba 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -2673,7 +2673,8 @@ HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
     DoDefault = true;
 
   if (!AlignLoads) {
-    if (allowsMemoryAccess(Ctx, DL, LN->getMemoryVT(), *LN->getMemOperand()))
+    if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(),
+                                       *LN->getMemOperand()))
       return Op;
     DoDefault = true;
   }
@@ -2681,7 +2682,8 @@ HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
     // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
     MVT PartTy = HaveAlign <= 8 ?
                  MVT::getIntegerVT(8 * HaveAlign) : MVT::getVectorVT(MVT::i8, HaveAlign);
-    DoDefault = allowsMemoryAccess(Ctx, DL, PartTy, *LN->getMemOperand());
+    DoDefault =
+        allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand());
   }
   if (DoDefault) {
     std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 9b710f2b866..9acd0bea66f 100644
--- a/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -2230,8 +2230,8 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   if (Op.getValueType() == MVT::v2f16) {
     LoadSDNode *Load = cast<LoadSDNode>(Op);
     EVT MemVT = Load->getMemoryVT();
-    if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
-                            *Load->getMemOperand())) {
+    if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                        MemVT, *Load->getMemOperand())) {
       SDValue Ops[2];
       std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
       return DAG.getMergeValues(Ops, SDLoc(Op));
@@ -2273,8 +2273,8 @@ SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   // v2f16 is legal, so we can't rely on legalizer to handle unaligned
   // stores and have to handle it here.
   if (VT == MVT::v2f16 &&
-      !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
-                          *Store->getMemOperand()))
+      !allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                      VT, *Store->getMemOperand()))
     return expandUnalignedStore(Store, DAG);
 
   if (VT.isVector())
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index fc6fd40c82e..ea3dcfc9d7d 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -414,8 +414,8 @@ SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
          "Unexpected extension type");
   assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
 
-  if (allowsMemoryAccess(Context, DAG.getDataLayout(), LD->getMemoryVT(),
-                         *LD->getMemOperand()))
+  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
+                                     LD->getMemoryVT(), *LD->getMemOperand()))
     return SDValue();
 
   SDValue Chain = LD->getChain();
@@ -488,8 +488,8 @@ SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   assert(!ST->isTruncatingStore() && "Unexpected store type");
   assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
 
-  if (allowsMemoryAccess(Context, DAG.getDataLayout(), ST->getMemoryVT(),
-                         *ST->getMemOperand()))
+  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
+                                     ST->getMemoryVT(), *ST->getMemOperand()))
    return SDValue();
 
   SDValue Chain = ST->getChain();
@@ -1780,8 +1780,9 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
     // Replace unaligned store of unaligned load with memmove.
     StoreSDNode *ST = cast<StoreSDNode>(N);
     if (!DCI.isBeforeLegalize() ||
-        allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
-                           ST->getMemoryVT(), *ST->getMemOperand()) ||
+        allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                       ST->getMemoryVT(),
+                                       *ST->getMemOperand()) ||
         ST->isVolatile() || ST->isIndexed()) {
       break;
     }
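
For illustration only, not part of the patch: a backend that needs to restrict
memory accesses for reasons beyond alignment could now override the virtual
allowsMemoryAccess and fall back to allowsMemoryAccessForAlignment for the
default behaviour. The sketch below assumes a hypothetical MyTargetLowering
class and an invented "no vector accesses in address space 3" rule; only the
two allowsMemoryAccess* entry points come from the patch itself.

  #include "llvm/CodeGen/TargetLowering.h"

  using namespace llvm;

  // Hypothetical target lowering class; not real LLVM code.
  class MyTargetLowering : public TargetLowering {
  public:
    using TargetLowering::TargetLowering;

    // Override the now-virtual hook to veto accesses that are acceptable from
    // an alignment point of view but illegal for other target-specific
    // reasons.
    bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                            unsigned AddrSpace, unsigned Alignment,
                            MachineMemOperand::Flags Flags,
                            bool *Fast) const override {
      // Reuse the default alignment-only check first (this is what the base
      // implementation of allowsMemoryAccess does after this patch).
      if (!allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace,
                                          Alignment, Flags, Fast))
        return false;
      // Invented extra constraint, purely for illustration.
      if (AddrSpace == 3 && VT.isVector())
        return false;
      return true;
    }
  };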