public:
/// \name Scalar TTI Implementations
/// @{
- bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
- unsigned BitWidth, unsigned AddressSpace,
- unsigned Alignment, bool *Fast) const {
+ bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
+ unsigned AddressSpace, unsigned Alignment,
+ bool *Fast) const {
EVT E = EVT::getIntegerVT(Context, BitWidth);
- return getTLI()->allowsMisalignedMemoryAccesses(E, AddressSpace, Alignment, Fast);
+ return getTLI()->allowsMisalignedMemoryAccesses(
+ E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
}
bool hasBranchDivergence() { return false; }
/// copy/move/set is converted to a sequence of store operations. Its use
/// helps to ensure that such replacements don't generate code that causes an
/// alignment error (trap) on the target machine.
- virtual bool allowsMisalignedMemoryAccesses(EVT,
- unsigned AddrSpace = 0,
- unsigned Align = 1,
- bool * /*Fast*/ = nullptr) const {
+ virtual bool allowsMisalignedMemoryAccesses(
+ EVT, unsigned AddrSpace = 0, unsigned Align = 1,
+ MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+ bool * /*Fast*/ = nullptr) const {
return false;
}
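// Illustrative only (not part of this change): a minimal sketch of how a
// target override could consult the new Flags argument, e.g. to stay
// conservative for volatile accesses. "MyTargetLowering" and the alignment
// policy below are assumptions for illustration, not code from this patch.
bool MyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, unsigned Align,
    MachineMemOperand::Flags Flags, bool *Fast) const {
  // Expand misaligned volatile accesses instead of emitting them directly.
  if (Flags & MachineMemOperand::MOVolatile)
    return false;
  // Misaligned accesses are legal; report them as fast only from 4-byte
  // alignment upwards (hypothetical policy).
  if (Fast)
    *Fast = Align >= 4;
  return true;
}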
/// given address space and alignment. If the access is allowed, the optional
/// final parameter returns if the access is also fast (as defined by the
/// target).
- bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
- unsigned AddrSpace = 0, unsigned Alignment = 1,
- bool *Fast = nullptr) const;
+ bool
+ allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
+ unsigned AddrSpace = 0, unsigned Alignment = 1,
+ MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+ bool *Fast = nullptr) const;
  /// Return true if the target supports a memory access of this type for the
  /// given MachineMemOperand. If the access is allowed, the optional
  /// final parameter returns if the access is also fast (as defined by the
  /// target).
// Ensure that this isn't going to produce an unsupported unaligned access.
if (ShAmt &&
!TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
- LDST->getAddressSpace(), ShAmt / 8))
+ LDST->getAddressSpace(), ShAmt / 8,
+ LDST->getMemOperand()->getFlags()))
return false;
// It's not possible to generate a constant of extended or untyped type.
// issuing a (or a pair of) unaligned and overlapping load / store.
bool Fast;
if (NumMemOps && AllowOverlap && NewVTSize < Size &&
- allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) &&
+ allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign,
+ MachineMemOperand::MONone, &Fast) &&
Fast)
VTSize = Size;
else {
const DataLayout &DL, EVT VT,
unsigned AddrSpace,
unsigned Alignment,
+ MachineMemOperand::Flags Flags,
bool *Fast) const {
// Check if the specified alignment is sufficient based on the data layout.
// TODO: While using the data layout works in practice, a better solution
}
// This is a misaligned access.
- return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
+ return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
}
bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                            const DataLayout &DL, EVT VT,
                                            const MachineMemOperand &MMO,
                                            bool *Fast) const {
return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
- MMO.getAlignment(), Fast);
+ MMO.getAlignment(), MMO.getFlags(), Fast);
}
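// Illustrative only (not part of this change): with the MachineMemOperand
// overload above, callers can forward the access flags implicitly instead of
// listing address space, alignment and flags by hand. The helper name is
// hypothetical; TLI, DAG and LD stand for values in scope at a typical
// SelectionDAG call site.
static bool canKeepMisalignedLoad(const TargetLowering &TLI, SelectionDAG &DAG,
                                  LoadSDNode *LD) {
  bool Fast = false;
  return TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
                                LD->getMemoryVT(), *LD->getMemOperand(),
                                &Fast) &&
         Fast;
}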
BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
return MVT::i64;
}
-bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned AddrSpace,
- unsigned Align,
- bool *Fast) const {
+bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+ bool *Fast) const {
if (Subtarget->requiresStrictAlign())
return false;
unsigned AS = StoreNode->getAddressSpace();
unsigned Align = StoreNode->getAlignment();
if (Align < MemVT.getStoreSize() &&
- !allowsMisalignedMemoryAccesses(MemVT, AS, Align, nullptr)) {
+ !allowsMisalignedMemoryAccesses(
+ MemVT, AS, Align, StoreNode->getMemOperand()->getFlags(), nullptr)) {
return scalarizeVectorStore(StoreNode, DAG);
}
if (memOpAlign(SrcAlign, DstAlign, AlignCheck))
return true;
bool Fast;
- return allowsMisalignedMemoryAccesses(VT, 0, 1, &Fast) && Fast;
+ return allowsMisalignedMemoryAccesses(VT, 0, 1, MachineMemOperand::MONone,
+ &Fast) &&
+ Fast;
};
if (CanUseNEON && IsMemset && !IsSmallMemset &&
/// Returns true if the target allows unaligned memory accesses of the
/// specified type.
- bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
- unsigned Align = 1,
- bool *Fast = nullptr) const override;
+ bool allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
+ MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+ bool *Fast = nullptr) const override;
/// Provide custom lowering hooks for some operations.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
// Expand unaligned loads earlier than legalization. Due to visitation order
// problems during legalization, the emitted instructions to pack and unpack
// the bytes again are not eliminated in the case of an unaligned copy.
- if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
+ if (!allowsMisalignedMemoryAccesses(
+ VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) {
if (VT.isVector())
return scalarizeVectorLoad(LN, DAG);
// order problems during legalization, the emitted instructions to pack and
// unpack the bytes again are not eliminated in the case of an unaligned
// copy.
- if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
+ if (!allowsMisalignedMemoryAccesses(
+ VT, AS, Align, SN->getMemOperand()->getFlags(), &IsFast)) {
if (VT.isVector())
return scalarizeVectorStore(SN, DAG);
unsigned Align = StoreNode->getAlignment();
if (Align < MemVT.getStoreSize() &&
- !allowsMisalignedMemoryAccesses(MemVT, AS, Align, nullptr)) {
+ !allowsMisalignedMemoryAccesses(
+ MemVT, AS, Align, StoreNode->getMemOperand()->getFlags(), nullptr)) {
return expandUnalignedStore(StoreNode, DAG);
}
return true;
}
-bool R600TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned AddrSpace,
- unsigned Align,
- bool *IsFast) const {
+bool R600TargetLowering::allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+ bool *IsFast) const {
if (IsFast)
*IsFast = false;
bool canMergeStoresTo(unsigned AS, EVT MemVT,
const SelectionDAG &DAG) const override;
- bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
- unsigned Align,
- bool *IsFast) const override;
+ bool allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AS, unsigned Align,
+ MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+ bool *IsFast = nullptr) const override;
private:
unsigned Gen;
return true;
}
-bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned AddrSpace,
- unsigned Align,
- bool *IsFast) const {
+bool SITargetLowering::allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+ bool *IsFast) const {
if (IsFast)
*IsFast = false;
bool canMergeStoresTo(unsigned AS, EVT MemVT,
const SelectionDAG &DAG) const override;
- bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
- unsigned Align,
- bool *IsFast) const override;
+ bool allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AS, unsigned Align,
+ MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+ bool *IsFast = nullptr) const override;
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
unsigned SrcAlign, bool IsMemset,
return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
}
-bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned,
+bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
unsigned,
+ MachineMemOperand::Flags,
bool *Fast) const {
// Depends what it gets converted into if the type is weird.
if (!VT.isSimple())
bool Fast;
if (Size >= 16 &&
(memOpAlign(SrcAlign, DstAlign, 16) ||
- (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) {
+ (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
+ MachineMemOperand::MONone, &Fast) &&
+ Fast))) {
return MVT::v2f64;
} else if (Size >= 8 &&
(memOpAlign(SrcAlign, DstAlign, 8) ||
- (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) &&
+ (allowsMisalignedMemoryAccesses(
+ MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
Fast))) {
return MVT::f64;
}
/// is "fast" by reference in the second argument.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
unsigned Align,
+ MachineMemOperand::Flags Flags,
bool *Fast) const override;
EVT getOptimalMemOpType(uint64_t Size,
return MVT::Other;
}
-bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned AS, unsigned Align, bool *Fast) const {
+bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AS, unsigned Align, MachineMemOperand::Flags Flags,
+ bool *Fast) const {
if (Fast)
*Fast = false;
return Subtarget.isHVXVectorType(VT.getSimpleVT());
const AttributeList &FuncAttributes) const override;
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
- unsigned Align, bool *Fast) const override;
+ unsigned Align, MachineMemOperand::Flags Flags, bool *Fast) const override;
/// Returns relocation base for the given PIC jumptable.
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
return new Mips16TargetLowering(TM, STI);
}
-bool
-Mips16TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned,
- unsigned,
- bool *Fast) const {
+bool Mips16TargetLowering::allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
return false;
}
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
unsigned Align,
+ MachineMemOperand::Flags Flags,
bool *Fast) const override;
MachineBasicBlock *
Op->getOperand(2));
}
-bool
-MipsSETargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned,
- unsigned,
- bool *Fast) const {
+bool MipsSETargetLowering::allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
if (Subtarget.systemSupportsUnalignedAccess()) {
void addMSAFloatType(MVT::SimpleValueType Ty,
const TargetRegisterClass *RC);
- bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS = 0,
- unsigned Align = 1,
- bool *Fast = nullptr) const override;
+ bool allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AS = 0, unsigned Align = 1,
+ MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+ bool *Fast = nullptr) const override;
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
unsigned,
unsigned,
+ MachineMemOperand::Flags,
bool *Fast) const {
if (DisablePPCUnaligned)
return false;
/// Is unaligned memory access allowed for the given type, and is it fast
/// relative to software emulation.
- bool allowsMisalignedMemoryAccesses(EVT VT,
- unsigned AddrSpace,
- unsigned Align = 1,
- bool *Fast = nullptr) const override;
+ bool allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned AddrSpace, unsigned Align = 1,
+ MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+ bool *Fast = nullptr) const override;
/// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
/// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
return isUInt<32>(Imm) || isUInt<32>(-Imm);
}
-bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned,
- unsigned,
- bool *Fast) const {
+bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
+ EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
// Unaligned accesses should never be slower than the expanded version.
// We check specifically for aligned accesses in the few cases where
// they are required.
Instruction *I = nullptr) const override;
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
unsigned Align,
+ MachineMemOperand::Flags Flags,
bool *Fast) const override;
bool isTruncateFree(Type *, Type *) const override;
bool isTruncateFree(EVT, EVT) const override;
}
bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
- EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/, bool *Fast) const {
+ EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
+ MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
// WebAssembly supports unaligned accesses, though it should be declared
// with the p2align attribute on loads and stores which do so, and there
// may be a performance impact. We tell LLVM they're "fast" because
unsigned AS,
Instruction *I = nullptr) const override;
bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, unsigned Align,
+ MachineMemOperand::Flags Flags,
bool *Fast) const override;
bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
return true;
}
-bool
-X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned,
- unsigned,
- bool *Fast) const {
+bool X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
+ unsigned,
+ MachineMemOperand::Flags,
+ bool *Fast) const {
if (Fast) {
switch (VT.getSizeInBits()) {
default:
/// Returns true if the target allows unaligned memory accesses of the
/// specified type. Returns whether it is "fast" in the last argument.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
- bool *Fast) const override;
+ MachineMemOperand::Flags Flags,
+ bool *Fast) const override;
/// Provide custom lowering hooks for some operations.
///