return false;
}
+ /// Return true if the memory access is aligned or if the target allows this
+ /// specific unaligned memory access. If the access is allowed, the optional
+ /// final parameter returns whether the access is also fast (as defined by
+ /// the target).
+ bool allowsMemoryAccessForAlignment(
+ LLVMContext &Context, const DataLayout &DL, EVT VT,
+ unsigned AddrSpace = 0, unsigned Alignment = 1,
+ MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+ bool *Fast = nullptr) const;
+
+ /// Return true if the memory access of this type is aligned or if the target
+ /// allows this specific unaligned access for the given MachineMemOperand.
+ /// If the access is allowed, the optional final parameter returns whether
+ /// the access is also fast (as defined by the target).
+ bool allowsMemoryAccessForAlignment(LLVMContext &Context,
+ const DataLayout &DL, EVT VT,
+ const MachineMemOperand &MMO,
+ bool *Fast = nullptr) const;
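A minimal usage sketch of the new overload pair, mirroring the legalizer call sites further down in this patch. The wrapper name shouldExpandStore is hypothetical; TLI, DAG and ST are assumed to be supplied by the caller.

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// Sketch only: decide whether a store needs the unaligned-store expansion,
// using the MachineMemOperand-based overload declared above.
static bool shouldExpandStore(const TargetLowering &TLI, SelectionDAG &DAG,
                              StoreSDNode *ST) {
  bool Fast = false;
  // Address space, alignment and flags are taken from the store's memory
  // operand; `Fast` is filled in only when the access is allowed.
  if (TLI.allowsMemoryAccessForAlignment(*DAG.getContext(),
                                         DAG.getDataLayout(),
                                         ST->getMemoryVT(),
                                         *ST->getMemOperand(), &Fast))
    return false; // Supported; `Fast` reports whether it is also fast.
  return true;    // Caller would fall back to TLI.expandUnalignedStore(ST, DAG).
}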
+
/// Return true if the target supports a memory access of this type for the
/// given address space and alignment. If the access is allowed, the optional
/// final parameter returns if the access is also fast (as defined by the
/// target).
- bool
+ virtual bool
allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
unsigned AddrSpace = 0, unsigned Alignment = 1,
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
if (LDST->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits())
return false;
- // Ensure that this isn't going to produce an unsupported unaligned access.
+ // Ensure that this isn't going to produce an unsupported memory access.
if (ShAmt &&
!TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
LDST->getAddressSpace(), ShAmt / 8,
// expand it.
EVT MemVT = ST->getMemoryVT();
const DataLayout &DL = DAG.getDataLayout();
- if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
- *ST->getMemOperand())) {
+ if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
+ *ST->getMemOperand())) {
LLVM_DEBUG(dbgs() << "Expanding unsupported unaligned store\n");
SDValue Result = TLI.expandUnalignedStore(ST, DAG);
ReplaceNode(SDValue(ST, 0), Result);
EVT MemVT = ST->getMemoryVT();
// If this is an unaligned store and the target doesn't support it,
// expand it.
- if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
- *ST->getMemOperand())) {
+ if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
+ *ST->getMemOperand())) {
SDValue Result = TLI.expandUnalignedStore(ST, DAG);
ReplaceNode(SDValue(ST, 0), Result);
}
const DataLayout &DL = DAG.getDataLayout();
// If this is an unaligned load and the target doesn't support it,
// expand it.
- if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
- *LD->getMemOperand())) {
+ if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
+ *LD->getMemOperand())) {
std::tie(RVal, RChain) = TLI.expandUnalignedLoad(LD, DAG);
}
break;
return DL.getABITypeAlignment(Ty);
}
-bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
- const DataLayout &DL, EVT VT,
- unsigned AddrSpace,
- unsigned Alignment,
- MachineMemOperand::Flags Flags,
- bool *Fast) const {
+bool TargetLoweringBase::allowsMemoryAccessForAlignment(
+ LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
+ unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
// Check if the specified alignment is sufficient based on the data layout.
// TODO: While using the data layout works in practice, a better solution
// would be to implement this check directly (make this a virtual function).
return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
}
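Based on the check described in the comment above, a sufficiently aligned access is accepted from the data layout alone, and only an under-aligned access is referred to the target's allowsMisalignedMemoryAccesses hook. A small illustrative sketch, assuming TLI, Ctx and DL are in scope and that i32 has 4-byte ABI alignment in DL:

bool Fast = false;
// ABI-aligned (4 bytes for i32 here): legal regardless of the target hook.
bool Aligned = TLI.allowsMemoryAccessForAlignment(
    Ctx, DL, MVT::i32, /*AddrSpace=*/0, /*Alignment=*/4,
    MachineMemOperand::MONone, &Fast);
// Under-aligned (1 byte): the result now depends on the target's
// allowsMisalignedMemoryAccesses implementation.
bool Misaligned = TLI.allowsMemoryAccessForAlignment(
    Ctx, DL, MVT::i32, /*AddrSpace=*/0, /*Alignment=*/1,
    MachineMemOperand::MONone, &Fast);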
+bool TargetLoweringBase::allowsMemoryAccessForAlignment(
+ LLVMContext &Context, const DataLayout &DL, EVT VT,
+ const MachineMemOperand &MMO, bool *Fast) const {
+ return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
+ MMO.getAlignment(), MMO.getFlags(),
+ Fast);
+}
+
+bool TargetLoweringBase::allowsMemoryAccess(
+ LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
+ unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
+ return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
+ Flags, Fast);
+}
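Because allowsMemoryAccess is now virtual and its default simply forwards to the alignment check above, a target can override it to add restrictions beyond alignment. A sketch under the assumption of a hypothetical MyTargetLowering subclass; the address-space rule is invented purely for illustration.

// Hypothetical target override: reject vector accesses in one address space,
// otherwise keep the default alignment-based behaviour.
bool MyTargetLowering::allowsMemoryAccess(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
  if (VT.isVector() && AddrSpace == 3) // Invented restriction for the sketch.
    return false;
  return TargetLoweringBase::allowsMemoryAccess(Context, DL, VT, AddrSpace,
                                                Alignment, Flags, Fast);
}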
+
bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
const DataLayout &DL, EVT VT,
const MachineMemOperand &MMO,
return false;
bool Fast = false;
- return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), CastTy,
- MMO, &Fast) && Fast;
+ return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+ CastTy, MMO, &Fast) &&
+ Fast;
}
// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
"Custom lowering for non-i32 vectors hasn't been implemented.");
- if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
- *Load->getMemOperand())) {
+ if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+ MemVT, *Load->getMemOperand())) {
SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
return DAG.getMergeValues(Ops, DL);
assert(VT.isVector() &&
Store->getValue().getValueType().getScalarType() == MVT::i32);
- if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- *Store->getMemOperand())) {
+ if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+ VT, *Store->getMemOperand())) {
return expandUnalignedStore(Store, DAG);
}
DoDefault = true;
if (!AlignLoads) {
- if (allowsMemoryAccess(Ctx, DL, LN->getMemoryVT(), *LN->getMemOperand()))
+ if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(),
+ *LN->getMemOperand()))
return Op;
DoDefault = true;
}
// The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
: MVT::getVectorVT(MVT::i8, HaveAlign);
- DoDefault = allowsMemoryAccess(Ctx, DL, PartTy, *LN->getMemOperand());
+ DoDefault =
+ allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand());
}
if (DoDefault) {
std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);
if (Op.getValueType() == MVT::v2f16) {
LoadSDNode *Load = cast<LoadSDNode>(Op);
EVT MemVT = Load->getMemoryVT();
- if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
- *Load->getMemOperand())) {
+ if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+ MemVT, *Load->getMemOperand())) {
SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
return DAG.getMergeValues(Ops, SDLoc(Op));
// v2f16 is legal, so we can't rely on legalizer to handle unaligned
// stores and have to handle it here.
if (VT == MVT::v2f16 &&
- !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
- *Store->getMemOperand()))
+ !allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+ VT, *Store->getMemOperand()))
return expandUnalignedStore(Store, DAG);
if (VT.isVector())
"Unexpected extension type");
assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
- if (allowsMemoryAccess(Context, DAG.getDataLayout(), LD->getMemoryVT(),
- *LD->getMemOperand()))
+ if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
+ LD->getMemoryVT(), *LD->getMemOperand()))
return SDValue();
SDValue Chain = LD->getChain();
assert(!ST->isTruncatingStore() && "Unexpected store type");
assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
- if (allowsMemoryAccess(Context, DAG.getDataLayout(), ST->getMemoryVT(),
- *ST->getMemOperand()))
+ if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
+ ST->getMemoryVT(), *ST->getMemOperand()))
return SDValue();
SDValue Chain = ST->getChain();
// Replace unaligned store of unaligned load with memmove.
StoreSDNode *ST = cast<StoreSDNode>(N);
if (!DCI.isBeforeLegalize() ||
- allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
- ST->getMemoryVT(), *ST->getMemOperand()) ||
+ allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+ ST->getMemoryVT(),
+ *ST->getMemOperand()) ||
ST->isVolatile() || ST->isIndexed()) {
break;
}