/// modes that operate across loop iterations.
bool shouldFavorBackedgeIndex(const Loop *L) const;
/// Return true if the target supports masked store.
- bool isLegalMaskedStore(Type *DataType) const;
+ bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) const;
/// Return true if the target supports masked load.
- bool isLegalMaskedLoad(Type *DataType) const;
+ bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment) const;
/// Return true if the target supports nontemporal store.
bool isLegalNTStore(Type *DataType, Align Alignment) const;
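
For callers the change is mechanical: the legality query now carries the access alignment as a MaybeAlign, where MaybeAlign(0) is the empty ("alignment unknown") state. A minimal sketch of a hypothetical caller (canUseMaskedStore is not part of the patch):

// Hypothetical helper, for illustration only: query masked-store legality
// for a vector type. MaybeAlign(A) is empty when A == 0, i.e. the
// alignment was never specified.
bool canUseMaskedStore(const TargetTransformInfo &TTI, Type *VecTy,
                       unsigned Alignment) {
  return TTI.isLegalMaskedStore(VecTy, MaybeAlign(Alignment));
}
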
TargetLibraryInfo *LibInfo) = 0;
virtual bool shouldFavorPostInc() const = 0;
virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
- virtual bool isLegalMaskedStore(Type *DataType) = 0;
- virtual bool isLegalMaskedLoad(Type *DataType) = 0;
+ virtual bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) = 0;
+ virtual bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment) = 0;
virtual bool isLegalNTStore(Type *DataType, Align Alignment) = 0;
virtual bool isLegalNTLoad(Type *DataType, Align Alignment) = 0;
virtual bool isLegalMaskedScatter(Type *DataType) = 0;
bool shouldFavorBackedgeIndex(const Loop *L) const override {
return Impl.shouldFavorBackedgeIndex(L);
}
- bool isLegalMaskedStore(Type *DataType) override {
- return Impl.isLegalMaskedStore(DataType);
+ bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) override {
+ return Impl.isLegalMaskedStore(DataType, Alignment);
}
- bool isLegalMaskedLoad(Type *DataType) override {
- return Impl.isLegalMaskedLoad(DataType);
+ bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment) override {
+ return Impl.isLegalMaskedLoad(DataType, Alignment);
}
bool isLegalNTStore(Type *DataType, Align Alignment) override {
return Impl.isLegalNTStore(DataType, Alignment);
bool shouldFavorBackedgeIndex(const Loop *L) const { return false; }
- bool isLegalMaskedStore(Type *DataType) { return false; }
+ bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) {
+   return false;
+ }
- bool isLegalMaskedLoad(Type *DataType) { return false; }
+ bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment) {
+   return false;
+ }
bool isLegalNTStore(Type *DataType, Align Alignment) {
// By default, assume nontemporal memory stores are available for stores
return TTIImpl->shouldFavorBackedgeIndex(L);
}
-bool TargetTransformInfo::isLegalMaskedStore(Type *DataType) const {
- return TTIImpl->isLegalMaskedStore(DataType);
+bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
+ MaybeAlign Alignment) const {
+ return TTIImpl->isLegalMaskedStore(DataType, Alignment);
}
-bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType) const {
- return TTIImpl->isLegalMaskedLoad(DataType);
+bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
+ MaybeAlign Alignment) const {
+ return TTIImpl->isLegalMaskedLoad(DataType, Alignment);
}
bool TargetTransformInfo::isLegalNTStore(Type *DataType,
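
The new parameter is threaded through all three layers of the TTI dispatch; schematically:

// Dispatch chain for the new signature (schematic, not literal code):
//   TargetTransformInfo::isLegalMaskedLoad(Ty, Alignment)    // public wrapper
//     -> Model<T>::isLegalMaskedLoad(Ty, Alignment)          // type-erased virtual
//       -> <Target>TTIImpl::isLegalMaskedLoad(Ty, Alignment) // per-target hook
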
switch (II->getIntrinsicID()) {
default:
break;
- case Intrinsic::masked_load:
+ case Intrinsic::masked_load: {
// Scalarize unsupported vector masked load
- if (TTI->isLegalMaskedLoad(CI->getType()))
+ unsigned Alignment =
+ cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
+ if (TTI->isLegalMaskedLoad(CI->getType(), MaybeAlign(Alignment)))
return false;
scalarizeMaskedLoad(CI, ModifiedDT);
return true;
- case Intrinsic::masked_store:
- if (TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType()))
+ }
+ case Intrinsic::masked_store: {
+ unsigned Alignment =
+ cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
+ if (TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(),
+ MaybeAlign(Alignment)))
return false;
scalarizeMaskedStore(CI, ModifiedDT);
return true;
+ }
case Intrinsic::masked_gather:
if (TTI->isLegalMaskedGather(CI->getType()))
return false;
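
The argument indices used above follow the masked intrinsic signatures, where the alignment is encoded as a constant i32 operand. An illustrative helper (not part of the patch) that extracts it:

// Illustrative only: the alignment is operand 1 for
// llvm.masked.load(ptr, align, mask, passthru) and operand 2 for
// llvm.masked.store(value, ptr, align, mask).
static unsigned getMaskedMemIntrinAlignment(const CallInst *CI, bool IsStore) {
  unsigned Idx = IsStore ? 2 : 1;
  return cast<ConstantInt>(CI->getArgOperand(Idx))->getZExtValue();
}
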
return BaseT::getAddressComputationCost(Ty, SE, Ptr);
}
-bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy) {
+bool ARMTTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
if (!EnableMaskedLoadStores || !ST->hasMVEIntegerOps())
return false;
return ST->getMaxInterleaveFactor();
}
- bool isLegalMaskedLoad(Type *DataTy);
- bool isLegalMaskedStore(Type *DataTy) { return isLegalMaskedLoad(DataTy); }
+ bool isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment);
+ bool isLegalMaskedStore(Type *DataTy, MaybeAlign Alignment) {
+ return isLegalMaskedLoad(DataTy, Alignment);
+ }
int getMemcpyCost(const Instruction *I);
unsigned NumElem = SrcVTy->getVectorNumElements();
VectorType *MaskTy =
VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
- if ((IsLoad && !isLegalMaskedLoad(SrcVTy)) ||
- (IsStore && !isLegalMaskedStore(SrcVTy)) || !isPowerOf2_32(NumElem)) {
+ if ((IsLoad && !isLegalMaskedLoad(SrcVTy, MaybeAlign(Alignment))) ||
+ (IsStore && !isLegalMaskedStore(SrcVTy, MaybeAlign(Alignment))) ||
+ !isPowerOf2_32(NumElem)) {
// Scalarization
int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
int ScalarCompareCost = getCmpSelInstrCost(
return ST->hasMacroFusion() || ST->hasBranchFusion();
}
-bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
+bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, MaybeAlign Alignment) {
if (!ST->hasAVX())
return false;
((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
}
-bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
- return isLegalMaskedLoad(DataType);
+bool X86TTIImpl::isLegalMaskedStore(Type *DataType, MaybeAlign Alignment) {
+ return isLegalMaskedLoad(DataType, Alignment);
}
bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
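
Note that the X86 hooks shown here never read Alignment, so for now any MaybeAlign value yields the same answer; the parameter is threaded through only to keep the interface uniform. For example (illustrative, assuming an AVX-capable subtarget):

// Illustrative query: with AVX, an <8 x float> masked store is reported
// legal regardless of the alignment passed, since the X86 legality check
// is purely type- and subtarget-based.
static bool canMaskStoreV8F32(X86TTIImpl &TTI, LLVMContext &Ctx) {
  VectorType *VTy = VectorType::get(Type::getFloatTy(Ctx), 8);
  return TTI.isLegalMaskedStore(VTy, MaybeAlign(1));
}
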
bool isLSRCostLess(TargetTransformInfo::LSRCost &C1,
TargetTransformInfo::LSRCost &C2);
bool canMacroFuseCmp();
- bool isLegalMaskedLoad(Type *DataType);
- bool isLegalMaskedStore(Type *DataType);
+ bool isLegalMaskedLoad(Type *DataType, MaybeAlign Alignment);
+ bool isLegalMaskedStore(Type *DataType, MaybeAlign Alignment);
bool isLegalNTLoad(Type *DataType, Align Alignment);
bool isLegalNTStore(Type *DataType, Align Alignment);
bool isLegalMaskedGather(Type *DataType);
/// Returns true if the target machine supports masked store operation
/// for the given \p DataType and kind of access to \p Ptr.
- bool isLegalMaskedStore(Type *DataType, Value *Ptr) {
- return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedStore(DataType);
+ bool isLegalMaskedStore(Type *DataType, Value *Ptr, unsigned Alignment) {
+ return Legal->isConsecutivePtr(Ptr) &&
+ TTI.isLegalMaskedStore(DataType, MaybeAlign(Alignment));
}
/// Returns true if the target machine supports masked load operation
/// for the given \p DataType and kind of access to \p Ptr.
- bool isLegalMaskedLoad(Type *DataType, Value *Ptr) {
- return Legal->isConsecutivePtr(Ptr) && TTI.isLegalMaskedLoad(DataType);
+ bool isLegalMaskedLoad(Type *DataType, Value *Ptr, unsigned Alignment) {
+ return Legal->isConsecutivePtr(Ptr) &&
+ TTI.isLegalMaskedLoad(DataType, MaybeAlign(Alignment));
}
/// Returns true if the target machine supports masked scatter operation
return false;
auto *Ptr = getLoadStorePointerOperand(I);
auto *Ty = getMemInstValueType(I);
+ unsigned Alignment = getLoadStoreAlignment(I);
// We have already decided how to vectorize this instruction, get that
// result.
if (VF > 1) {
return WideningDecision == CM_Scalarize;
}
return isa<LoadInst>(I) ?
- !(isLegalMaskedLoad(Ty, Ptr) || isLegalMaskedGather(Ty))
- : !(isLegalMaskedStore(Ty, Ptr) || isLegalMaskedScatter(Ty));
+ !(isLegalMaskedLoad(Ty, Ptr, Alignment) || isLegalMaskedGather(Ty))
+ : !(isLegalMaskedStore(Ty, Ptr, Alignment) || isLegalMaskedScatter(Ty));
}
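
In the vectorizer the alignment comes straight off the IR instruction via getLoadStoreAlignment, which returns 0 when no alignment was recorded; MaybeAlign then maps that 0 to "unknown". A condensed sketch of the pattern (illustrative, not the patch's code):

// Illustrative only: how an instruction's alignment reaches the TTI query.
static bool maskLegalForInst(const TargetTransformInfo &TTI, Instruction *I) {
  Type *Ty = isa<LoadInst>(I)
                 ? I->getType()
                 : cast<StoreInst>(I)->getValueOperand()->getType();
  unsigned Alignment = getLoadStoreAlignment(I); // 0 if unspecified
  return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, MaybeAlign(Alignment))
                          : TTI.isLegalMaskedStore(Ty, MaybeAlign(Alignment));
}
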
case Instruction::UDiv:
case Instruction::SDiv:
"Masked interleave-groups for predicated accesses are not enabled.");
auto *Ty = getMemInstValueType(I);
- return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty)
- : TTI.isLegalMaskedStore(Ty);
+ unsigned Alignment = getLoadStoreAlignment(I);
+ return isa<LoadInst>(I) ? TTI.isLegalMaskedLoad(Ty, MaybeAlign(Alignment))
+ : TTI.isLegalMaskedStore(Ty, MaybeAlign(Alignment));
}
bool LoopVectorizationCostModel::memoryInstructionCanBeWidened(Instruction *I,