bool isLegalMaskedLoad(Type *DataType) const;
/// Return true if the target supports nontemporal store.
- bool isLegalNTStore(Type *DataType, unsigned Alignment) const;
+ bool isLegalNTStore(Type *DataType, llvm::Align Alignment) const;
/// Return true if the target supports nontemporal load.
- bool isLegalNTLoad(Type *DataType, unsigned Alignment) const;
+ bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) const;
/// Return true if the target supports masked scatter.
bool isLegalMaskedScatter(Type *DataType) const;
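With this change the two nontemporal legality queries take an llvm::Align instead of a raw unsigned byte count. A minimal caller-side sketch, assuming a TargetTransformInfo reference and an IR type are already in hand (the helper name and parameters below are illustrative, not part of the patch):

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/Type.h"
  #include "llvm/Support/Alignment.h"
  #include <cstdint>

  // Hypothetical helper: query nontemporal-store legality for VecTy at a
  // given byte alignment. The only caller-visible difference from the old
  // interface is the explicit llvm::Align wrapper around the byte count.
  static bool canEmitNTStore(const llvm::TargetTransformInfo &TTI,
                             llvm::Type *VecTy, uint64_t AlignInBytes) {
    return TTI.isLegalNTStore(VecTy, llvm::Align(AlignInBytes));
  }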
virtual bool shouldFavorBackedgeIndex(const Loop *L) const = 0;
virtual bool isLegalMaskedStore(Type *DataType) = 0;
virtual bool isLegalMaskedLoad(Type *DataType) = 0;
- virtual bool isLegalNTStore(Type *DataType, unsigned Alignment) = 0;
- virtual bool isLegalNTLoad(Type *DataType, unsigned Alignment) = 0;
+ virtual bool isLegalNTStore(Type *DataType, llvm::Align Alignment) = 0;
+ virtual bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) = 0;
virtual bool isLegalMaskedScatter(Type *DataType) = 0;
virtual bool isLegalMaskedGather(Type *DataType) = 0;
virtual bool isLegalMaskedCompressStore(Type *DataType) = 0;
bool isLegalMaskedLoad(Type *DataType) override {
return Impl.isLegalMaskedLoad(DataType);
}
- bool isLegalNTStore(Type *DataType, unsigned Alignment) override {
+ bool isLegalNTStore(Type *DataType, llvm::Align Alignment) override {
return Impl.isLegalNTStore(DataType, Alignment);
}
- bool isLegalNTLoad(Type *DataType, unsigned Alignment) override {
+ bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) override {
return Impl.isLegalNTLoad(DataType, Alignment);
}
bool isLegalMaskedScatter(Type *DataType) override {
bool isLegalMaskedLoad(Type *DataType) { return false; }
- bool isLegalNTStore(Type *DataType, unsigned Alignment) {
+ bool isLegalNTStore(Type *DataType, llvm::Align Alignment) {
// By default, assume nontemporal memory stores are available for stores
// that are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
return Alignment >= DataSize && isPowerOf2_32(DataSize);
}
- bool isLegalNTLoad(Type *DataType, unsigned Alignment) {
+ bool isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
// By default, assume nontemporal memory loads are available for loads that
// are aligned and have a size that is a power of 2.
unsigned DataSize = DL.getTypeStoreSize(DataType);
return Alignment >= DataSize && isPowerOf2_32(DataSize);
}
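The default implementations above encode a single rule: a nontemporal access is assumed legal when it is at least as aligned as it is wide and its width is a power of two. A standalone sketch of that rule, with the store size passed in directly (DL.getTypeStoreSize supplies it in the real code):

  #include "llvm/Support/Alignment.h"
  #include "llvm/Support/MathExtras.h"
  #include <cstdint>

  // Illustration of the default rule in isolation: a 16-byte access at
  // Align(16) is accepted, at Align(8) it is rejected, and a 12-byte access
  // is rejected regardless of alignment (not a power of two).
  static bool defaultNTLegal(uint64_t DataSize, llvm::Align Alignment) {
    return Alignment >= DataSize && llvm::isPowerOf2_64(DataSize);
  }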
bool TargetTransformInfo::isLegalNTStore(Type *DataType,
- unsigned Alignment) const {
+ llvm::Align Alignment) const {
return TTIImpl->isLegalNTStore(DataType, Alignment);
}
bool TargetTransformInfo::isLegalNTLoad(Type *DataType,
- unsigned Alignment) const {
+ llvm::Align Alignment) const {
return TTIImpl->isLegalNTLoad(DataType, Alignment);
}
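The public wrappers above forward through TargetTransformInfo's type-erased Concept/Model pair, which is why the same signature change shows up in the abstract interface, the Model override, and the facade. A stripped-down sketch of that dispatch pattern, with hypothetical class names rather than the actual LLVM layout:

  #include "llvm/IR/Type.h"
  #include "llvm/Support/Alignment.h"
  #include <utility>

  // Shape of the dispatch only; names are invented for illustration.
  struct NTConceptSketch {
    virtual ~NTConceptSketch() = default;
    virtual bool isLegalNTStore(llvm::Type *DataType,
                                llvm::Align Alignment) = 0;
  };

  template <typename ImplT> struct NTModelSketch : NTConceptSketch {
    ImplT Impl;
    explicit NTModelSketch(ImplT I) : Impl(std::move(I)) {}
    bool isLegalNTStore(llvm::Type *DataType,
                        llvm::Align Alignment) override {
      return Impl.isLegalNTStore(DataType, Alignment); // virtual -> concrete
    }
  };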
return isLegalMaskedLoad(DataType);
}
-bool X86TTIImpl::isLegalNTLoad(Type *DataType, unsigned Alignment) {
+bool X86TTIImpl::isLegalNTLoad(Type *DataType, llvm::Align Alignment) {
unsigned DataSize = DL.getTypeStoreSize(DataType);
// The only supported nontemporal loads are for aligned vectors of 16 or 32
// bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
return false;
}
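The hunk above elides the body between the comment and the final return false, so only the fallback path is visible. A hedged reconstruction of the kind of check the comment describes, written as a free function so it stands alone (treat the SSE4.1/AVX2 gating as an assumption about the target features, not as the exact code elided from this hunk):

  #include "llvm/Support/Alignment.h"
  #include <cstdint>

  // Sketch: nontemporal vector loads are taken to be legal only for 16- or
  // 32-byte vectors that are at least naturally aligned, with the 32-byte
  // form additionally requiring AVX2 (per the comment above); the 16-byte
  // MOVNTDQA-style load is assumed to need SSE4.1.
  static bool ntLoadLegalSketch(uint64_t DataSize, llvm::Align Alignment,
                                bool HasSSE41, bool HasAVX2) {
    if (Alignment < DataSize || (DataSize != 16 && DataSize != 32))
      return false;
    return DataSize == 16 ? HasSSE41 : HasAVX2;
  }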
-bool X86TTIImpl::isLegalNTStore(Type *DataType, unsigned Alignment) {
+bool X86TTIImpl::isLegalNTStore(Type *DataType, llvm::Align Alignment) {
unsigned DataSize = DL.getTypeStoreSize(DataType);
// SSE4A supports nontemporal stores of float and double at arbitrary
bool canMacroFuseCmp();
bool isLegalMaskedLoad(Type *DataType);
bool isLegalMaskedStore(Type *DataType);
- bool isLegalNTLoad(Type *DataType, unsigned Alignment);
- bool isLegalNTStore(Type *DataType, unsigned Alignment);
+ bool isLegalNTLoad(Type *DataType, llvm::Align Alignment);
+ bool isLegalNTStore(Type *DataType, llvm::Align Alignment);
bool isLegalMaskedGather(Type *DataType);
bool isLegalMaskedScatter(Type *DataType);
bool isLegalMaskedExpandLoad(Type *DataType);
Type *VecTy = VectorType::get(T, /*NumElements=*/2);
assert(VecTy && "did not find vectorized version of stored type");
unsigned Alignment = getLoadStoreAlignment(ST);
- if (!TTI->isLegalNTStore(VecTy, Alignment)) {
+ assert(Alignment && "Alignment should be set");
+ if (!TTI->isLegalNTStore(VecTy, llvm::Align(Alignment))) {
reportVectorizationFailure(
"nontemporal store instruction cannot be vectorized",
"nontemporal store instruction cannot be vectorized",
Type *VecTy = VectorType::get(I.getType(), /*NumElements=*/2);
assert(VecTy && "did not find vectorized version of load type");
unsigned Alignment = getLoadStoreAlignment(LD);
- if (!TTI->isLegalNTLoad(VecTy, Alignment)) {
+ assert(Alignment && "Alignment should be set");
+ if (!TTI->isLegalNTLoad(VecTy, llvm::Align(Alignment))) {
reportVectorizationFailure(
"nontemporal load instruction cannot be vectorized",
"nontemporal load instruction cannot be vectorized",