Cost += ConcreteTTI->getArithmeticInstrCost(BinaryOperator::LShr, RetTy,
                                            OpKindY, OpKindZ, OpPropsY,
                                            OpPropsBW);
// For non-rotates (X != Y) we must add shift-by-zero handling costs.
if (X != Y) {
- Type *CondTy = Type::getInt1Ty(RetTy->getContext());
- if (RetVF > 1)
- CondTy = VectorType::get(CondTy, RetVF);
+ Type *CondTy = RetTy->getWithNewBitWidth(1);
Cost += ConcreteTTI->getCmpSelInstrCost(BinaryOperator::ICmp, RetTy,
CondTy, nullptr);
Cost += ConcreteTTI->getCmpSelInstrCost(BinaryOperator::Select, RetTy,
                                        CondTy, nullptr);
}
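
For context, a scalar C++ restatement of the funnel-shift expansion being
costed here (fshl32 is a hypothetical helper, not LLVM's actual lowering):
when X != Y the shift amount may be zero, and Y >> (BW - 0) would be an
out-of-range shift, hence the extra icmp and select charged above.

#include <cstdint>

uint32_t fshl32(uint32_t X, uint32_t Y, uint32_t Z) {
  uint32_t Shamt = Z % 32;                    // urem by the bitwidth
  if (Shamt == 0)                             // the icmp + select costed above
    return X;                                 // fshl(X, Y, 0) returns X
  return (X << Shamt) | (Y >> (32 - Shamt));  // shl, lshr, or
}
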
unsigned getIntrinsicInstrCost(
Intrinsic::ID IID, Type *RetTy, ArrayRef<Type *> Tys, FastMathFlags FMF,
unsigned ScalarizationCostPassed = std::numeric_limits<unsigned>::max()) {
- unsigned RetVF = (RetTy->isVectorTy() ? RetTy->getVectorNumElements() : 1);
auto *ConcreteTTI = static_cast<T *>(this);
SmallVector<unsigned, 2> ISDs;
/*IsUnsigned=*/false);
case Intrinsic::sadd_sat:
case Intrinsic::ssub_sat: {
- Type *CondTy = Type::getInt1Ty(RetTy->getContext());
- if (RetVF > 1)
- CondTy = VectorType::get(CondTy, RetVF);
+ Type *CondTy = RetTy->getWithNewBitWidth(1);
Type *OpTy = StructType::create({RetTy, CondTy});
Intrinsic::ID OverflowOp = IID == Intrinsic::sadd_sat
                               ? Intrinsic::sadd_with_overflow
                               : Intrinsic::ssub_with_overflow;
}
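
A hedged scalar sketch of what this case models (sadd_sat32 is hypothetical;
the GCC/Clang builtin __builtin_add_overflow stands in for sadd.with.overflow,
whose {result, overflow} pair is the OpTy struct built above):

#include <cstdint>

int32_t sadd_sat32(int32_t X, int32_t Y) {
  int32_t Res;
  bool Ov = __builtin_add_overflow(X, Y, &Res);  // overflow intrinsic part
  // If it overflowed, both operands had the same sign; that sign picks the bound.
  int32_t Sat = X < 0 ? INT32_MIN : INT32_MAX;   // first select
  return Ov ? Sat : Res;                         // second select, on the i1 CondTy
}
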
case Intrinsic::uadd_sat:
case Intrinsic::usub_sat: {
- Type *CondTy = Type::getInt1Ty(RetTy->getContext());
- if (RetVF > 1)
- CondTy = VectorType::get(CondTy, RetVF);
+ Type *CondTy = RetTy->getWithNewBitWidth(1);
Type *OpTy = StructType::create({RetTy, CondTy});
Intrinsic::ID OverflowOp = IID == Intrinsic::uadd_sat
                               ? Intrinsic::uadd_with_overflow
                               : Intrinsic::usub_with_overflow;
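
The unsigned variant is cheaper to model: one overflow intrinsic and a single
select on the i1 condition, as in this hypothetical scalar sketch:

#include <cstdint>

uint32_t uadd_sat32(uint32_t X, uint32_t Y) {
  uint32_t Res;
  bool Ov = __builtin_add_overflow(X, Y, &Res);  // uadd.with.overflow part
  return Ov ? UINT32_MAX : Res;                  // one select on the i1 CondTy
}
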
case Intrinsic::smul_fix:
case Intrinsic::umul_fix: {
unsigned ExtSize = RetTy->getScalarSizeInBits() * 2;
- Type *ExtTy = Type::getIntNTy(RetTy->getContext(), ExtSize);
- if (RetVF > 1)
- ExtTy = VectorType::get(ExtTy, RetVF);
+ Type *ExtTy = RetTy->getWithNewBitWidth(ExtSize);
unsigned ExtOp =
IID == Intrinsic::smul_fix ? Instruction::SExt : Instruction::ZExt;
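
ExtTy is the 2x-wide type in which the fixed-point product is formed. A
hypothetical scalar equivalent (semantics only; the cost model charges the
extends, the widened multiply, the shifts, and the truncation individually):

#include <cstdint>

int32_t smul_fix32(int32_t X, int32_t Y, unsigned Scale) {
  int64_t Wide = (int64_t)X * (int64_t)Y;  // SExt both operands, mul at ExtSize
  return (int32_t)(Wide >> Scale);         // scale down and truncate back
}
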
case Intrinsic::smul_with_overflow:
case Intrinsic::umul_with_overflow: {
  Type *MulTy = RetTy->getContainedType(0);
Type *OverflowTy = RetTy->getContainedType(1);
unsigned ExtSize = MulTy->getScalarSizeInBits() * 2;
- Type *ExtTy = Type::getIntNTy(RetTy->getContext(), ExtSize);
- if (MulTy->isVectorTy())
- ExtTy = VectorType::get(ExtTy, MulTy->getVectorNumElements());
+ Type *ExtTy = MulTy->getWithNewBitWidth(ExtSize);
unsigned ExtOp =
    IID == Intrinsic::smul_with_overflow ? Instruction::SExt : Instruction::ZExt;
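
The same widening trick prices the multiply-with-overflow: a hypothetical
scalar equivalent that multiplies at ExtSize, truncates, and checks whether
the product survived the round trip:

#include <cstdint>

bool smul_with_overflow32(int32_t X, int32_t Y, int32_t &Res) {
  int64_t Wide = (int64_t)X * (int64_t)Y;  // extend both operands, mul at ExtSize
  Res = (int32_t)Wide;                     // truncated low half
  return Wide != (int64_t)Res;             // overflow flag: product didn't fit
}
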
return cast<VectorType>(this)->isScalable();
}
+ElementCount Type::getVectorElementCount() const {
+ return cast<VectorType>(this)->getElementCount();
+}
+
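
A minimal usage sketch (elementCountExample is a hypothetical function):
ElementCount carries the lane count together with the scalability flag, so a
type can be rebuilt with the same lane structure, which a bare
getVectorNumElements() round trip would lose for scalable vectors.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void elementCountExample(LLVMContext &Ctx) {
  Type *V4I32 = VectorType::get(Type::getInt32Ty(Ctx), 4);   // <4 x i32>
  ElementCount EC = V4I32->getVectorElementCount();          // 4 lanes, fixed
  // Rebuild with a new element type but an identical lane structure; for a
  // scalable vector the vscale flag travels along inside ElementCount.
  Type *V4I64 = VectorType::get(Type::getInt64Ty(Ctx), EC);  // <4 x i64>
  (void)V4I64;
}
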
/// Class to represent pointers.
class PointerType : public Type {
explicit PointerType(Type *ElType, unsigned AddrSpace);
return cast<IntegerType>(this)->getExtendedType();
}
+Type *Type::getWithNewBitWidth(unsigned NewBitWidth) const {
+ assert(
+ isIntOrIntVectorTy() &&
+ "Original type expected to be a vector of integers or a scalar integer.");
+ Type *NewType = getIntNTy(getContext(), NewBitWidth);
+ if (isVectorTy())
+ NewType = VectorType::get(NewType, getVectorElementCount());
+ return NewType;
+}
+
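
A minimal sketch of the new helper's behavior (bitWidthExample is a
hypothetical function): scalars stay scalar and vectors keep their element
count, which is exactly what the CondTy and ExtTy call sites above rely on.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void bitWidthExample(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Type *I1 = I32->getWithNewBitWidth(1);      // scalar in, scalar i1 out
  Type *V4I32 = VectorType::get(I32, 4);
  Type *V4I1 = V4I32->getWithNewBitWidth(1);  // <4 x i1>: lane count preserved
  (void)I1; (void)V4I1;
}
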
unsigned Type::getPointerAddressSpace() const {
return cast<PointerType>(getScalarType())->getAddressSpace();
}
inline bool getVectorIsScalable() const;
inline unsigned getVectorNumElements() const;
+ inline ElementCount getVectorElementCount() const;
Type *getVectorElementType() const {
assert(getTypeID() == VectorTyID);
return ContainedTys[0];
}
+ /// Given an integer or vector type, change the lane bitwidth to NewBitWidth,
+ /// whilst keeping the old number of lanes.
+ inline Type *getWithNewBitWidth(unsigned NewBitWidth) const;
+
/// Given a scalar/vector integer type, returns a type with elements twice as
/// wide as in the original type. For vectors, preserves element count.
inline Type *getExtendedType() const;
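
To contrast the two helpers, a small hypothetical sketch (contrastExample is
not LLVM code): getExtendedType doubles the element width, while
getWithNewBitWidth sets it explicitly; both preserve the lane count.

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void contrastExample(LLVMContext &Ctx) {
  Type *V8I16 = VectorType::get(Type::getInt16Ty(Ctx), 8);  // <8 x i16>
  Type *Doubled = V8I16->getExtendedType();                 // <8 x i32>
  Type *Explicit = V8I16->getWithNewBitWidth(64);           // <8 x i64>
  (void)Doubled; (void)Explicit;
}
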