}
// Add the offset to the index.
- unsigned EltSize =
- Vec.getValueType().getVectorElementType().getSizeInBits()/8;
+ unsigned EltSize = Vec.getValueType().getScalarSizeInBits() / 8;
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
// Then store the inserted part.
// Add the offset to the index.
- unsigned EltSize =
- Vec.getValueType().getVectorElementType().getSizeInBits()/8;
+ unsigned EltSize = Vec.getValueType().getScalarSizeInBits() / 8;
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
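The two hunks above feed the same quantity into ISD::MUL: the element index scaled by the element size in bytes. A minimal standalone sketch of that arithmetic (illustrative names, not part of the patch):

```c++
#include <cassert>
#include <cstdint>

// Byte offset of element Idx in a vector whose elements are EltBits wide.
std::uint64_t elementByteOffset(std::uint64_t Idx, unsigned EltBits) {
  assert(EltBits % 8 == 0 && "only whole-byte elements have a byte offset");
  unsigned EltSize = EltBits / 8; // mirrors getScalarSizeInBits() / 8 above
  return Idx * EltSize;
}

int main() {
  // For a v4i32 spilled to memory, element 3 starts 12 bytes in.
  assert(elementByteOffset(3, 32) == 12);
  return 0;
}
```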
// vector element type. Check that any extra bits introduced will be
// truncated away.
assert(N->getOperand(0).getValueSizeInBits() >=
- N->getValueType(0).getVectorElementType().getSizeInBits() &&
+ N->getValueType(0).getScalarSizeInBits() &&
"Type of inserted value narrower than vector element type!");
SmallVector<SDValue, 16> NewOps;
// Check that any extra bits introduced will be truncated away.
assert(N->getOperand(1).getValueSizeInBits() >=
- N->getValueType(0).getVectorElementType().getSizeInBits() &&
+ N->getValueType(0).getScalarSizeInBits() &&
"Type of inserted value narrower than vector element type!");
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0),
GetPromotedInteger(N->getOperand(1)),
// the vector element type. For example BUILD_VECTOR of type <1 x i1> with
// a constant i8 operand.
assert(Result.getValueSizeInBits() >=
- Op.getValueType().getVectorElementType().getSizeInBits() &&
+ Op.getValueType().getScalarSizeInBits() &&
"Invalid type for scalarized vector");
AnalyzeNewValue(Result);
/// Convert to a vector of integers of the same size.
SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) {
assert(Op.getValueType().isVector() && "Only applies to vectors!");
- unsigned EltWidth = Op.getValueType().getVectorElementType().getSizeInBits();
+ unsigned EltWidth = Op.getValueType().getScalarSizeInBits();
EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
unsigned NumElts = Op.getValueType().getVectorNumElements();
return DAG.getNode(ISD::BITCAST, SDLoc(Op),
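For reference, the only thing BitConvertVectorToIntegerVector derives from the vector type is its geometry: same element count and width, but integer elements. A tiny sketch with a made-up SimpleVT struct standing in for EVT (illustrative only):

```c++
#include <cassert>

struct SimpleVT { unsigned EltBits; unsigned NumElts; bool IsFloat; };

// Same geometry, integer elements: <N x fK> becomes <N x iK>.
SimpleVT toIntegerVectorType(SimpleVT VT) {
  return {VT.EltBits, VT.NumElts, false};
}

int main() {
  SimpleVT V4F32 = {32, 4, true};
  SimpleVT IntVT = toIntegerVectorType(V4F32);
  assert(IntVT.EltBits == 32 && IntVT.NumElts == 4 && !IntVT.IsFloat);
  return 0;
}
```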
// Now we need sign extend. Do this by shifting the elements. Even if these
// aren't legal operations, they have a better chance of being legalized
// without full scalarization than the sign extension does.
- unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
- unsigned SrcEltWidth = SrcVT.getVectorElementType().getSizeInBits();
+ unsigned EltWidth = VT.getScalarSizeInBits();
+ unsigned SrcEltWidth = SrcVT.getScalarSizeInBits();
SDValue ShiftAmount = DAG.getConstant(EltWidth - SrcEltWidth, DL, VT);
return DAG.getNode(ISD::SRA, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, Op, ShiftAmount),
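The comment above describes the classic in-register sign extension: shift the narrow value up to the top of the wider lane, then arithmetic-shift it back down. A scalar sketch of the same trick, with plain ints standing in for vector lanes (assumes the usual arithmetic right shift for negative values, guaranteed since C++20):

```c++
#include <cassert>
#include <cstdint>

// Sign-extend the low SrcBits bits of V within a 32-bit lane.
std::int32_t signExtendInReg(std::uint32_t V, unsigned SrcBits) {
  unsigned Shift = 32 - SrcBits; // EltWidth - SrcEltWidth, as above
  return static_cast<std::int32_t>(V << Shift) >> Shift; // SHL then SRA
}

int main() {
  assert(signExtendInReg(0xFF, 8) == -1);  // i8 -1 widened to i32
  assert(signExtendInReg(0x7F, 8) == 127); // positive values are unchanged
  return 0;
}
```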
LLVMContext &Ctx = *DAG.getContext();
EVT NewSrcVT = EVT::getVectorVT(
Ctx, EVT::getIntegerVT(
- Ctx, SrcVT.getVectorElementType().getSizeInBits() * 2),
+ Ctx, SrcVT.getScalarSizeInBits() * 2),
NumElements);
EVT SplitSrcVT =
EVT::getVectorVT(Ctx, SrcVT.getVectorElementType(), NumElements / 2);
// if we're trying to split it at all. assert() that's true, just in case.
assert(!(NumElements & 1) && "Splitting vector, but not in half!");
- unsigned InElementSize = InVT.getVectorElementType().getSizeInBits();
- unsigned OutElementSize = OutVT.getVectorElementType().getSizeInBits();
+ unsigned InElementSize = InVT.getScalarSizeInBits();
+ unsigned OutElementSize = OutVT.getScalarSizeInBits();
// If the input elements are only 1/2 the width of the result elements,
// just use the normal splitting. Our trick only work if there's room
// we care if the resultant vector is all ones, not whether the individual
// constants are.
SDValue NotZero = N->getOperand(i);
- unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
+ unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
if (CN->getAPIntValue().countTrailingOnes() < EltSize)
return false;
// We only want to check enough bits to cover the vector elements, because
// we care if the resultant vector is all zeros, not whether the individual
// constants are.
- unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
+ unsigned EltSize = N->getValueType(0).getScalarSizeInBits();
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op)) {
if (CN->getAPIntValue().countTrailingZeros() < EltSize)
return false;
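Both hunks above deliberately inspect only the low EltSize bits of each constant, because a BUILD_VECTOR operand may be wider than the element it fills. A standalone sketch of that masking idea (made-up helpers, not LLVM's APInt API):

```c++
#include <cassert>
#include <cstdint>

std::uint64_t lowBitsMask(unsigned EltSize) {
  return EltSize == 64 ? ~0ULL : ((1ULL << EltSize) - 1);
}

bool lowBitsAllOnes(std::uint64_t C, unsigned EltSize) {
  return (C & lowBitsMask(EltSize)) == lowBitsMask(EltSize);
}

bool lowBitsAllZeros(std::uint64_t C, unsigned EltSize) {
  return (C & lowBitsMask(EltSize)) == 0;
}

int main() {
  // An i32 constant 0xFFFF is "all ones" for 16-bit elements, not for 32-bit.
  assert(lowBitsAllOnes(0xFFFF, 16));
  assert(!lowBitsAllOnes(0xFFFF, 32));
  assert(lowBitsAllZeros(0xF0000, 16));
  return 0;
}
```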
// false.
unsigned int nOps = getNumOperands();
assert(nOps > 0 && "isConstantSplat has 0-size build vector");
- unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltBitSize = VT.getScalarSizeInBits();
for (unsigned j = 0; j < nOps; ++j) {
unsigned i = isBigEndian ? nOps-1-j : j;
MVT SVT = (MVT::SimpleValueType) nVT;
// Promote vectors of integers to vectors with the same number
// of elements, with a wider element type.
- if (SVT.getVectorElementType().getSizeInBits() > EltVT.getSizeInBits() &&
+ if (SVT.getScalarSizeInBits() > EltVT.getSizeInBits() &&
SVT.getVectorNumElements() == NElts && isTypeLegal(SVT)) {
TransformToType[i] = SVT;
RegisterTypeForVT[i] = SVT;
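The loop above searches for a legal vector type with the same element count but wider elements. A simplified sketch of that selection rule, with a made-up list standing in for the legal-type table:

```c++
#include <cassert>
#include <vector>

struct VecTy { unsigned EltBits; unsigned NumElts; };

// Return a wider-element candidate with the same element count, or {0, 0}.
VecTy promoteIntVector(VecTy VT, const std::vector<VecTy> &Legal) {
  for (const VecTy &C : Legal)
    if (C.EltBits > VT.EltBits && C.NumElts == VT.NumElts)
      return C;
  return {0, 0};
}

int main() {
  // Pretend v4i32 and v2i64 are legal but v4i16 is not: v4i16 -> v4i32.
  std::vector<VecTy> Legal = {{32, 4}, {64, 2}};
  VecTy Promoted = promoteIntVector({16, 4}, Legal);
  assert(Promoted.EltBits == 32 && Promoted.NumElts == 4);
  return 0;
}
```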
EVT EVT::changeExtendedVectorElementTypeToInteger() const {
LLVMContext &Context = LLVMTy->getContext();
- EVT IntTy = getIntegerVT(Context, getVectorElementType().getSizeInBits());
+ EVT IntTy = getIntegerVT(Context, getScalarSizeInBits());
return getVectorVT(Context, IntTy, getVectorNumElements());
}
for (const SDValue &Elt : N->op_values()) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
unsigned HalfSize = EltSize / 2;
if (isSigned) {
if (!isIntN(HalfSize, C->getSExtValue()))
assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
EVT VT = N->getValueType(0);
SDLoc dl(N);
- unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
+ unsigned EltSize = VT.getScalarSizeInBits() / 2;
unsigned NumElts = VT.getVectorNumElements();
MVT TruncVT = MVT::getIntegerVT(EltSize);
SmallVector<SDValue, 8> Ops;
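The two hunks above decide whether each BUILD_VECTOR constant survives truncation to half the element width; that is the usual isIntN/isUIntN-style range check. A standalone sketch (simplified: assumes 0 < N < 64):

```c++
#include <cassert>
#include <cstdint>

// Does V fit in an N-bit signed integer?  (Same idea as llvm::isIntN.)
bool fitsSignedBits(std::int64_t V, unsigned N) {
  std::int64_t Bound = std::int64_t(1) << (N - 1);
  return V >= -Bound && V < Bound;
}

// Does V fit in an N-bit unsigned integer?  (Same idea as llvm::isUIntN.)
bool fitsUnsignedBits(std::uint64_t V, unsigned N) {
  return V < (std::uint64_t(1) << N);
}

int main() {
  // For a v4i32 -> v4i16 truncation, HalfSize == 16:
  assert(fitsSignedBits(-32768, 16) && !fitsSignedBits(32768, 16));
  assert(fitsUnsignedBits(65535, 16) && !fitsUnsignedBits(65536, 16));
  return 0;
}
```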
}
}
unsigned ResMultiplier =
- VT.getVectorElementType().getSizeInBits() / SmallestEltTy.getSizeInBits();
+ VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
// The stars all align, our next step is to produce the mask for the shuffle.
SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
- int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits();
+ int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
SDValue Entry = Op.getOperand(i);
if (Entry.isUndef())
// trunc. So only std::min(SrcBits, DestBits) actually get defined in this
// segment.
EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
- int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
- VT.getVectorElementType().getSizeInBits());
+ int BitsDefined =
+ std::min(OrigEltTy.getSizeInBits(), VT.getScalarSizeInBits());
int LanesDefined = BitsDefined / BitsPerShuffleLane;
// This source is expected to fill ResMultiplier lanes of the final shuffle,
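The bookkeeping above rebuilds the shuffle over the smallest element type seen among the sources, so two ratios drive everything: how many shuffle lanes one result element spans (ResMultiplier) and how many lanes a given source entry actually defines. A sketch of that arithmetic with made-up numbers:

```c++
#include <algorithm>
#include <cassert>

struct ShuffleLanes {
  unsigned ResMultiplier;  // result-element bits / smallest-element bits
  unsigned NumShuffleElts; // total vector bits / smallest-element bits
};

ShuffleLanes computeLanes(unsigned ResEltBits, unsigned TotalBits,
                          unsigned SmallestEltBits) {
  return {ResEltBits / SmallestEltBits, TotalBits / SmallestEltBits};
}

int main() {
  // A 128-bit v4i32 result built from sources whose smallest element is i16:
  // the shuffle works on 8 x i16 lanes and each i32 result spans 2 of them.
  ShuffleLanes L = computeLanes(32, 128, 16);
  assert(L.ResMultiplier == 2 && L.NumShuffleElts == 8);

  // A source entry that only defines 16 of its 32 bits (e.g. it came from a
  // trunc) fills BitsDefined / BitsPerShuffleLane = 1 lane of the shuffle.
  int LanesDefined = std::min(16, 32) / 16;
  assert(LanesDefined == 1);
  return 0;
}
```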
assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
"Only possible block sizes for REV are: 16, 32, 64");
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
// Is C1 == ~C2, taking into account how much one can shift elements of a
// particular size?
uint64_t C2 = C2node->getZExtValue();
- unsigned ElemSizeInBits = VT.getVectorElementType().getSizeInBits();
+ unsigned ElemSizeInBits = VT.getScalarSizeInBits();
if (C2 > ElemSizeInBits)
return SDValue();
unsigned ElemMask = (1 << ElemSizeInBits) - 1;
if (!isConstant && !usesOnlyOneValue) {
SDValue Vec = DAG.getUNDEF(VT);
SDValue Op0 = Op.getOperand(0);
- unsigned ElemSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElemSize = VT.getScalarSizeInBits();
unsigned i = 0;
// For 32 and 64 bit types, use INSERT_SUBREG for lane zero to
// a) Avoid a RMW dependency on the full vector register, and
// If this is extracting the upper 64-bits of a 128-bit vector, we match
// that directly.
- if (Size == 64 && Val * VT.getVectorElementType().getSizeInBits() == 64)
+ if (Size == 64 && Val * VT.getScalarSizeInBits() == 64)
return Op;
return SDValue();
/// 0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+ int64_t ElementBits = VT.getScalarSizeInBits();
if (!getVShiftImm(Op, ElementBits, Cnt))
return false;
return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
/// 1 <= Value <= ElementBits for a right shift; or
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+ int64_t ElementBits = VT.getScalarSizeInBits();
if (!getVShiftImm(Op, ElementBits, Cnt))
return false;
return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
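isVShiftLImm and isVShiftRImm above encode the legal immediate ranges for vector shifts: [0, ElementBits) for left/long shifts and [1, ElementBits] for right/narrowing shifts. A standalone sketch of just those range checks:

```c++
#include <cassert>
#include <cstdint>

bool validLeftShiftImm(std::int64_t Cnt, std::int64_t ElementBits, bool isLong) {
  return Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits;
}

bool validRightShiftImm(std::int64_t Cnt, std::int64_t ElementBits,
                        bool isNarrow) {
  return Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits);
}

int main() {
  assert(validLeftShiftImm(31, 32, /*isLong=*/false));
  assert(!validLeftShiftImm(32, 32, /*isLong=*/false));
  assert(validRightShiftImm(32, 32, /*isNarrow=*/false));
  assert(!validRightShiftImm(0, 32, /*isNarrow=*/false));
  return 0;
}
```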
if (!Op.getOperand(1).getValueType().isVector())
return Op;
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
switch (Op.getOpcode()) {
default:
// We only have to look for constant vectors here since the general, variable
// case can be handled in TableGen.
- unsigned Bits = VT.getVectorElementType().getSizeInBits();
+ unsigned Bits = VT.getScalarSizeInBits();
uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1);
for (int i = 1; i >= 0; --i)
for (int j = 1; j >= 0; --j) {
// splat. The indexed instructions are going to be expecting a DUPLANE64, so
// canonicalise to that.
if (N0 == N1 && VT.getVectorNumElements() == 2) {
- assert(VT.getVectorElementType().getSizeInBits() == 64);
+ assert(VT.getScalarSizeInBits() == 64);
return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG),
DAG.getConstant(0, dl, MVT::i64));
}
if (SrcVT.getSizeInBits() != 64)
return SDValue();
- unsigned SrcEltSize = SrcVT.getVectorElementType().getSizeInBits();
+ unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
unsigned ElementCount = SrcVT.getVectorNumElements();
SrcVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize * 2), ElementCount);
SDLoc DL(N);
unsigned Alignment = 0;
if (NumVecs != 3) {
Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
- unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
+ unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
if (Alignment < 8 && Alignment < NumBytes)
unsigned Alignment = 0;
if (NumVecs != 3) {
Alignment = cast<ConstantSDNode>(Align)->getZExtValue();
- unsigned NumBytes = NumVecs * VT.getVectorElementType().getSizeInBits()/8;
+ unsigned NumBytes = NumVecs * VT.getScalarSizeInBits() / 8;
if (Alignment > NumBytes)
Alignment = NumBytes;
if (Alignment < 8 && Alignment < NumBytes)
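Both hunks above clamp the requested alignment to the number of bytes the intrinsic actually touches. A sketch of that clamping; the body of the final if is not visible in the hunk, so modelling it as dropping the alignment to 0 is an assumption here:

```c++
#include <cassert>

unsigned clampAlignment(unsigned Requested, unsigned NumVecs, unsigned EltBits) {
  unsigned NumBytes = NumVecs * EltBits / 8; // as in the hunks above
  unsigned Alignment = Requested;
  if (Alignment > NumBytes)
    Alignment = NumBytes;
  if (Alignment < 8 && Alignment < NumBytes)
    Alignment = 0; // assumption: treat small, partial alignment as none
  return Alignment;
}

int main() {
  // Two vectors of 64-bit elements -> 16 bytes; a 32-byte request is capped.
  assert(clampAlignment(32, 2, 64) == 16);
  // A 4-byte request on a 16-byte access is below 8 bytes, so it is dropped.
  assert(clampAlignment(4, 2, 64) == 0);
  return 0;
}
```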
assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
"Only possible block sizes for VREV are: 16, 32, 64");
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
// want to check the low half and high half of the shuffle mask as if it were
// the other case
static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
// Requires similar checks to that of isVTRNMask with
// respect the how results are returned.
static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
// Requires similar checks to that of isVTRNMask with respect the how results
// are returned.
static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
- unsigned EltSz = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSz = VT.getScalarSizeInBits();
if (EltSz == 64)
return false;
if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
// Use VDUP for non-constant splats. For f32 constant splats, reduce to
// i32 and try again.
SmallestEltTy = SrcEltTy;
}
unsigned ResMultiplier =
- VT.getVectorElementType().getSizeInBits() / SmallestEltTy.getSizeInBits();
+ VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
// The stars all align, our next step is to produce the mask for the shuffle.
SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
- int BitsPerShuffleLane = ShuffleVT.getVectorElementType().getSizeInBits();
+ int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
SDValue Entry = Op.getOperand(i);
if (Entry.isUndef())
// segment.
EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
- int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
- VT.getVectorElementType().getSizeInBits());
+ int BitsDefined =
+ std::min(OrigEltTy.getSizeInBits(), VT.getScalarSizeInBits());
int LanesDefined = BitsDefined / BitsPerShuffleLane;
// This source is expected to fill ResMultiplier lanes of the final shuffle,
bool ReverseVEXT, isV_UNDEF;
unsigned Imm, WhichResult;
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
return (EltSize >= 32 ||
ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
isVREVMask(M, VT, 64) ||
// of the same time so that they get CSEd properly.
ArrayRef<int> ShuffleMask = SVN->getMask();
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
if (EltSize <= 32) {
if (SVN->isSplat()) {
int Lane = SVN->getSplatIndex();
SDValue Vec = Op.getOperand(0);
if (Op.getValueType() == MVT::i32 &&
- Vec.getValueType().getVectorElementType().getSizeInBits() < 32) {
+ Vec.getValueType().getScalarSizeInBits() < 32) {
SDLoc dl(Op);
return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
}
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
SDNode *Elt = N->getOperand(i).getNode();
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
- unsigned EltSize = VT.getVectorElementType().getSizeInBits();
+ unsigned EltSize = VT.getScalarSizeInBits();
unsigned HalfSize = EltSize / 2;
if (isSigned) {
if (!isIntN(HalfSize, C->getSExtValue()))
// Construct a new BUILD_VECTOR with elements truncated to half the size.
assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
EVT VT = N->getValueType(0);
- unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
+ unsigned EltSize = VT.getScalarSizeInBits() / 2;
unsigned NumElts = VT.getVectorNumElements();
MVT TruncVT = MVT::getIntegerVT(EltSize);
SmallVector<SDValue, 8> Ops;
return SDValue();
// Make sure the VMOV element size is not bigger than the VDUPLANE elements.
- unsigned EltSize = Op.getValueType().getVectorElementType().getSizeInBits();
+ unsigned EltSize = Op.getValueType().getScalarSizeInBits();
// The canonical VMOV for a zero vector uses a 32-bit element size.
unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
unsigned EltBits;
if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
EltSize = 8;
EVT VT = N->getValueType(0);
- if (EltSize > VT.getVectorElementType().getSizeInBits())
+ if (EltSize > VT.getScalarSizeInBits())
return SDValue();
return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
EVT StVT = St->getMemoryVT();
unsigned NumElems = VT.getVectorNumElements();
assert(StVT != VT && "Cannot truncate to the same type");
- unsigned FromEltSz = VT.getVectorElementType().getSizeInBits();
- unsigned ToEltSz = StVT.getVectorElementType().getSizeInBits();
+ unsigned FromEltSz = VT.getScalarSizeInBits();
+ unsigned ToEltSz = StVT.getScalarSizeInBits();
// From, To sizes and ElemCount must be pow of two
if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
/// 0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+ int64_t ElementBits = VT.getScalarSizeInBits();
if (! getVShiftImm(Op, ElementBits, Cnt))
return false;
return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
int64_t &Cnt) {
assert(VT.isVector() && "vector shift count is not a vector type");
- int64_t ElementBits = VT.getVectorElementType().getSizeInBits();
+ int64_t ElementBits = VT.getScalarSizeInBits();
if (! getVShiftImm(Op, ElementBits, Cnt))
return false;
if (!isIntrinsic)
SDNode *Mask = CurDAG->getMachineNode(Hexagon::C2_mask, dl, MVT::i64, Op0);
unsigned NE = OpVT.getVectorNumElements();
EVT ExVT = N->getValueType(0);
- unsigned ES = ExVT.getVectorElementType().getSizeInBits();
+ unsigned ES = ExVT.getScalarSizeInBits();
uint64_t MV = 0, Bit = 1;
for (unsigned i = 0; i < NE; ++i) {
MV |= Bit;
if (UseHVX) {
ArrayRef<int> Mask = SVN->getMask();
size_t MaskLen = Mask.size();
- int ElemSizeInBits = VT.getVectorElementType().getSizeInBits();
+ int ElemSizeInBits = VT.getScalarSizeInBits();
if ((Subtarget.useHVXSglOps() && (ElemSizeInBits * MaskLen) == 64 * 8) ||
(Subtarget.useHVXDblOps() && (ElemSizeInBits * MaskLen) == 128 * 8)) {
// Return 1 for odd and 2 of even
APInt SplatValue, SplatUndef;
unsigned SplatBitSize;
bool HasAnyUndefs;
- unsigned EltSize = Ty.getVectorElementType().getSizeInBits();
+ unsigned EltSize = Ty.getScalarSizeInBits();
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
if (!Subtarget.hasDSP())
static SDValue lowerMSABitClearImm(SDValue Op, SelectionDAG &DAG) {
SDLoc DL(Op);
EVT ResTy = Op->getValueType(0);
- APInt BitImm = APInt(ResTy.getVectorElementType().getSizeInBits(), 1)
+ APInt BitImm = APInt(ResTy.getScalarSizeInBits(), 1)
<< cast<ConstantSDNode>(Op->getOperand(2))->getAPIntValue();
SDValue BitMask = DAG.getConstant(~BitImm, DL, ResTy);
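The lowering above builds the AND mask for a bit-clear immediate: a one shifted to the requested position, then complemented. The scalar equivalent, with made-up names:

```c++
#include <cassert>
#include <cstdint>

std::uint32_t clearBit(std::uint32_t Elt, unsigned Imm) {
  std::uint32_t BitImm = std::uint32_t(1) << Imm; // APInt(EltBits, 1) << Imm
  std::uint32_t BitMask = ~BitImm;                // the constant being ANDed in
  return Elt & BitMask;
}

int main() {
  assert(clearBit(0xFFFFFFFFu, 4) == 0xFFFFFFEFu);
  assert(clearBit(0x10u, 4) == 0);
  return 0;
}
```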
/// then the VPERM for the shuffle. All in all a very slow sequence.
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT)
const override {
- if (VT.getVectorElementType().getSizeInBits() % 8 == 0)
+ if (VT.getScalarSizeInBits() % 8 == 0)
return TypeWidenVector;
return TargetLoweringBase::getPreferredVectorAction(VT);
}
case ISD::INSERT_VECTOR_ELT: {
EVT VT = Node->getValueType(0);
- unsigned ElemBitSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElemBitSize = VT.getScalarSizeInBits();
if (ElemBitSize == 32) {
if (tryGather(Node, SystemZ::VGEF))
return;
if (VT.isVector()) {
Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
- switch (VT.getVectorElementType().getSizeInBits()) {
+ switch (VT.getScalarSizeInBits()) {
case 8:
break;
case 16: {
}
// Otherwise bitcast to the equivalent integer form and insert via a GPR.
- MVT IntVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
+ MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
SDValue PackedOp = Op.getOperand(0);
EVT OutVT = Op.getValueType();
EVT InVT = PackedOp.getValueType();
- unsigned ToBits = OutVT.getVectorElementType().getSizeInBits();
- unsigned FromBits = InVT.getVectorElementType().getSizeInBits();
+ unsigned ToBits = OutVT.getScalarSizeInBits();
+ unsigned FromBits = InVT.getScalarSizeInBits();
do {
FromBits *= 2;
EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
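The loop above widens the packed elements one unpack step at a time, doubling FromBits until it reaches the destination width; the loop's exit condition lies outside this hunk, so it is assumed below. A standalone sketch of the step count:

```c++
#include <cassert>

// Number of unpack steps needed to widen FromBits-wide elements to ToBits.
unsigned countUnpackSteps(unsigned FromBits, unsigned ToBits) {
  unsigned Steps = 0;
  do {
    FromBits *= 2; // one unpack step doubles the element width
    ++Steps;
  } while (FromBits != ToBits); // assumed exit condition
  return Steps;
}

int main() {
  assert(countUnpackSteps(8, 64) == 3);  // i8 -> i16 -> i32 -> i64
  assert(countUnpackSteps(32, 64) == 1); // i32 -> i64
  return 0;
}
```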
SDValue Op1 = Op.getOperand(1);
SDLoc DL(Op);
EVT VT = Op.getValueType();
- unsigned ElemBitSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElemBitSize = VT.getScalarSizeInBits();
// See whether the shift vector is a splat represented as BUILD_VECTOR.
if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
// Return true if VT is a vector whose elements are a whole number of bytes
// in width.
static bool canTreatAsByteVector(EVT VT) {
- return VT.isVector() && VT.getVectorElementType().getSizeInBits() % 8 == 0;
+ return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0;
}
// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
//
// (c) there are no multiplication instructions for the widest integer
// type (v2i64).
- if (VT.getVectorElementType().getSizeInBits() % 8 == 0)
+ if (VT.getScalarSizeInBits() % 8 == 0)
return TypeWidenVector;
return TargetLoweringBase::getPreferredVectorAction(VT);
}
void DecodePALIGNRMask(MVT VT, unsigned Imm,
SmallVectorImpl<int> &ShuffleMask) {
unsigned NumElts = VT.getVectorNumElements();
- unsigned Offset = Imm * (VT.getVectorElementType().getSizeInBits() / 8);
+ unsigned Offset = Imm * (VT.getScalarSizeInBits() / 8);
unsigned NumLanes = VT.getSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
MVT VT = N->getSimpleValueType(0);
- unsigned ElSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElSize = VT.getScalarSizeInBits();
bool Result = (Index * ElSize) % vecWidth == 0;
return Result;
cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
MVT VT = N->getSimpleValueType(0);
- unsigned ElSize = VT.getVectorElementType().getSizeInBits();
+ unsigned ElSize = VT.getScalarSizeInBits();
bool Result = (Index * ElSize) % vecWidth == 0;
return Result;
if (!isa<ConstantSDNode>(Idx)) {
if (VecVT.is512BitVector() ||
(VecVT.is256BitVector() && Subtarget.hasInt256() &&
- VecVT.getVectorElementType().getSizeInBits() == 32)) {
+ VecVT.getScalarSizeInBits() == 32)) {
- MVT MaskEltVT =
- MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
+ MVT MaskEltVT = MVT::getIntegerVT(VecVT.getScalarSizeInBits());
MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
MaskEltVT.getSizeInBits());
// In this case use SSE compare
bool UseAVX512Inst =
(OpVT.is512BitVector() ||
- OpVT.getVectorElementType().getSizeInBits() >= 32 ||
+ OpVT.getScalarSizeInBits() >= 32 ||
(Subtarget.hasBWI() && Subtarget.hasVLX()));
if (UseAVX512Inst)
SDValue SignExt = Curr;
if (CurrVT != InVT) {
unsigned SignExtShift =
- CurrVT.getVectorElementType().getSizeInBits() - InSVT.getSizeInBits();
+ CurrVT.getScalarSizeInBits() - InSVT.getSizeInBits();
SignExt = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
DAG.getConstant(SignExtShift, dl, MVT::i8));
}
if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
const APInt &ShiftAmt = AmtSplat->getAPIntValue();
- unsigned MaxAmount =
- VT.getSimpleVT().getVectorElementType().getSizeInBits();
+ unsigned MaxAmount = VT.getSimpleVT().getScalarSizeInBits();
// SSE2/AVX2 logical shifts always return a vector of 0s
// if the shift amount is bigger than or equal to
// Validate that the Mask operand is a vector sra node.
// FIXME: what to do for bytes, since there is a psignb/pblendvb, but
// there is no psrai.b
- unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
+ unsigned EltBits = MaskVT.getScalarSizeInBits();
unsigned SraAmt = ~0;
if (Mask.getOpcode() == ISD::SRA) {
if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
SDLoc dl(Mld);
assert(LdVT != VT && "Cannot extend to the same type");
- unsigned ToSz = VT.getVectorElementType().getSizeInBits();
- unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
+ unsigned ToSz = VT.getScalarSizeInBits();
+ unsigned FromSz = LdVT.getScalarSizeInBits();
// From/To sizes and ElemCount must be pow of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for extending masked load");
SDLoc dl(Mst);
assert(StVT != VT && "Cannot truncate to the same type");
- unsigned FromSz = VT.getVectorElementType().getSizeInBits();
- unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
+ unsigned FromSz = VT.getScalarSizeInBits();
+ unsigned ToSz = StVT.getScalarSizeInBits();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned NumElems = VT.getVectorNumElements();
assert(StVT != VT && "Cannot truncate to the same type");
- unsigned FromSz = VT.getVectorElementType().getSizeInBits();
- unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
+ unsigned FromSz = VT.getScalarSizeInBits();
+ unsigned ToSz = StVT.getScalarSizeInBits();
// The truncating store is legal in some cases. For example
// vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw