return true;
}
-bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
+bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
unsigned Index) const {
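// If EXTRACT_SUBVECTOR is neither legal nor custom for the result type it
// will be expanded, so the extraction cannot be cheap.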
if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
return false;
return NewLd;
}
-
+
// TODO: The code below fires only for loading the low v2i32 / v2f32
// of a v4i32 / v4f32. It's probably worth generalizing.
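// For example, (v4i32 (build_vector (load p), (load p+4), undef, undef))
// can be folded to a single 64-bit zero-extending vector load (a MOVQ-style
// VZEXT_LOAD) instead of two scalar loads plus inserts.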
if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
// Check for a build vector of consecutive loads.
if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
return LD;
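// The trailing 'false' is presumably the isAfterLegalize flag of
// EltsFromConsecutiveLoads, i.e. this combine runs before loads have been
// legalized.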
-
+
EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
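// HVT is the half-width vector type, e.g. two v4i32 halves for a v8i32
// source (assuming ExtVT is the element type of the vector being split).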
// Build both the lower and upper subvector.
int Size = Mask.size();
int Scale = 16 / Size;
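// Size is the shuffle width in elements; Scale converts an element count to a
// byte count within the 128-bit (16-byte) vector, so shifting by one element
// moves Scale bytes.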
- auto isSequential = [](int Base, int StartIndex, int EndIndex, int MaskOffset,
- ArrayRef<int> Mask) {
- for (int i = StartIndex; i < EndIndex; i++) {
- if (Mask[i] < 0)
- continue;
- if (i + Base != Mask[i] - MaskOffset)
- return false;
- }
- return true;
- };
-
for (int Shift = 1; Shift < Size; Shift++) {
int ByteShift = Shift * Scale;
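// ByteShift is the candidate PSRLDQ/PSLLDQ immediate, in bytes.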
}
if (ZeroableRight) {
- bool ValidShiftRight1 = isSequential(Shift, 0, Size - Shift, 0, Mask);
- bool ValidShiftRight2 = isSequential(Shift, 0, Size - Shift, Size, Mask);
+ bool ValidShiftRight1 =
+ isSequentialOrUndefInRange(Mask, 0, Size - Shift, Shift);
+ bool ValidShiftRight2 =
+ isSequentialOrUndefInRange(Mask, 0, Size - Shift, Size + Shift);
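// isSequentialOrUndefInRange(Mask, Pos, Size, Low) is the existing helper
// that checks Mask[i] == Low + (i - Pos) for all i in [Pos, Pos+Size),
// treating negative (undef) entries as matching. E.g. for a v8i16 shuffle
// with Shift == 2, Mask = <2,3,4,u,6,7,zz,zz> (u = undef, zz = zeroable)
// passes the first check: lanes 0..5 read sequentially from input 1 starting
// at lane 2, matching a PSRLDQ of ByteShift bytes. The second form
// (Low == Size + Shift) matches the same pattern reading from input 2.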
if (ValidShiftRight1 || ValidShiftRight2) {
// Cast the inputs to v2i64 to match PSRLDQ.
}
if (ZeroableLeft) {
- bool ValidShiftLeft1 = isSequential(-Shift, Shift, Size, 0, Mask);
- bool ValidShiftLeft2 = isSequential(-Shift, Shift, Size, Size, Mask);
+ bool ValidShiftLeft1 =
+ isSequentialOrUndefInRange(Mask, Shift, Size - Shift, 0);
+ bool ValidShiftLeft2 =
+ isSequentialOrUndefInRange(Mask, Shift, Size - Shift, Size);
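// For the left-shift case the low Shift lanes must be zeroable and the
// remaining lanes must read sequentially from lane 0 of one input: e.g. with
// Shift == 3 on v8i16, Mask = <zz,zz,zz,0,1,2,3,4> matches a PSLLDQ of
// ByteShift bytes, with isSequentialOrUndefInRange(Mask, 3, 5, 0) verifying
// lanes 3..7.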
if (ValidShiftLeft1 || ValidShiftLeft2) {
// Cast the inputs to v2i64 to match PSLLDQ.
/// The mask is coming as MVT::i8 and it should be truncated
/// to MVT::i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is using
-/// "X86select" instead of "vselect". We just can't create the "vselect" node for
+/// "X86select" instead of "vselect". We just can't create the "vselect" node for
/// a scalar instruction.
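/// A sketch of the expected shape (assuming the body matches this comment):
/// the incoming i8 mask is truncated to i1 and used in an X86ISD::SELECT
/// between the operation's result and PreservedSrc, so the select applies to
/// the whole scalar value rather than per vector lane.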
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue Vals[4];
SDLoc dl(InputVector);
-
+
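// When a 64-bit arithmetic shift is legal, the four i32 lanes can be
// recovered from the two i64 lanes of the bitcast: a truncate yields the low
// half of each i64, and a truncate of an SRA by 32 yields the high half.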
if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
                                 DAG.getConstant(0, VecIdxTy));
SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
                              DAG.getConstant(1, VecIdxTy));
- SDValue ShAmt = DAG.getConstant(32,
+ SDValue ShAmt = DAG.getConstant(32,
DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,