return true;
}
+/// Variant of canWidenShuffleElements that canonicalizes known-zero lanes
+/// first: every mask element flagged in \p Zeroable (and not already undef)
+/// is rewritten to SM_SentinelZero before delegating to the base overload,
+/// so zeroed lanes are free to pair with any neighbor during widening.
+/// Returns true (filling \p WidenedMask) if the adjusted mask can be widened.
+static bool canWidenShuffleElements(ArrayRef<int> Mask,
+ const APInt &Zeroable,
+ SmallVectorImpl<int> &WidenedMask) {
+ // Work on a local copy; the incoming Mask is read-only.
+ SmallVector<int, 32> TargetMask(Mask.begin(), Mask.end());
+ for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
+ // Leave undef lanes alone - they are already maximally flexible.
+ if (TargetMask[i] == SM_SentinelUndef)
+ continue;
+ // Lane is known to be zero: mark it with the explicit zero sentinel.
+ if (Zeroable[i])
+ TargetMask[i] = SM_SentinelZero;
+ }
+ return canWidenShuffleElements(TargetMask, WidenedMask);
+}
+
static bool canWidenShuffleElements(ArrayRef<int> Mask) {
SmallVector<int, 32> WidenedMask;
return canWidenShuffleElements(Mask, WidenedMask);
return SDValue();
SmallVector<int, 4> WidenedMask;
- if (!canWidenShuffleElements(Mask, WidenedMask))
+ if (!canWidenShuffleElements(Mask, Zeroable, WidenedMask))
return SDValue();
bool IsLowZero = (Zeroable & 0x3) == 0x3;
// [6] - ignore
// [7] - zero high half of destination
- assert(WidenedMask[0] >= 0 && WidenedMask[1] >= 0 && "Undef half?");
+ assert((WidenedMask[0] >= 0 || IsLowZero) &&
+ (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
unsigned PermMask = 0;
PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
// function lowerV2X128VectorShuffle() is better solution.
assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
+ // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
SmallVector<int, 4> WidenedMask;
if (!canWidenShuffleElements(Mask, WidenedMask))
return SDValue();
define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castB:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vmovaps %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle.i = shufflevector <2 x double> %m, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
ret <4 x double> %shuffle.i
define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castC:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX-NEXT: vmovaps %xmm0, %xmm0
; AVX-NEXT: retq
%shuffle.i = shufflevector <2 x i64> %m, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 2>
ret <4 x i64> %shuffle.i