    return true;
  }
+  // Attempt to match against a VZEXT_MOVL (MOVQ) instruction: keep the low
+  // 64-bit element and zero the upper one (Mask[1] is undef or zero).
+  if (!FloatDomain && VT.is128BitVector() &&
+      Mask.size() == 2 && Mask[0] == 0 && Mask[1] < 0) {
+    unsigned Shuffle = X86ISD::VZEXT_MOVL;
+    MVT ShuffleVT = MVT::v2i64;
+    if (Depth == 1 && Root->getOpcode() == Shuffle)
+      return false; // Nothing to do!
+    Res = DAG.getBitcast(ShuffleVT, Input);
+    DCI.AddToWorklist(Res.getNode());
+    // VZEXT_MOVL is unary: it keeps element 0 of its single input and zeroes
+    // the upper element.
+    Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
+    DCI.AddToWorklist(Res.getNode());
+    DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+                  /*AddTo*/ true);
+    return true;
+  }
+
  // Attempt to blend with zero.
  if (VT.getVectorNumElements() <= 8 &&
      ((Subtarget.hasSSE41() && VT.is128BitVector()) ||
define <16 x i8> @combine_vpshufb_movq(<16 x i8> %a0) {
; SSE-LABEL: combine_vpshufb_movq:
; SSE: # BB#0:
-; SSE-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vpshufb_movq:
; AVX: # BB#0:
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: retq
  %res0 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %a0, <16 x i8> <i8 0, i8 128, i8 1, i8 128, i8 2, i8 128, i8 3, i8 128, i8 4, i8 128, i8 5, i8 128, i8 6, i8 128, i8 7, i8 128>)
  %res1 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %res0, <16 x i8> <i8 0, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 1, i8 3, i8 5, i8 7, i8 9, i8 11, i8 13, i8 15>)
  ret <16 x i8> %res1
}
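For context on why the combine fires here: the first pshufb interleaves the low eight bytes of %a0 with zeros, and the second gathers those even lanes back into bytes 0-7 and the zeroed odd lanes into bytes 8-15, i.e. "keep the low 64 bits, zero the high 64 bits", which is exactly MOVQ / VZEXT_MOVL. A minimal plain-IR sketch of that composed shuffle (the function name is illustrative and not part of the test file):

define <16 x i8> @movq_equivalent_sketch(<16 x i8> %a0) {
  ; Bytes 0..7 come from %a0, bytes 8..15 come from the zero vector.
  %r = shufflevector <16 x i8> %a0, <16 x i8> zeroinitializer,
       <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7,
                   i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  ret <16 x i8> %r
}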