(VMOVDQU32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
}
+let Predicates = [HasVLX] in {
+// A masked extract from the first 128 bits of a 256-bit vector can be
+// implemented with a masked move.
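+// For example, a zeroing extract that previously compiled to
+//   vextracti32x4 $0, %ymm0, %xmm0 {%k1} {z}
+// can now be emitted as
+//   vmovdqa32 %xmm0, %xmm0 {%k1} {z}
+// and the merge-masked form is emitted as a blend (e.g. vpblendmd) when the
+// pass-through register differs from the destination (see the test changes
+// below).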
+def : Pat<(v2i64 (vselect VK2WM:$mask,
+                          (extract_subvector (v4i64 VR256X:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v2i64 (VMOVDQA64Z128rrk VR128X:$src0, VK2WM:$mask,
+                  (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)))>;
+def : Pat<(v4i32 (vselect VK4WM:$mask,
+                          (extract_subvector (v8i32 VR256X:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v4i32 (VMOVDQA32Z128rrk VR128X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)))>;
+def : Pat<(v2f64 (vselect VK2WM:$mask,
+                          (extract_subvector (v4f64 VR256X:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v2f64 (VMOVAPDZ128rrk VR128X:$src0, VK2WM:$mask,
+                  (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)))>;
+def : Pat<(v4f32 (vselect VK4WM:$mask,
+                          (extract_subvector (v8f32 VR256X:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v4f32 (VMOVAPSZ128rrk VR128X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)))>;
+
+def : Pat<(v2i64 (vselect VK2WM:$mask,
+                          (extract_subvector (v4i64 VR256X:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v2i64 (VMOVDQA64Z128rrkz VK2WM:$mask,
+                  (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)))>;
+def : Pat<(v4i32 (vselect VK4WM:$mask,
+                          (extract_subvector (v8i32 VR256X:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v4i32 (VMOVDQA32Z128rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)))>;
+def : Pat<(v2f64 (vselect VK2WM:$mask,
+                          (extract_subvector (v4f64 VR256X:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v2f64 (VMOVAPDZ128rrkz VK2WM:$mask,
+                  (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)))>;
+def : Pat<(v4f32 (vselect VK4WM:$mask,
+                          (extract_subvector (v8f32 VR256X:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v4f32 (VMOVAPSZ128rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)))>;
+
+// A masked extract from the first 128 bits of a 512-bit vector can be
+// implemented with a masked move.
+def : Pat<(v2i64 (vselect VK2WM:$mask,
+                          (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v2i64 (VMOVDQA64Z128rrk VR128X:$src0, VK2WM:$mask,
+                  (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)))>;
+def : Pat<(v4i32 (vselect VK4WM:$mask,
+                          (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v4i32 (VMOVDQA32Z128rrk VR128X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)))>;
+def : Pat<(v2f64 (vselect VK2WM:$mask,
+                          (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v2f64 (VMOVAPDZ128rrk VR128X:$src0, VK2WM:$mask,
+                  (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)))>;
+def : Pat<(v4f32 (vselect VK4WM:$mask,
+                          (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v4f32 (VMOVAPSZ128rrk VR128X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)))>;
+
+def : Pat<(v2i64 (vselect VK2WM:$mask,
+                          (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v2i64 (VMOVDQA64Z128rrkz VK2WM:$mask,
+                  (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)))>;
+def : Pat<(v4i32 (vselect VK4WM:$mask,
+                          (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v4i32 (VMOVDQA32Z128rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)))>;
+def : Pat<(v2f64 (vselect VK2WM:$mask,
+                          (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v2f64 (VMOVAPDZ128rrkz VK2WM:$mask,
+                  (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)))>;
+def : Pat<(v4f32 (vselect VK4WM:$mask,
+                          (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v4f32 (VMOVAPSZ128rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)))>;
+
+// A masked extract from the first 256 bits of a 512-bit vector can be
+// implemented with a masked move.
+def : Pat<(v4i64 (vselect VK4WM:$mask,
+                          (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
+                          VR256X:$src0)),
+          (v4i64 (VMOVDQA64Z256rrk VR256X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)))>;
+def : Pat<(v8i32 (vselect VK8WM:$mask,
+                          (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
+                          VR256X:$src0)),
+          (v8i32 (VMOVDQA32Z256rrk VR256X:$src0, VK8WM:$mask,
+                  (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)))>;
+def : Pat<(v4f64 (vselect VK4WM:$mask,
+                          (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
+                          VR256X:$src0)),
+          (v4f64 (VMOVAPDZ256rrk VR256X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)))>;
+def : Pat<(v8f32 (vselect VK8WM:$mask,
+                          (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
+                          VR256X:$src0)),
+          (v8f32 (VMOVAPSZ256rrk VR256X:$src0, VK8WM:$mask,
+                  (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)))>;
+
+def : Pat<(v4i64 (vselect VK4WM:$mask,
+                          (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
+                          (bitconvert (v8i32 immAllZerosV)))),
+          (v4i64 (VMOVDQA64Z256rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)))>;
+def : Pat<(v8i32 (vselect VK8WM:$mask,
+                          (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
+                          (bitconvert (v8i32 immAllZerosV)))),
+          (v8i32 (VMOVDQA32Z256rrkz VK8WM:$mask,
+                  (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)))>;
+def : Pat<(v4f64 (vselect VK4WM:$mask,
+                          (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
+                          (bitconvert (v8i32 immAllZerosV)))),
+          (v4f64 (VMOVAPDZ256rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)))>;
+def : Pat<(v8f32 (vselect VK8WM:$mask,
+                          (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
+                          (bitconvert (v8i32 immAllZerosV)))),
+          (v8f32 (VMOVAPSZ256rrkz VK8WM:$mask,
+                  (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)))>;
+}
// Move Int Doubleword to Packed Double Int
//
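; Illustrative source pattern (a sketch assembled from the CHECK labels and
; IR fragments below; the signature, mask handling, and return are assumed,
; not copied from the patch): a masked extract of the low 128 bits written
; as a shuffle feeding a select, which the new patterns lower to a masked
; move or blend instead of a masked vextract.
define <4 x i32> @mask_extract_v8i32_v4i32_0_example(<8 x i32> %a, <4 x i32> %passthru, i8 %mask) {
  %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
  ret <4 x i32> %res
}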
; CHECK-LABEL: mask_extract_v8i32_v4i32_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x4 $0, %ymm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-LABEL: mask_extract_v8i32_v4i32_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x4 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-LABEL: mask_extract_v8f32_v4f32_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x4 $0, %ymm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-LABEL: mask_extract_v8f32_v4f32_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x4 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-LABEL: mask_extract_v4i64_v2i64_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x2 $0, %ymm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
; CHECK-LABEL: mask_extract_v4i64_v2i64_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x2 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
; CHECK-LABEL: mask_extract_v4f64_v2f64_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x2 $0, %ymm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 0, i32 1>
; CHECK-LABEL: mask_extract_v4f64_v2f64_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x2 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 0, i32 1>
; CHECK-LABEL: mask_extract_v16i32_v4i32_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x4 $0, %zmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-LABEL: mask_extract_v16i32_v4i32_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x4 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-LABEL: mask_extract_v16f32_v4f32_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x4 $0, %zmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-LABEL: mask_extract_v16f32_v4f32_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x4 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; CHECK-LABEL: mask_extract_v16i32_v8i32_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x8 $0, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%mask.cast = bitcast i8 %mask to <8 x i1>
; CHECK-LABEL: mask_extract_v16i32_v8i32_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x8 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%mask.cast = bitcast i8 %mask to <8 x i1>
; CHECK-LABEL: mask_extract_v16f32_v8f32_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x8 $0, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%mask.cast = bitcast i8 %mask to <8 x i1>
; CHECK-LABEL: mask_extract_v16f32_v8f32_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x8 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%mask.cast = bitcast i8 %mask to <8 x i1>
; CHECK-LABEL: mask_extract_v8i64_v2i64_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x2 $0, %zmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
; CHECK-LABEL: mask_extract_v8i64_v2i64_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x2 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
; CHECK-LABEL: mask_extract_v8f64_v2f64_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x2 $0, %zmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 0, i32 1>
; CHECK-LABEL: mask_extract_v8f64_v2f64_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x2 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 0, i32 1>
; CHECK-LABEL: mask_extract_v8i64_v4i64_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x4 $0, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%mask.cast = bitcast i8 %mask to <8 x i1>
; CHECK-LABEL: mask_extract_v8i64_v4i64_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x4 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%mask.cast = bitcast i8 %mask to <8 x i1>
; CHECK-LABEL: mask_extract_v8f64_v4f64_0:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x4 $0, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovapd %ymm1, %ymm0
+; CHECK-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%mask.cast = bitcast i8 %mask to <8 x i1>
; CHECK-LABEL: mask_extract_v8f64_v4f64_0_z:
; CHECK: # BB#0:
; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x4 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z}
; CHECK-NEXT: retq
%shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
%mask.cast = bitcast i8 %mask to <8 x i1>
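; Zeroing variant, same caveat (an assumed sketch, not test content from the
; patch): selecting against zeroinitializer instead of a pass-through value
; produces the {z}-masked move forms checked above.
define <4 x double> @mask_extract_v8f64_v4f64_0_z_example(<8 x double> %a, i8 %mask) {
  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %mask.cast = bitcast i8 %mask to <8 x i1>
  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %res = select <4 x i1> %mask.extract, <4 x double> %shuffle, <4 x double> zeroinitializer
  ret <4 x double> %res
}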