From: Craig Topper
Date: Fri, 25 Aug 2017 23:34:57 +0000 (+0000)
Subject: [AVX512] Add additional test cases for masked extract subvector.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=e0707b12d117649e7f9bfd73bad562e6a088c4fc;p=llvm

[AVX512] Add additional test cases for masked extract subvector.

This includes tests for extracting 128-bits from a 256-bit vector and zero masking.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@311820 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/test/CodeGen/X86/vector-shuffle-masked.ll b/test/CodeGen/X86/vector-shuffle-masked.ll
index 91d68646049..abdc6992b09 100644
--- a/test/CodeGen/X86/vector-shuffle-masked.ll
+++ b/test/CodeGen/X86/vector-shuffle-masked.ll
@@ -237,6 +237,238 @@ define <8 x i32> @maskz_shuffle_v8i32_23456701(<8 x i32> %a, i8 %mask) {
   ret <8 x i32> %res
 }
 
+define <4 x i32> @mask_extract_v8i32_v4i32_0(<8 x i32> %a, <4 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i32_v4i32_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti32x4 $0, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @mask_extract_v8i32_v4i32_0_z(<8 x i32> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i32_v4i32_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti32x4 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @mask_extract_v8i32_v4i32_1(<8 x i32> %a, <4 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i32_v4i32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti32x4 $1, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> %passthru
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @mask_extract_v8i32_v4i32_1_z(<8 x i32> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i32_v4i32_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti32x4 $1, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
+define <4 x float> @mask_extract_v8f32_v4f32_0(<8 x float> %a, <4 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f32_v4f32_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf32x4 $0, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> %passthru
+  ret <4 x float> %res
+}
+
+define <4 x float> @mask_extract_v8f32_v4f32_0_z(<8 x float> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f32_v4f32_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf32x4 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> zeroinitializer
+  ret <4 x float> %res
+}
+
+define <4 x float> @mask_extract_v8f32_v4f32_1(<8 x float> %a, <4 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f32_v4f32_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf32x4 $1, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> %passthru
+  ret <4 x float> %res
+}
+
+define <4 x float> @mask_extract_v8f32_v4f32_1_z(<8 x float> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f32_v4f32_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf32x4 $1, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> zeroinitializer
+  ret <4 x float> %res
+}
+
+define <2 x i64> @mask_extract_v4i64_v2i64_0(<4 x i64> %a, <2 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v4i64_v2i64_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti64x2 $0, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> %passthru
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @mask_extract_v4i64_v2i64_0_z(<4 x i64> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v4i64_v2i64_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti64x2 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> zeroinitializer
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @mask_extract_v4i64_v2i64_1(<4 x i64> %a, <2 x i64> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v4i64_v2i64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti64x2 $1, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> %passthru
+  ret <2 x i64> %res
+}
+
+define <2 x i64> @mask_extract_v4i64_v2i64_1_z(<4 x i64> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v4i64_v2i64_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti64x2 $1, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> zeroinitializer
+  ret <2 x i64> %res
+}
+
+define <2 x double> @mask_extract_v4f64_v2f64_0(<4 x double> %a, <2 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v4f64_v2f64_0:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf64x2 $0, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> %passthru
+  ret <2 x double> %res
+}
+
+define <2 x double> @mask_extract_v4f64_v2f64_0_z(<4 x double> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v4f64_v2f64_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf64x2 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> zeroinitializer
+  ret <2 x double> %res
+}
+
+define <2 x double> @mask_extract_v4f64_v2f64_1(<4 x double> %a, <2 x double> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_extract_v4f64_v2f64_1:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> %passthru
+  ret <2 x double> %res
+}
+
+define <2 x double> @mask_extract_v4f64_v2f64_1_z(<4 x double> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v4f64_v2f64_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> zeroinitializer
+  ret <2 x double> %res
+}
+
 define <4 x i32> @mask_extract_v16i32_v4i32_0(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_0:
 ; CHECK: # BB#0:
@@ -252,6 +484,20 @@ define <4 x i32> @mask_extract_v16i32_v4i32_0(<16 x i32> %a, <4 x i32> %passthru
   ret <4 x i32> %res
 }
 
+define <4 x i32> @mask_extract_v16i32_v4i32_0_z(<16 x i32> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v4i32_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti32x4 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
 define <4 x i32> @mask_extract_v16i32_v4i32_1(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_1:
 ; CHECK: # BB#0:
@@ -267,6 +513,20 @@ define <4 x i32> @mask_extract_v16i32_v4i32_1(<16 x i32> %a, <4 x i32> %passthru
   ret <4 x i32> %res
 }
 
+define <4 x i32> @mask_extract_v16i32_v4i32_1_z(<16 x i32> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v4i32_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti32x4 $1, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i32> %shuffle, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
 define <4 x i32> @mask_extract_v16i32_v4i32_2(<16 x i32> %a, <4 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_2:
 ; CHECK: # BB#0:
@@ -312,6 +572,20 @@ define <4 x float> @mask_extract_v16f32_v4f32_0(<16 x float> %a, <4 x float> %pa
   ret <4 x float> %res
 }
 
+define <4 x float> @mask_extract_v16f32_v4f32_0_z(<16 x float> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v4f32_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf32x4 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> zeroinitializer
+  ret <4 x float> %res
+}
+
 define <4 x float> @mask_extract_v16f32_v4f32_1(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_1:
 ; CHECK: # BB#0:
@@ -327,6 +601,20 @@ define <4 x float> @mask_extract_v16f32_v4f32_1(<16 x float> %a, <4 x float> %pa
   ret <4 x float> %res
 }
 
+define <4 x float> @mask_extract_v16f32_v4f32_1_z(<16 x float> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v4f32_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf32x4 $1, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x float> %shuffle, <4 x float> zeroinitializer
+  ret <4 x float> %res
+}
+
 define <4 x float> @mask_extract_v16f32_v4f32_2(<16 x float> %a, <4 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_2:
 ; CHECK: # BB#0:
@@ -370,6 +658,18 @@ define <8 x i32> @mask_extract_v16i32_v8i32_0(<16 x i32> %a, <8 x i32> %passthru
   ret <8 x i32> %res
 }
 
+define <8 x i32> @mask_extract_v16i32_v8i32_0_z(<16 x i32> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v8i32_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti32x8 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> zeroinitializer
+  ret <8 x i32> %res
+}
+
 define <8 x i32> @mask_extract_v16i32_v8i32_1(<16 x i32> %a, <8 x i32> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v8i32_1:
 ; CHECK: # BB#0:
@@ -383,6 +683,18 @@ define <8 x i32> @mask_extract_v16i32_v8i32_1(<16 x i32> %a, <8 x i32> %passthru
   ret <8 x i32> %res
 }
 
+define <8 x i32> @mask_extract_v16i32_v8i32_1_z(<16 x i32> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16i32_v8i32_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x i32> %shuffle, <8 x i32> zeroinitializer
+  ret <8 x i32> %res
+}
+
 define <8 x float> @mask_extract_v16f32_v8f32_0(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v8f32_0:
 ; CHECK: # BB#0:
@@ -396,6 +708,18 @@ define <8 x float> @mask_extract_v16f32_v8f32_0(<16 x float> %a, <8 x float> %pa
   ret <8 x float> %res
 }
 
+define <8 x float> @mask_extract_v16f32_v8f32_0_z(<16 x float> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v8f32_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf32x8 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x float> %shuffle, <8 x float> zeroinitializer
+  ret <8 x float> %res
+}
+
 define <8 x float> @mask_extract_v16f32_v8f32_1(<16 x float> %a, <8 x float> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v8f32_1:
 ; CHECK: # BB#0:
@@ -409,6 +733,18 @@ define <8 x float> @mask_extract_v16f32_v8f32_1(<16 x float> %a, <8 x float> %pa
   ret <8 x float> %res
 }
 
+define <8 x float> @mask_extract_v16f32_v8f32_1_z(<16 x float> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v16f32_v8f32_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %mask.cast, <8 x float> %shuffle, <8 x float> zeroinitializer
+  ret <8 x float> %res
+}
+
 define <2 x i64> @mask_extract_v8i64_v2i64_0(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_0:
 ; CHECK: # BB#0:
@@ -424,6 +760,20 @@ define <2 x i64> @mask_extract_v8i64_v2i64_0(<8 x i64> %a, <2 x i64> %passthru,
   ret <2 x i64> %res
 }
 
+define <2 x i64> @mask_extract_v8i64_v2i64_0_z(<8 x i64> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v2i64_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti64x2 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> zeroinitializer
+  ret <2 x i64> %res
+}
+
 define <2 x i64> @mask_extract_v8i64_v2i64_1(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_1:
 ; CHECK: # BB#0:
@@ -439,6 +789,20 @@ define <2 x i64> @mask_extract_v8i64_v2i64_1(<8 x i64> %a, <2 x i64> %passthru,
   ret <2 x i64> %res
 }
 
+define <2 x i64> @mask_extract_v8i64_v2i64_1_z(<8 x i64> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v2i64_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti64x2 $1, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x i64> %shuffle, <2 x i64> zeroinitializer
+  ret <2 x i64> %res
+}
+
 define <2 x i64> @mask_extract_v8i64_v2i64_2(<8 x i64> %a, <2 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_2:
 ; CHECK: # BB#0:
@@ -484,6 +848,20 @@ define <2 x double> @mask_extract_v8f64_v2f64_0(<8 x double> %a, <2 x double> %p
   ret <2 x double> %res
 }
 
+define <2 x double> @mask_extract_v8f64_v2f64_0_z(<8 x double> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v2f64_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf64x2 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 0, i32 1>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> zeroinitializer
+  ret <2 x double> %res
+}
+
 define <2 x double> @mask_extract_v8f64_v2f64_1(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_1:
 ; CHECK: # BB#0:
@@ -499,6 +877,20 @@ define <2 x double> @mask_extract_v8f64_v2f64_1(<8 x double> %a, <2 x double> %p
   ret <2 x double> %res
 }
 
+define <2 x double> @mask_extract_v8f64_v2f64_1_z(<8 x double> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v2f64_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf64x2 $1, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <2 x i32> <i32 0, i32 1>
+  %res = select <2 x i1> %mask.extract, <2 x double> %shuffle, <2 x double> zeroinitializer
+  ret <2 x double> %res
+}
+
 define <2 x double> @mask_extract_v8f64_v2f64_2(<8 x double> %a, <2 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_2:
 ; CHECK: # BB#0:
@@ -543,6 +935,19 @@ define <4 x i64> @mask_extract_v8i64_v4i64_0(<8 x i64> %a, <4 x i64> %passthru,
   ret <4 x i64> %res
 }
 
+define <4 x i64> @mask_extract_v8i64_v4i64_0_z(<8 x i64> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v4i64_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti64x4 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> zeroinitializer
+  ret <4 x i64> %res
+}
+
 define <4 x i64> @mask_extract_v8i64_v4i64_1(<8 x i64> %a, <4 x i64> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v4i64_1:
 ; CHECK: # BB#0:
@@ -557,6 +962,19 @@ define <4 x i64> @mask_extract_v8i64_v4i64_1(<8 x i64> %a, <4 x i64> %passthru,
   ret <4 x i64> %res
 }
 
+define <4 x i64> @mask_extract_v8i64_v4i64_1_z(<8 x i64> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8i64_v4i64_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x i64> %shuffle, <4 x i64> zeroinitializer
+  ret <4 x i64> %res
+}
+
 define <4 x double> @mask_extract_v8f64_v4f64_0(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v4f64_0:
 ; CHECK: # BB#0:
@@ -571,6 +989,19 @@ define <4 x double> @mask_extract_v8f64_v4f64_0(<8 x double> %a, <4 x double> %p
   ret <4 x double> %res
 }
 
+define <4 x double> @mask_extract_v8f64_v4f64_0_z(<8 x double> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v4f64_0_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf64x4 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x double> %shuffle, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+
 define <4 x double> @mask_extract_v8f64_v4f64_1(<8 x double> %a, <4 x double> %passthru, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v4f64_1:
 ; CHECK: # BB#0:
@@ -585,8 +1016,21 @@ define <4 x double> @mask_extract_v8f64_v4f64_1(<8 x double> %a, <4 x double> %p
   ret <4 x double> %res
 }
 
-define <8 x i32> @mask_extract_v8i64_v8i32_1(<8 x i64> %a, <8 x i32> %passthru, i8 %mask) {
-; CHECK-LABEL: mask_extract_v8i64_v8i32_1:
+define <4 x double> @mask_extract_v8f64_v4f64_1_z(<8 x double> %a, i8 %mask) {
+; CHECK-LABEL: mask_extract_v8f64_v4f64_1_z:
+; CHECK: # BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %mask.cast = bitcast i8 %mask to <8 x i1>
+  %mask.extract = shufflevector <8 x i1> %mask.cast, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %res = select <4 x i1> %mask.extract, <4 x double> %shuffle, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+
+define <8 x i32> @mask_cast_extract_v8i64_v8i32_1(<8 x i64> %a, <8 x i32> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_cast_extract_v8i64_v8i32_1:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
 ; CHECK-NEXT: vextracti32x8 $1, %zmm0, %ymm1 {%k1}
@@ -599,8 +1043,8 @@ define <8 x i32> @mask_extract_v8i64_v8i32_1(<8 x i64> %a, <8 x i32> %passthru,
   ret <8 x i32> %res
 }
 
-define <8 x float> @mask_extract_v8f64_v8f32_1(<8 x double> %a, <8 x float> %passthru, i8 %mask) {
-; CHECK-LABEL: mask_extract_v8f64_v8f32_1:
+define <8 x float> @mask_cast_extract_v8f64_v8f32_1(<8 x double> %a, <8 x float> %passthru, i8 %mask) {
+; CHECK-LABEL: mask_cast_extract_v8f64_v8f32_1:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
 ; CHECK-NEXT: vextractf32x8 $1, %zmm0, %ymm1 {%k1}
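
Note: the new tests all exercise the same IR idiom: the subvector extract is written as a shufflevector, the scalar i8 mask is bitcast to <8 x i1> (and, for results narrower than eight lanes, shortened with a second shufflevector), and a select applies the mask against either a passthru vector or zeroinitializer. A minimal standalone sketch of the zero-masking form follows; the function name @sketch_masked_extract_hi_z is illustrative only and not part of the committed test file:

define <4 x i32> @sketch_masked_extract_hi_z(<8 x i32> %a, i8 %mask) {
  ; Extract the upper 128-bit lane of the 256-bit input.
  %sub = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  ; Reinterpret the scalar mask as <8 x i1> and keep its low 4 bits.
  %m = bitcast i8 %mask to <8 x i1>
  %m4 = shufflevector <8 x i1> %m, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ; Zero-masking select; per the CHECK lines above, this is expected to fold to
  ; vextracti32x4 $1, %ymm0, %xmm0 {%k1} {z} on AVX512 targets.
  %res = select <4 x i1> %m4, <4 x i32> %sub, <4 x i32> zeroinitializer
  ret <4 x i32> %res
}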