From: Simon Pilgrim
Date: Sat, 6 Apr 2019 14:14:54 +0000 (+0000)
Subject: [X86] Split expandload and compressstore tests
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=98514317dd2ab304cdb7e3dbde6d42a757e93c16;p=llvm

[X86] Split expandload and compressstore tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@357840 91177308-0d34-0410-b5e6-96231b3b80d8
---
diff --git a/test/CodeGen/X86/compress_expand.ll b/test/CodeGen/X86/masked_compressstore.ll
similarity index 54%
rename from test/CodeGen/X86/compress_expand.ll
rename to test/CodeGen/X86/masked_compressstore.ll
index c0e78348668..97ffc1ba7cc 100644
--- a/test/CodeGen/X86/compress_expand.ll
+++ b/test/CodeGen/X86/masked_compressstore.ll
@@ -1,109 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mcpu=skylake-avx512 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SKX
-; RUN: llc -mcpu=knl < %s | FileCheck %s --check-prefix=ALL --check-prefix=KNL
-
-target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
-target triple = "x86_64-unknown-linux-gnu"
-
-define <16 x float> @expandload_v16f32_const_undef(float* %base) {
-; SKX-LABEL: expandload_v16f32_const_undef:
-; SKX: # %bb.0:
-; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expandload_v16f32_const_undef:
-; KNL: # %bb.0:
-; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
-; KNL-NEXT: retq
- %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
- ret <16 x float>%res
-}
-
-define <16 x float> @expandload_v16f32_const(float* %base, <16 x float> %src0) {
-; SKX-LABEL: expandload_v16f32_const:
-; SKX: # %bb.0:
-; SKX-NEXT: movw $30719, %ax # imm = 0x77FF
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expandload_v16f32_const:
-; KNL: # %bb.0:
-; KNL-NEXT: movw $30719, %ax # imm = 0x77FF
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: retq
- %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x float> %src0)
- ret <16 x float>%res
-}
-
-define <8 x double> @expandload_v8f64_v8i1(double* %base, <8 x double> %src0, <8 x i1> %mask) {
-; SKX-LABEL: expandload_v8f64_v8i1:
-; SKX: # %bb.0:
-; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
-; SKX-NEXT: vpmovw2m %xmm1, %k1
-; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expandload_v8f64_v8i1:
-; KNL: # %bb.0:
-; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
-; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
-; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
-; KNL-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
-; KNL-NEXT: retq
- %res = call <8 x double> @llvm.masked.expandload.v8f64(double* %base, <8 x i1> %mask, <8 x double> %src0)
- ret <8 x double>%res
-}
-
-define <4 x float> @expandload_v4f32_const(float* %base, <4 x float> %src0) {
-; SKX-LABEL: expandload_v4f32_const:
-; SKX: # %bb.0:
-; SKX-NEXT: movb $7, %al
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expandload_v4f32_const:
-; KNL: # %bb.0:
-; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT: movw $7, %ax
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; KNL-NEXT: retq
- %res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
- ret <4 x float>%res
-}
-
-define <2 x i64> @expandload_v2i64_const(i64* %base, <2 x i64> %src0) {
-; SKX-LABEL: expandload_v2i64_const:
-; SKX: # %bb.0:
-; SKX-NEXT: movb $2, %al
-; SKX-NEXT: kmovd %eax, %k1
-; SKX-NEXT: vpexpandq (%rdi), %xmm0 {%k1}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expandload_v2i64_const:
-; KNL: # %bb.0:
-; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT: movb $2, %al
-; KNL-NEXT: kmovw %eax, %k1
-; KNL-NEXT: vpexpandq (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; KNL-NEXT: retq
- %res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
- ret <2 x i64>%res
-}
-
-declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
-declare <8 x double> @llvm.masked.expandload.v8f64(double*, <8 x i1>, <8 x double>)
-declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
-declare <2 x i64> @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake-avx512 | FileCheck %s --check-prefixes=ALL,SKX
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl | FileCheck %s --check-prefixes=ALL,KNL
 
 define void @compressstore_v16f32_const(float* %base, <16 x float> %V) {
 ; SKX-LABEL: compressstore_v16f32_const:
 ; SKX: # %bb.0:
@@ -249,31 +146,6 @@ define void @compressstore_v4f32_v4i1(float* %base, <4 x float> %V, <4 x i1> %ma
 ret void
 }
 
-define <2 x float> @expandload_v2f32_v2i1(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
-; SKX-LABEL: expandload_v2f32_v2i1:
-; SKX: # %bb.0:
-; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; SKX-NEXT: vptestnmq %xmm1, %xmm1, %k1
-; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: expandload_v2f32_v2i1:
-; KNL: # %bb.0:
-; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
-; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
-; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0
-; KNL-NEXT: kshiftlw $14, %k0, %k0
-; KNL-NEXT: kshiftrw $14, %k0, %k1
-; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
-; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; KNL-NEXT: retq
- %mask = icmp eq <2 x i32> %trigger, zeroinitializer
- %res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
- ret <2 x float> %res
-}
-
 define void @compressstore_v2f32_v2i32(float* %base, <2 x float> %V, <2 x i32> %trigger) {
 ; SKX-LABEL: compressstore_v2f32_v2i32:
 ; SKX: # %bb.0:
@@ -298,49 +170,6 @@ define void @compressstore_v2f32_v2i32(float* %base, <2 x float> %V, <2 x i32> %
 ret void
 }
 
-define <32 x float> @expandload_v32f32_v32i32(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
-; ALL-LABEL: expandload_v32f32_v32i32:
-; ALL: # %bb.0:
-; ALL-NEXT: vptestnmd %zmm3, %zmm3, %k1
-; ALL-NEXT: vptestnmd %zmm2, %zmm2, %k2
-; ALL-NEXT: kmovw %k2, %eax
-; ALL-NEXT: popcntl %eax, %eax
-; ALL-NEXT: vexpandps (%rdi,%rax,4), %zmm1 {%k1}
-; ALL-NEXT: vexpandps (%rdi), %zmm0 {%k2}
-; ALL-NEXT: retq
- %mask = icmp eq <32 x i32> %trigger, zeroinitializer
- %res = call <32 x float> @llvm.masked.expandload.v32f32(float* %base, <32 x i1> %mask, <32 x float> %src0)
- ret <32 x float> %res
-}
-
-define <16 x double> @compressstore_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
-; SKX-LABEL: compressstore_v16f64_v16i32:
-; SKX: # %bb.0:
-; SKX-NEXT: vextracti64x4 $1, %zmm2, %ymm3
-; SKX-NEXT: vptestnmd %ymm3, %ymm3, %k1
-; SKX-NEXT: vptestnmd %ymm2, %ymm2, %k2
-; SKX-NEXT: kmovb %k2, %eax
-; SKX-NEXT: popcntl %eax, %eax
-; SKX-NEXT: vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
-; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k2}
-; SKX-NEXT: retq
-;
-; KNL-LABEL: compressstore_v16f64_v16i32:
-; KNL: # %bb.0:
-; KNL-NEXT: vextracti64x4 $1, %zmm2, %ymm3
-; KNL-NEXT: vptestnmd %zmm3, %zmm3, %k1
-; KNL-NEXT: vptestnmd %zmm2, %zmm2, %k2
-; KNL-NEXT: vexpandpd (%rdi), %zmm0 {%k2}
-; KNL-NEXT: kmovw %k2, %eax
-; KNL-NEXT: movzbl %al, %eax
-; KNL-NEXT: popcntl %eax, %eax
-; KNL-NEXT: vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
-; KNL-NEXT: retq
- %mask = icmp eq <16 x i32> %trigger, zeroinitializer
- %res = call <16 x double> @llvm.masked.expandload.v16f64(double* %base, <16 x i1> %mask, <16 x double> %src0)
- ret <16 x double> %res
-}
-
 define void @compressstore_v32f32_v32i32(float* %base, <32 x float> %V, <32 x i32> %trigger) {
 ; SKX-LABEL: compressstore_v32f32_v32i32:
 ; SKX: # %bb.0:
@@ -396,21 +225,34 @@ define void @compressstore_v16f64_v16i1(double* %base, <16 x double> %V, <16 x i
 ret void
 }
 
-declare void @llvm.masked.compressstore.v16f32(<16 x float>, float* , <16 x i1>)
-declare void @llvm.masked.compressstore.v8f32(<8 x float>, float* , <8 x i1>)
-declare void @llvm.masked.compressstore.v8f64(<8 x double>, double* , <8 x i1>)
-declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32* , <16 x i1>)
-declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32* , <8 x i1>)
-declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64* , <8 x i1>)
-declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32* , <4 x i1>)
-declare void @llvm.masked.compressstore.v4f32(<4 x float>, float* , <4 x i1>)
-declare void @llvm.masked.compressstore.v4i64(<4 x i64>, i64* , <4 x i1>)
-declare void @llvm.masked.compressstore.v2i64(<2 x i64>, i64* , <2 x i1>)
-declare void @llvm.masked.compressstore.v2f32(<2 x float>, float* , <2 x i1>)
-declare void @llvm.masked.compressstore.v32f32(<32 x float>, float* , <32 x i1>)
-declare void @llvm.masked.compressstore.v16f64(<16 x double>, double* , <16 x i1>)
-declare void @llvm.masked.compressstore.v32f64(<32 x double>, double* , <32 x i1>)
+declare void @llvm.masked.compressstore.v16f64(<16 x double>, double*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8f64(<8 x double>, double*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4f64(<4 x double>, double*, <4 x i1>)
+declare void @llvm.masked.compressstore.v2f64(<2 x double>, double*, <2 x i1>)
+declare void @llvm.masked.compressstore.v1f64(<1 x double>, double*, <1 x i1>)
+
+declare void @llvm.masked.compressstore.v32f32(<32 x float>, float*, <32 x i1>)
+declare void @llvm.masked.compressstore.v16f32(<16 x float>, float*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8f32(<8 x float>, float*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4f32(<4 x float>, float*, <4 x i1>)
+declare void @llvm.masked.compressstore.v2f32(<2 x float>, float*, <2 x i1>)
+
+declare void @llvm.masked.compressstore.v8i64(<8 x i64>, i64*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4i64(<4 x i64>, i64*, <4 x i1>)
+declare void @llvm.masked.compressstore.v2i64(<2 x i64>, i64*, <2 x i1>)
+declare void @llvm.masked.compressstore.v1i64(<1 x i64>, i64*, <1 x i1>)
+
+declare void @llvm.masked.compressstore.v16i32(<16 x i32>, i32*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8i32(<8 x i32>, i32*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4i32(<4 x i32>, i32*, <4 x i1>)
+declare void @llvm.masked.compressstore.v2i32(<2 x i32>, i32*, <2 x i1>)
+
+declare void @llvm.masked.compressstore.v32i16(<32 x i16>, i16*, <32 x i1>)
+declare void @llvm.masked.compressstore.v16i16(<16 x i16>, i16*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8i16(<8 x i16>, i16*, <8 x i1>)
+declare void @llvm.masked.compressstore.v4i16(<4 x i16>, i16*, <4 x i1>)
-declare <2 x float> @llvm.masked.expandload.v2f32(float* , <2 x i1> , <2 x float> )
-declare <32 x float> @llvm.masked.expandload.v32f32(float* , <32 x i1> , <32 x float> )
-declare <16 x double> @llvm.masked.expandload.v16f64(double* , <16 x i1> , <16 x double> )
+declare void @llvm.masked.compressstore.v64i8(<64 x i8>, i8*, <64 x i1>)
+declare void @llvm.masked.compressstore.v32i8(<32 x i8>, i8*, <32 x i1>)
+declare void @llvm.masked.compressstore.v16i8(<16 x i8>, i8*, <16 x i1>)
+declare void @llvm.masked.compressstore.v8i8(<8 x i8>, i8*, <8 x i1>)
diff --git a/test/CodeGen/X86/masked_expandload.ll b/test/CodeGen/X86/masked_expandload.ll
new file mode 100644
index 00000000000..8ebebc467d5
--- /dev/null
+++ b/test/CodeGen/X86/masked_expandload.ll
@@ -0,0 +1,198 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=skylake-avx512 | FileCheck %s --check-prefixes=ALL,SKX
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mcpu=knl | FileCheck %s --check-prefixes=ALL,KNL
+
+define <16 x float> @expandload_v16f32_const_undef(float* %base) {
+; SKX-LABEL: expandload_v16f32_const_undef:
+; SKX: # %bb.0:
+; SKX-NEXT: movw $-2049, %ax # imm = 0xF7FF
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: expandload_v16f32_const_undef:
+; KNL: # %bb.0:
+; KNL-NEXT: movw $-2049, %ax # imm = 0xF7FF
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1} {z}
+; KNL-NEXT: retq
+ %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
+ ret <16 x float>%res
+}
+
+define <16 x float> @expandload_v16f32_const(float* %base, <16 x float> %src0) {
+; SKX-LABEL: expandload_v16f32_const:
+; SKX: # %bb.0:
+; SKX-NEXT: movw $30719, %ax # imm = 0x77FF
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vexpandps (%rdi), %zmm0 {%k1}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: expandload_v16f32_const:
+; KNL: # %bb.0:
+; KNL-NEXT: movw $30719, %ax # imm = 0x77FF
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
+; KNL-NEXT: retq
+ %res = call <16 x float> @llvm.masked.expandload.v16f32(float* %base, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 false, i1 true, i1 true, i1 true, i1 false>, <16 x float> %src0)
+ ret <16 x float>%res
+}
+
+define <8 x double> @expandload_v8f64_v8i1(double* %base, <8 x double> %src0, <8 x i1> %mask) {
+; SKX-LABEL: expandload_v8f64_v8i1:
+; SKX: # %bb.0:
+; SKX-NEXT: vpsllw $15, %xmm1, %xmm1
+; SKX-NEXT: vpmovw2m %xmm1, %k1
+; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: expandload_v8f64_v8i1:
+; KNL: # %bb.0:
+; KNL-NEXT: vpmovzxwq {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
+; KNL-NEXT: vpsllq $63, %zmm1, %zmm1
+; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
+; KNL-NEXT: vexpandpd (%rdi), %zmm0 {%k1}
+; KNL-NEXT: retq
+ %res = call <8 x double> @llvm.masked.expandload.v8f64(double* %base, <8 x i1> %mask, <8 x double> %src0)
+ ret <8 x double>%res
+}
+
+define <4 x float> @expandload_v4f32_const(float* %base, <4 x float> %src0) {
+; SKX-LABEL: expandload_v4f32_const:
+; SKX: # %bb.0:
+; SKX-NEXT: movb $7, %al
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: expandload_v4f32_const:
+; KNL: # %bb.0:
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; KNL-NEXT: movw $7, %ax
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; KNL-NEXT: retq
+ %res = call <4 x float> @llvm.masked.expandload.v4f32(float* %base, <4 x i1> <i1 true, i1 true, i1 true, i1 false>, <4 x float> %src0)
+ ret <4 x float>%res
+}
+
+define <2 x i64> @expandload_v2i64_const(i64* %base, <2 x i64> %src0) {
+; SKX-LABEL: expandload_v2i64_const:
+; SKX: # %bb.0:
+; SKX-NEXT: movb $2, %al
+; SKX-NEXT: kmovd %eax, %k1
+; SKX-NEXT: vpexpandq (%rdi), %xmm0 {%k1}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: expandload_v2i64_const:
+; KNL: # %bb.0:
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; KNL-NEXT: movb $2, %al
+; KNL-NEXT: kmovw %eax, %k1
+; KNL-NEXT: vpexpandq (%rdi), %zmm0 {%k1}
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; KNL-NEXT: retq
+ %res = call <2 x i64> @llvm.masked.expandload.v2i64(i64* %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
+ ret <2 x i64>%res
+}
+
+define <2 x float> @expandload_v2f32_v2i1(float* %base, <2 x float> %src0, <2 x i32> %trigger) {
+; SKX-LABEL: expandload_v2f32_v2i1:
+; SKX: # %bb.0:
+; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; SKX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; SKX-NEXT: vptestnmq %xmm1, %xmm1, %k1
+; SKX-NEXT: vexpandps (%rdi), %xmm0 {%k1}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: expandload_v2f32_v2i1:
+; KNL: # %bb.0:
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
+; KNL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; KNL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; KNL-NEXT: vptestnmq %zmm1, %zmm1, %k0
+; KNL-NEXT: kshiftlw $14, %k0, %k0
+; KNL-NEXT: kshiftrw $14, %k0, %k1
+; KNL-NEXT: vexpandps (%rdi), %zmm0 {%k1}
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; KNL-NEXT: retq
+ %mask = icmp eq <2 x i32> %trigger, zeroinitializer
+ %res = call <2 x float> @llvm.masked.expandload.v2f32(float* %base, <2 x i1> %mask, <2 x float> %src0)
+ ret <2 x float> %res
+}
+
+define <32 x float> @expandload_v32f32_v32i32(float* %base, <32 x float> %src0, <32 x i32> %trigger) {
+; ALL-LABEL: expandload_v32f32_v32i32:
+; ALL: # %bb.0:
+; ALL-NEXT: vptestnmd %zmm3, %zmm3, %k1
+; ALL-NEXT: vptestnmd %zmm2, %zmm2, %k2
+; ALL-NEXT: kmovw %k2, %eax
+; ALL-NEXT: popcntl %eax, %eax
+; ALL-NEXT: vexpandps (%rdi,%rax,4), %zmm1 {%k1}
+; ALL-NEXT: vexpandps (%rdi), %zmm0 {%k2}
+; ALL-NEXT: retq
+ %mask = icmp eq <32 x i32> %trigger, zeroinitializer
+ %res = call <32 x float> @llvm.masked.expandload.v32f32(float* %base, <32 x i1> %mask, <32 x float> %src0)
+ ret <32 x float> %res
+}
+
+define <16 x double> @expandload_v16f64_v16i32(double* %base, <16 x double> %src0, <16 x i32> %trigger) {
+; SKX-LABEL: expandload_v16f64_v16i32:
+; SKX: # %bb.0:
+; SKX-NEXT: vextracti64x4 $1, %zmm2, %ymm3
+; SKX-NEXT: vptestnmd %ymm3, %ymm3, %k1
+; SKX-NEXT: vptestnmd %ymm2, %ymm2, %k2
+; SKX-NEXT: kmovb %k2, %eax
+; SKX-NEXT: popcntl %eax, %eax
+; SKX-NEXT: vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
+; SKX-NEXT: vexpandpd (%rdi), %zmm0 {%k2}
+; SKX-NEXT: retq
+;
+; KNL-LABEL: expandload_v16f64_v16i32:
+; KNL: # %bb.0:
+; KNL-NEXT: vextracti64x4 $1, %zmm2, %ymm3
+; KNL-NEXT: vptestnmd %zmm3, %zmm3, %k1
+; KNL-NEXT: vptestnmd %zmm2, %zmm2, %k2
+; KNL-NEXT: vexpandpd (%rdi), %zmm0 {%k2}
+; KNL-NEXT: kmovw %k2, %eax
+; KNL-NEXT: movzbl %al, %eax
+; KNL-NEXT: popcntl %eax, %eax
+; KNL-NEXT: vexpandpd (%rdi,%rax,8), %zmm1 {%k1}
+; KNL-NEXT: retq
+ %mask = icmp eq <16 x i32> %trigger, zeroinitializer
+ %res = call <16 x double> @llvm.masked.expandload.v16f64(double* %base, <16 x i1> %mask, <16 x double> %src0)
+ ret <16 x double> %res
+}
+
+declare <16 x double> @llvm.masked.expandload.v16f64(double*, <16 x i1>, <16 x double>)
+declare <8 x double> @llvm.masked.expandload.v8f64(double*, <8 x i1>, <8 x double>)
+declare <4 x double> @llvm.masked.expandload.v4f64(double*, <4 x i1>, <4 x double>)
+declare <2 x double> @llvm.masked.expandload.v2f64(double*, <2 x i1>, <2 x double>)
+declare <1 x double> @llvm.masked.expandload.v1f64(double*, <1 x i1>, <1 x double>)
+
+declare <32 x float> @llvm.masked.expandload.v32f32(float*, <32 x i1>, <32 x float>)
+declare <16 x float> @llvm.masked.expandload.v16f32(float*, <16 x i1>, <16 x float>)
+declare <8 x float> @llvm.masked.expandload.v8f32(float*, <8 x i1>, <8 x float>)
+declare <4 x float> @llvm.masked.expandload.v4f32(float*, <4 x i1>, <4 x float>)
+declare <2 x float> @llvm.masked.expandload.v2f32(float*, <2 x i1>, <2 x float>)
+
+declare <8 x i64> @llvm.masked.expandload.v8i64(i64*, <8 x i1>, <8 x i64>)
+declare <4 x i64> @llvm.masked.expandload.v4i64(i64*, <4 x i1>, <4 x i64>)
+declare <2 x i64> @llvm.masked.expandload.v2i64(i64*, <2 x i1>, <2 x i64>)
+declare <1 x i64> @llvm.masked.expandload.v1i64(i64*, <1 x i1>, <1 x i64>)
+
+declare <16 x i32> @llvm.masked.expandload.v16i32(i32*, <16 x i1>, <16 x i32>)
+declare <8 x i32> @llvm.masked.expandload.v8i32(i32*, <8 x i1>, <8 x i32>)
+declare <4 x i32> @llvm.masked.expandload.v4i32(i32*, <4 x i1>, <4 x i32>)
+declare <2 x i32> @llvm.masked.expandload.v2i32(i32*, <2 x i1>, <2 x i32>)
+
+declare <32 x i16> @llvm.masked.expandload.v32i16(i16*, <32 x i1>, <32 x i16>)
+declare <16 x i16> @llvm.masked.expandload.v16i16(i16*, <16 x i1>, <16 x i16>)
+declare <8 x i16> @llvm.masked.expandload.v8i16(i16*, <8 x i1>, <8 x i16>)
+declare <4 x i16> @llvm.masked.expandload.v4i16(i16*, <4 x i1>, <4 x i16>)
+
+declare <64 x i8> @llvm.masked.expandload.v64i8(i8*, <64 x i1>, <64 x i8>)
+declare <32 x i8> @llvm.masked.expandload.v32i8(i8*, <32 x i1>, <32 x i8>)
+declare <16 x i8> @llvm.masked.expandload.v16i8(i8*, <16 x i1>, <16 x i8>)
+declare <8 x i8> @llvm.masked.expandload.v8i8(i8*, <8 x i1>, <8 x i8>)