From d32b7518d301e1e140e7e7d7dd5f1c94c6ac602e Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 4 Jul 2016 21:30:47 +0000
Subject: [PATCH] [X86][AVX512] Converted the VSHUFPD intrinsics to generic IR

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@274523 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Headers/avx512fintrin.h      | 37 +++++++++--------
 lib/Headers/avx512vlintrin.h     | 70 +++++++++++++++-----------
 test/CodeGen/avx512f-builtins.c  |  8 ++--
 test/CodeGen/avx512vl-builtins.c | 24 +++++++----
 4 files changed, 74 insertions(+), 65 deletions(-)

diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index c0d44984fd..4bd5a8d7db 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -5950,6 +5950,7 @@ _mm512_kmov (__mmask16 __A)
 #define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \
   (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
 
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I, __mmask16 __U,
                                  __m512i __B)
@@ -7166,23 +7167,27 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
                                              (__v8di)_mm512_setzero_si512(), \
                                              (__mmask8)(U)); })
 
-#define _mm512_shuffle_pd(M, V, imm) __extension__ ({ \
-  (__m512d)__builtin_ia32_shufpd512_mask((__v8df)(__m512d)(M), \
-                                         (__v8df)(__m512d)(V), (int)(imm), \
-                                         (__v8df)_mm512_undefined_pd(), \
-                                         (__mmask8)-1); })
-
-#define _mm512_mask_shuffle_pd(W, U, M, V, imm) __extension__ ({ \
-  (__m512d)__builtin_ia32_shufpd512_mask((__v8df)(__m512d)(M), \
-                                         (__v8df)(__m512d)(V), (int)(imm), \
-                                         (__v8df)(__m512d)(W), \
-                                         (__mmask8)(U)); })
+#define _mm512_shuffle_pd(A, B, M) __extension__ ({ \
+  (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
+                                   (__v8df)(__m512d)(B), \
+                                   (((M) & 0x01) >> 0) + 0, \
+                                   (((M) & 0x02) >> 1) + 8, \
+                                   (((M) & 0x04) >> 2) + 2, \
+                                   (((M) & 0x08) >> 3) + 10, \
+                                   (((M) & 0x10) >> 4) + 4, \
+                                   (((M) & 0x20) >> 5) + 12, \
+                                   (((M) & 0x40) >> 6) + 6, \
+                                   (((M) & 0x80) >> 7) + 14); })
+
+#define _mm512_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                       (__v8df)(__m512d)(W)); })
 
-#define _mm512_maskz_shuffle_pd(U, M, V, imm) __extension__ ({ \
-  (__m512d)__builtin_ia32_shufpd512_mask((__v8df)(__m512d)(M), \
-                                         (__v8df)(__m512d)(V), (int)(imm), \
-                                         (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)(U)); })
+#define _mm512_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                       (__v8df)_mm512_setzero_pd()); })
 
 #define _mm512_shuffle_ps(M, V, imm) __extension__ ({ \
   (__m512)__builtin_ia32_shufps512_mask((__v16sf)(__m512)(M), \
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
index b5b371823a..2e6f9fc83b 100644
--- a/lib/Headers/avx512vlintrin.h
+++ b/lib/Headers/avx512vlintrin.h
@@ -7374,51 +7374,45 @@ _mm256_maskz_sra_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
                                            (__v4di)_mm256_setzero_si256(), \
                                            (__mmask8)(U)); })
 
-#define _mm_mask_shuffle_pd(W, U, A, B, imm) __extension__ ({ \
-  (__m128d)__builtin_ia32_shufpd128_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (int)(imm), \
-                                         (__v2df)(__m128d)(W), \
-                                         (__mmask8)(U)); })
+#define _mm_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                       (__v2df)(__m128d)(W)); })
 
-#define _mm_maskz_shuffle_pd(U, A, B, imm) __extension__ ({ \
-  (__m128d)__builtin_ia32_shufpd128_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (int)(imm), \
-                                         (__v2df)_mm_setzero_pd(), \
-                                         (__mmask8)(U)); })
+#define _mm_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                       (__v2df)_mm_setzero_pd()); })
 
-#define _mm256_mask_shuffle_pd(W, U, A, B, imm) __extension__ ({ \
-  (__m256d)__builtin_ia32_shufpd256_mask((__v4df)(__m256d)(A), \
-                                         (__v4df)(__m256d)(B), (int)(imm), \
-                                         (__v4df)(__m256d)(W), \
-                                         (__mmask8)(U)); })
+#define _mm256_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                       (__v4df)(__m256d)(W)); })
 
-#define _mm256_maskz_shuffle_pd(U, A, B, imm) __extension__ ({ \
-  (__m256d)__builtin_ia32_shufpd256_mask((__v4df)(__m256d)(A), \
-                                         (__v4df)(__m256d)(B), (int)(imm), \
-                                         (__v4df)_mm256_setzero_pd(), \
-                                         (__mmask8)(U)); })
+#define _mm256_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                       (__v4df)_mm256_setzero_pd()); })
 
-#define _mm_mask_shuffle_ps(W, U, A, B, imm) __extension__ ({ \
-  (__m128)__builtin_ia32_shufps128_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (int)(imm), \
-                                        (__v4sf)(__m128)(W), (__mmask8)(U)); })
+#define _mm_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                      (__v4sf)(__m128)(W)); })
 
-#define _mm_maskz_shuffle_ps(U, A, B, imm) __extension__ ({ \
-  (__m128)__builtin_ia32_shufps128_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (int)(imm), \
-                                        (__v4sf)_mm_setzero_ps(), \
-                                        (__mmask8)(U)); })
+#define _mm_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                      (__v4sf)_mm_setzero_ps()); })
 
-#define _mm256_mask_shuffle_ps(W, U, A, B, imm) __extension__ ({ \
-  (__m256)__builtin_ia32_shufps256_mask((__v8sf)(__m256)(A), \
-                                        (__v8sf)(__m256)(B), (int)(imm), \
-                                        (__v8sf)(__m256)(W), (__mmask8)(U)); })
+#define _mm256_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                      (__v8sf)(__m256)(W)); })
 
-#define _mm256_maskz_shuffle_ps(U, A, B, imm) __extension__ ({ \
-  (__m256)__builtin_ia32_shufps256_mask((__v8sf)(__m256)(A), \
-                                        (__v8sf)(__m256)(B), (int)(imm), \
-                                        (__v8sf)_mm256_setzero_ps(), \
-                                        (__mmask8)(U)); })
+#define _mm256_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                      (__v8sf)_mm256_setzero_ps()); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_rsqrt14_pd (__m128d __A)
diff --git a/test/CodeGen/avx512f-builtins.c b/test/CodeGen/avx512f-builtins.c
index 1a24cafa57..a475e0eaaa 100644
--- a/test/CodeGen/avx512f-builtins.c
+++ b/test/CodeGen/avx512f-builtins.c
@@ -4220,19 +4220,21 @@ __m512i test_mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B) {
 
 __m512d test_mm512_shuffle_pd(__m512d __M, __m512d __V) {
   // CHECK-LABEL: @test_mm512_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.512
+  // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
   return _mm512_shuffle_pd(__M, __V, 4);
 }
 
 __m512d test_mm512_mask_shuffle_pd(__m512d __W, __mmask8 __U, __m512d __M, __m512d __V) {
   // CHECK-LABEL: @test_mm512_mask_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.512
+  // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
   return _mm512_mask_shuffle_pd(__W, __U, __M, __V, 4);
 }
 
 __m512d test_mm512_maskz_shuffle_pd(__mmask8 __U, __m512d __M, __m512d __V) {
   // CHECK-LABEL: @test_mm512_maskz_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.512
+  // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
   return _mm512_maskz_shuffle_pd(__U, __M, __V, 4);
 }
 
diff --git a/test/CodeGen/avx512vl-builtins.c b/test/CodeGen/avx512vl-builtins.c
index 7e1c990874..b4024ba8c6 100644
--- a/test/CodeGen/avx512vl-builtins.c
+++ b/test/CodeGen/avx512vl-builtins.c
@@ -5189,49 +5189,57 @@ __m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) {
 
 __m128d test_mm_mask_shuffle_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
   // CHECK-LABEL: @test_mm_mask_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.128
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
   return _mm_mask_shuffle_pd(__W, __U, __A, __B, 3);
 }
 
 __m128d test_mm_maskz_shuffle_pd(__mmask8 __U, __m128d __A, __m128d __B) {
   // CHECK-LABEL: @test_mm_maskz_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.128
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
   return _mm_maskz_shuffle_pd(__U, __A, __B, 3);
 }
 
 __m256d test_mm256_mask_shuffle_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
   // CHECK-LABEL: @test_mm256_mask_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.256
+  // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
   return _mm256_mask_shuffle_pd(__W, __U, __A, __B, 3);
 }
 
 __m256d test_mm256_maskz_shuffle_pd(__mmask8 __U, __m256d __A, __m256d __B) {
   // CHECK-LABEL: @test_mm256_maskz_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.256
+  // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
   return _mm256_maskz_shuffle_pd(__U, __A, __B, 3);
 }
 
 __m128 test_mm_mask_shuffle_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm_mask_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.128
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_mask_shuffle_ps(__W, __U, __A, __B, 4);
 }
 
 __m128 test_mm_maskz_shuffle_ps(__mmask8 __U, __m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm_maskz_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.128
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_maskz_shuffle_ps(__U, __A, __B, 4);
 }
 
 __m256 test_mm256_mask_shuffle_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
   // CHECK-LABEL: @test_mm256_mask_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.256
+  // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_mask_shuffle_ps(__W, __U, __A, __B, 4);
 }
 
 __m256 test_mm256_maskz_shuffle_ps(__mmask8 __U, __m256 __A, __m256 __B) {
   // CHECK-LABEL: @test_mm256_maskz_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.256
+  // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_maskz_shuffle_ps(__U, __A, __B, 4);
 }
-- 
2.40.0
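Note on the index arithmetic: the new _mm512_shuffle_pd definition decodes the VSHUFPD immediate so that even result elements come from A, odd ones from B (B's elements are numbered 8..15 in the shufflevector mask), with bit i of the immediate picking the low or high double within each 128-bit lane. The C harness below is a minimal standalone sketch written for this note, not part of the patch; shufpd512_indices is a hypothetical helper name used only for illustration. It mirrors the macro's arithmetic and, for the immediate 4 used by test_mm512_shuffle_pd, prints the shuffle mask the updated CHECK line expects.

#include <stdio.h>

/* Scalar model of the index arithmetic in the new _mm512_shuffle_pd:
   result element i is taken from A when i is even and from B when i is
   odd (B elements are numbered 8..15 in the shufflevector mask); bit i
   of the immediate picks the low or high double of the 128-bit lane
   that element i lives in. */
static void shufpd512_indices(unsigned imm, int idx[8]) {
  for (int i = 0; i < 8; i++) {
    int lane_low = i & ~1;          /* 0, 2, 4, 6: low element of the lane */
    int bit      = (imm >> i) & 1;  /* bit i: low (0) or high (1) element  */
    int from_b   = (i & 1) ? 8 : 0; /* odd result elements come from B     */
    idx[i] = lane_low + bit + from_b;
  }
}

int main(void) {
  int idx[8];
  shufpd512_indices(4, idx); /* immediate used by test_mm512_shuffle_pd */
  for (int i = 0; i < 8; i++)
    printf("%d ", idx[i]);   /* prints: 0 8 3 10 4 12 6 14 */
  printf("\n");
  return 0;
}

The printed indices match the <8 x i32> mask in the updated avx512f test. Emitting the shuffle as a constant-mask shufflevector lets the middle-end see through what was previously an opaque x86 intrinsic, and the masked forms then only need the generic selectpd/selectps builtins, which lower to ordinary IR selects (the second CHECK line in each masked test).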