From 39af41fdefbaf05441564fd5d6861dba5842b908 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Sun, 4 Sep 2016 18:30:17 +0000 Subject: [PATCH] [AVX-512] Remove 128-bit and 256-bit masked floating point add/sub/mul/div builtins and replace with native operations. We can't do the 512-bit ones because they take a rounding mode argument that we can't represent. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@280635 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/clang/Basic/BuiltinsX86.def | 16 -- lib/Headers/avx512vlintrin.h | 307 ++++++++++++---------------- test/CodeGen/avx512vl-builtins.c | 112 ++++++---- 3 files changed, 200 insertions(+), 235 deletions(-) diff --git a/include/clang/Basic/BuiltinsX86.def b/include/clang/Basic/BuiltinsX86.def index 2923e3218b..a9577cf837 100644 --- a/include/clang/Basic/BuiltinsX86.def +++ b/include/clang/Basic/BuiltinsX86.def @@ -1224,10 +1224,6 @@ TARGET_BUILTIN(__builtin_ia32_subsd_round_mask, "V2dV2dV2dV2dUcIi", "", "avx512f TARGET_BUILTIN(__builtin_ia32_maxsd_round_mask, "V2dV2dV2dV2dUcIi", "", "avx512f") TARGET_BUILTIN(__builtin_ia32_minsd_round_mask, "V2dV2dV2dV2dUcIi", "", "avx512f") -TARGET_BUILTIN(__builtin_ia32_addpd128_mask, "V2dV2dV2dV2dUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_addpd256_mask, "V4dV4dV4dV4dUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_addps128_mask, "V4fV4fV4fV4fUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_addps256_mask, "V8fV8fV8fV8fUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_compressdf128_mask, "V2dV2dV2dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_compressdf256_mask, "V4dV4dV4dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_compressdi128_mask, "V2LLiV2LLiV2LLiUc", "", "avx512vl") @@ -1272,10 +1268,6 @@ TARGET_BUILTIN(__builtin_ia32_cvtudq2pd128_mask, "V2dV4iV2dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_cvtudq2pd256_mask, "V4dV4iV4dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_cvtudq2ps128_mask, "V4fV4iV4fUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_cvtudq2ps256_mask, "V8fV8iV8fUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_divpd_mask, "V2dV2dV2dV2dUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_divpd256_mask, "V4dV4dV4dV4dUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_divps_mask, "V4fV4fV4fV4fUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_divps256_mask, "V8fV8fV8fV8fUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_expanddf128_mask, "V2dV2dV2dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_expanddf256_mask, "V4dV4dV4dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_expanddi128_mask, "V2LLiV2LLiV2LLiUc", "", "avx512vl") @@ -1304,10 +1296,6 @@ TARGET_BUILTIN(__builtin_ia32_minpd_mask, "V2dV2dV2dV2dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_minpd256_mask, "V4dV4dV4dV4dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_minps_mask, "V4fV4fV4fV4fUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_minps256_mask, "V8fV8fV8fV8fUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_mulpd_mask, "V2dV2dV2dV2dUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_mulpd256_mask, "V4dV4dV4dV4dUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_mulps_mask, "V4fV4fV4fV4fUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_mulps256_mask, "V8fV8fV8fV8fUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_pabsd128_mask, "V4iV4iV4iUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_pabsd256_mask, "V8iV8iV8iUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_pabsq128_mask, "V2LLiV2LLiV2LLiUc", "", "avx512vl") @@ -1358,10 +1346,6 @@ 
TARGET_BUILTIN(__builtin_ia32_sqrtpd128_mask, "V2dV2dV2dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_sqrtpd256_mask, "V4dV4dV4dUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_sqrtps128_mask, "V4fV4fV4fUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_sqrtps256_mask, "V8fV8fV8fUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_subpd128_mask, "V2dV2dV2dV2dUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_subpd256_mask, "V4dV4dV4dV4dUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_subps128_mask, "V4fV4fV4fV4fUc", "", "avx512vl") -TARGET_BUILTIN(__builtin_ia32_subps256_mask, "V8fV8fV8fV8fUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_vpermi2vard128_mask, "V4iV4iV4iV4iUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_vpermi2vard256_mask, "V8iV8iV8iV8iUc", "", "avx512vl") TARGET_BUILTIN(__builtin_ia32_vpermi2varpd128_mask, "V2dV2dV2LLiV2dUc", "", "avx512vl") diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h index 77751b814a..72b0292f67 100644 --- a/lib/Headers/avx512vlintrin.h +++ b/lib/Headers/avx512vlintrin.h @@ -1857,71 +1857,59 @@ _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U) } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_mask_add_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) __W, - (__mmask8) __U); +_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_add_pd(__A, __B), + (__v2df)__W); } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_maskz_add_pd (__mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) __U); +_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_add_pd(__A, __B), + (__v2df)_mm_setzero_pd()); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_mask_add_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) __W, - (__mmask8) __U); +_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_add_pd(__A, __B), + (__v4df)__W); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_maskz_add_pd (__mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_setzero_pd (), - (__mmask8) __U); +_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_add_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_mask_add_ps (__m128 __W, __mmask16 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) __W, - (__mmask8) __U); +_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_add_ps(__A, __B), + (__v4sf)__W); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_maskz_add_ps (__mmask16 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) __U); +_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) 
{ + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_add_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_mask_add_ps (__m256 __W, __mmask16 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) __W, - (__mmask8) __U); +_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_add_ps(__A, __B), + (__v8sf)__W); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_maskz_add_ps (__mmask16 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) __U); +_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_add_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); } static __inline__ __m128i __DEFAULT_FN_ATTRS @@ -2673,72 +2661,59 @@ _mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) { } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_mask_div_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) __W, - (__mmask8) __U); +_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_div_pd(__A, __B), + (__v2df)__W); } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_maskz_div_pd (__mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) __U); +_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_div_pd(__A, __B), + (__v2df)_mm_setzero_pd()); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_mask_div_pd (__m256d __W, __mmask8 __U, __m256d __A, - __m256d __B) { - return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) __W, - (__mmask8) __U); +_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_div_pd(__A, __B), + (__v4df)__W); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_maskz_div_pd (__mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_setzero_pd (), - (__mmask8) __U); +_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_div_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_mask_div_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) __W, - (__mmask8) __U); +_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_div_ps(__A, __B), + (__v4sf)__W); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_maskz_div_ps (__mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) __U); +_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + 
(__v4sf)_mm_div_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_mask_div_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) __W, - (__mmask8) __U); +_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_div_ps(__A, __B), + (__v8sf)__W); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_maskz_div_ps (__mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) __U); +_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_div_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); } static __inline__ __m128d __DEFAULT_FN_ATTRS @@ -3227,72 +3202,59 @@ _mm256_maskz_min_ps (__mmask8 __U, __m256 __A, __m256 __B) { } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_mask_mul_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) __W, - (__mmask8) __U); +_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_mul_pd(__A, __B), + (__v2df)__W); } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_maskz_mul_pd (__mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) __U); +_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_mul_pd(__A, __B), + (__v2df)_mm_setzero_pd()); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_mask_mul_pd (__m256d __W, __mmask8 __U, __m256d __A, - __m256d __B) { - return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) __W, - (__mmask8) __U); +_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_mul_pd(__A, __B), + (__v4df)__W); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_maskz_mul_pd (__mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_setzero_pd (), - (__mmask8) __U); +_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_mul_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_mask_mul_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) __W, - (__mmask8) __U); +_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_mul_ps(__A, __B), + (__v4sf)__W); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_maskz_mul_ps (__mmask8 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) __U); +_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_mul_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); } static 
__inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_mask_mul_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) __W, - (__mmask8) __U); +_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_mul_ps(__A, __B), + (__v8sf)__W); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_maskz_mul_ps (__mmask8 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) __U); +_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_mul_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); } static __inline__ __m128i __DEFAULT_FN_ATTRS @@ -4117,72 +4079,59 @@ _mm256_maskz_sqrt_ps (__mmask8 __U, __m256 __A) { } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_mask_sub_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) __W, - (__mmask8) __U); +_mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sub_pd(__A, __B), + (__v2df)__W); } static __inline__ __m128d __DEFAULT_FN_ATTRS -_mm_maskz_sub_pd (__mmask8 __U, __m128d __A, __m128d __B) { - return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A, - (__v2df) __B, - (__v2df) - _mm_setzero_pd (), - (__mmask8) __U); +_mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) { + return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U, + (__v2df)_mm_sub_pd(__A, __B), + (__v2df)_mm_setzero_pd()); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_mask_sub_pd (__m256d __W, __mmask8 __U, __m256d __A, - __m256d __B) { - return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) __W, - (__mmask8) __U); +_mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sub_pd(__A, __B), + (__v4df)__W); } static __inline__ __m256d __DEFAULT_FN_ATTRS -_mm256_maskz_sub_pd (__mmask8 __U, __m256d __A, __m256d __B) { - return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A, - (__v4df) __B, - (__v4df) - _mm256_setzero_pd (), - (__mmask8) __U); +_mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) { + return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U, + (__v4df)_mm256_sub_pd(__A, __B), + (__v4df)_mm256_setzero_pd()); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_mask_sub_ps (__m128 __W, __mmask16 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) __W, - (__mmask8) __U); +_mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sub_ps(__A, __B), + (__v4sf)__W); } static __inline__ __m128 __DEFAULT_FN_ATTRS -_mm_maskz_sub_ps (__mmask16 __U, __m128 __A, __m128 __B) { - return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A, - (__v4sf) __B, - (__v4sf) - _mm_setzero_ps (), - (__mmask8) __U); +_mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) { + return (__m128)__builtin_ia32_selectps_128((__mmask8)__U, + (__v4sf)_mm_sub_ps(__A, __B), + (__v4sf)_mm_setzero_ps()); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_mask_sub_ps (__m256 __W, 
__mmask16 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) __W, - (__mmask8) __U); +_mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sub_ps(__A, __B), + (__v8sf)__W); } static __inline__ __m256 __DEFAULT_FN_ATTRS -_mm256_maskz_sub_ps (__mmask16 __U, __m256 __A, __m256 __B) { - return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A, - (__v8sf) __B, - (__v8sf) - _mm256_setzero_ps (), - (__mmask8) __U); +_mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) { + return (__m256)__builtin_ia32_selectps_256((__mmask8)__U, + (__v8sf)_mm256_sub_ps(__A, __B), + (__v8sf)_mm256_setzero_ps()); } static __inline__ __m128i __DEFAULT_FN_ATTRS diff --git a/test/CodeGen/avx512vl-builtins.c b/test/CodeGen/avx512vl-builtins.c index 801f3e78f7..16310ca4b8 100644 --- a/test/CodeGen/avx512vl-builtins.c +++ b/test/CodeGen/avx512vl-builtins.c @@ -1523,42 +1523,50 @@ __m256 test_mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 _ __m128d test_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { // CHECK-LABEL: @test_mm_mask_add_pd - // CHECK: @llvm.x86.avx512.mask.add.pd.128 + // CHECK: fadd <2 x double> %{{.*}}, %{{.*}} + // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_mask_add_pd(__W,__U,__A,__B); } __m128d test_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) { // CHECK-LABEL: @test_mm_maskz_add_pd - // CHECK: @llvm.x86.avx512.mask.add.pd.128 + // CHECK: fadd <2 x double> %{{.*}}, %{{.*}} + // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_maskz_add_pd(__U,__A,__B); } __m256d test_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: @test_mm256_mask_add_pd - // CHECK: @llvm.x86.avx512.mask.add.pd.256 + // CHECK: fadd <4 x double> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_mask_add_pd(__W,__U,__A,__B); } __m256d test_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: @test_mm256_maskz_add_pd - // CHECK: @llvm.x86.avx512.mask.add.pd.256 + // CHECK: fadd <4 x double> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_maskz_add_pd(__U,__A,__B); } -__m128 test_mm_mask_add_ps(__m128 __W, __mmask16 __U, __m128 __A, __m128 __B) { +__m128 test_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { // CHECK-LABEL: @test_mm_mask_add_ps - // CHECK: @llvm.x86.avx512.mask.add.ps.128 + // CHECK: fadd <4 x float> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_mask_add_ps(__W,__U,__A,__B); } -__m128 test_mm_maskz_add_ps(__mmask16 __U, __m128 __A, __m128 __B) { +__m128 test_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) { // CHECK-LABEL: @test_mm_maskz_add_ps - // CHECK: @llvm.x86.avx512.mask.add.ps.128 + // CHECK: fadd <4 x float> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_maskz_add_ps(__U,__A,__B); } -__m256 test_mm256_mask_add_ps(__m256 __W, __mmask16 __U, __m256 __A, __m256 __B) { +__m256 test_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: @test_mm256_mask_add_ps - // CHECK: @llvm.x86.avx512.mask.add.ps.256 + // CHECK: fadd <8 x float> %{{.*}}, %{{.*}} + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return
_mm256_mask_add_ps(__W,__U,__A,__B); } -__m256 test_mm256_maskz_add_ps(__mmask16 __U, __m256 __A, __m256 __B) { +__m256 test_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: @test_mm256_maskz_add_ps - // CHECK: @llvm.x86.avx512.mask.add.ps.256 + // CHECK: fadd <8 x float> %{{.*}}, %{{.*}} + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_maskz_add_ps(__U,__A,__B); } __m128i test_mm_mask_blend_epi32(__mmask8 __U, __m128i __A, __m128i __W) { @@ -2063,42 +2071,50 @@ __m256 test_mm256_maskz_cvtepu32_ps(__mmask8 __U, __m256i __A) { } __m128d test_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { // CHECK-LABEL: @test_mm_mask_div_pd - // CHECK: @llvm.x86.avx512.mask.div.pd.128 + // CHECK: fdiv <2 x double> %{{.*}}, %{{.*}} + // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_mask_div_pd(__W,__U,__A,__B); } __m128d test_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) { // CHECK-LABEL: @test_mm_maskz_div_pd - // CHECK: @llvm.x86.avx512.mask.div.pd.128 + // CHECK: fdiv <2 x double> %{{.*}}, %{{.*}} + // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_maskz_div_pd(__U,__A,__B); } __m256d test_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: @test_mm256_mask_div_pd - // CHECK: @llvm.x86.avx512.mask.div.pd.256 + // CHECK: fdiv <4 x double> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_mask_div_pd(__W,__U,__A,__B); } __m256d test_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: @test_mm256_maskz_div_pd - // CHECK: @llvm.x86.avx512.mask.div.pd.256 + // CHECK: fdiv <4 x double> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_maskz_div_pd(__U,__A,__B); } __m128 test_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { // CHECK-LABEL: @test_mm_mask_div_ps - // CHECK: @llvm.x86.avx512.mask.div.ps.128 + // CHECK: fdiv <4 x float> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_mask_div_ps(__W,__U,__A,__B); } __m128 test_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) { // CHECK-LABEL: @test_mm_maskz_div_ps - // CHECK: @llvm.x86.avx512.mask.div.ps.128 + // CHECK: fdiv <4 x float> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_maskz_div_ps(__U,__A,__B); } __m256 test_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: @test_mm256_mask_div_ps - // CHECK: @llvm.x86.avx512.mask.div.ps.256 + // CHECK: fdiv <8 x float> %{{.*}}, %{{.*}} + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_mask_div_ps(__W,__U,__A,__B); } __m256 test_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: @test_mm256_maskz_div_ps - // CHECK: @llvm.x86.avx512.mask.div.ps.256 + // CHECK: fdiv <8 x float> %{{.*}}, %{{.*}} + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_maskz_div_ps(__U,__A,__B); } __m128d test_mm_mask_expand_pd(__m128d __W, __mmask8 __U, __m128d __A) { @@ -2403,42 +2419,50 @@ __m256 test_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) { } __m128d test_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d 
__B) { // CHECK-LABEL: @test_mm_mask_mul_pd - // CHECK: @llvm.x86.avx512.mask.mul.pd + // CHECK: fmul <2 x double> %{{.*}}, %{{.*}} + // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_mask_mul_pd(__W,__U,__A,__B); } __m128d test_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) { // CHECK-LABEL: @test_mm_maskz_mul_pd - // CHECK: @llvm.x86.avx512.mask.mul.pd + // CHECK: fmul <2 x double> %{{.*}}, %{{.*}} + // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_maskz_mul_pd(__U,__A,__B); } __m256d test_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: @test_mm256_mask_mul_pd - // CHECK: @llvm.x86.avx512.mask.mul.pd.256 + // CHECK: fmul <4 x double> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_mask_mul_pd(__W,__U,__A,__B); } __m256d test_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: @test_mm256_maskz_mul_pd - // CHECK: @llvm.x86.avx512.mask.mul.pd.256 + // CHECK: fmul <4 x double> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_maskz_mul_pd(__U,__A,__B); } __m128 test_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { // CHECK-LABEL: @test_mm_mask_mul_ps - // CHECK: @llvm.x86.avx512.mask.mul.ps + // CHECK: fmul <4 x float> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_mask_mul_ps(__W,__U,__A,__B); } __m128 test_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) { // CHECK-LABEL: @test_mm_maskz_mul_ps - // CHECK: @llvm.x86.avx512.mask.mul.ps + // CHECK: fmul <4 x float> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_maskz_mul_ps(__U,__A,__B); } __m256 test_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: @test_mm256_mask_mul_ps - // CHECK: @llvm.x86.avx512.mask.mul.ps.256 + // CHECK: fmul <8 x float> %{{.*}}, %{{.*}} + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_mask_mul_ps(__W,__U,__A,__B); } __m256 test_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: @test_mm256_maskz_mul_ps - // CHECK: @llvm.x86.avx512.mask.mul.ps.256 + // CHECK: fmul <8 x float> %{{.*}}, %{{.*}} + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_maskz_mul_ps(__U,__A,__B); } __m128i test_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) { @@ -3013,42 +3037,50 @@ __m256 test_mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) { } __m128d test_mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { // CHECK-LABEL: @test_mm_mask_sub_pd - // CHECK: @llvm.x86.avx512.mask.sub.pd.128 + // CHECK: fsub <2 x double> %{{.*}}, %{{.*}} + // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_mask_sub_pd(__W,__U,__A,__B); } __m128d test_mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) { // CHECK-LABEL: @test_mm_maskz_sub_pd - // CHECK: @llvm.x86.avx512.mask.sub.pd.128 + // CHECK: fsub <2 x double> %{{.*}}, %{{.*}} + // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}} return _mm_maskz_sub_pd(__U,__A,__B); } __m256d test_mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: @test_mm256_mask_sub_pd - // CHECK: @llvm.x86.avx512.mask.sub.pd.256 + // CHECK: fsub <4 x 
double> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_mask_sub_pd(__W,__U,__A,__B); } __m256d test_mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) { // CHECK-LABEL: @test_mm256_maskz_sub_pd - // CHECK: @llvm.x86.avx512.mask.sub.pd.256 + // CHECK: fsub <4 x double> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}} return _mm256_maskz_sub_pd(__U,__A,__B); } -__m128 test_mm_mask_sub_ps(__m128 __W, __mmask16 __U, __m128 __A, __m128 __B) { +__m128 test_mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { // CHECK-LABEL: @test_mm_mask_sub_ps - // CHECK: @llvm.x86.avx512.mask.sub.ps.128 + // CHECK: fsub <4 x float> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_mask_sub_ps(__W,__U,__A,__B); } -__m128 test_mm_maskz_sub_ps(__mmask16 __U, __m128 __A, __m128 __B) { +__m128 test_mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) { // CHECK-LABEL: @test_mm_maskz_sub_ps - // CHECK: @llvm.x86.avx512.mask.sub.ps.128 + // CHECK: fsub <4 x float> %{{.*}}, %{{.*}} + // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}} return _mm_maskz_sub_ps(__U,__A,__B); } -__m256 test_mm256_mask_sub_ps(__m256 __W, __mmask16 __U, __m256 __A, __m256 __B) { +__m256 test_mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: @test_mm256_mask_sub_ps - // CHECK: @llvm.x86.avx512.mask.sub.ps.256 + // CHECK: fsub <8 x float> %{{.*}}, %{{.*}} + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_mask_sub_ps(__W,__U,__A,__B); } -__m256 test_mm256_maskz_sub_ps(__mmask16 __U, __m256 __A, __m256 __B) { +__m256 test_mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) { // CHECK-LABEL: @test_mm256_maskz_sub_ps - // CHECK: @llvm.x86.avx512.mask.sub.ps.256 + // CHECK: fsub <8 x float> %{{.*}}, %{{.*}} + // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}} return _mm256_maskz_sub_ps(__U,__A,__B); } __m128i test_mm_mask2_permutex2var_epi32(__m128i __A, __m128i __I, __mmask8 __U, __m128i __B) { -- 2.40.0
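Illustration (not part of the patch): a minimal standalone sketch of the merge-masking semantics that the rewritten intrinsics now express as a native IEEE operation followed by a per-element select. The file name, input values, and build line below are hypothetical; it assumes a Clang carrying these headers, and running it requires an AVX-512VL-capable CPU.

    /* demo.c -- hypothetical example, not part of this patch.
     * _mm_mask_add_pd(w, u, a, b): element i of the result is a[i]+b[i]
     * when mask bit i of u is set, otherwise the pass-through value w[i].
     * Build: clang -O1 -mavx512f -mavx512vl demo.c
     */
    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128d w = _mm_set_pd(100.0, 200.0); /* pass-through: e1=100, e0=200 */
      __m128d a = _mm_set_pd(1.0, 2.0);     /* e1=1,  e0=2  */
      __m128d b = _mm_set_pd(10.0, 20.0);   /* e1=10, e0=20 */
      __mmask8 u = 0x1;                     /* only element 0 is active */

      /* Element 0 gets 2+20; element 1 keeps the pass-through 100. */
      __m128d r = _mm_mask_add_pd(w, u, a, b);

      double out[2];
      _mm_storeu_pd(out, r);
      printf("%f %f\n", out[0], out[1]);    /* expect: 22.000000 100.000000 */
      return 0;
    }

With the patch applied, the IR emitted for the _mm_mask_add_pd call should be the fadd <2 x double> plus select <2 x i1> pair that the updated tests check for, rather than a call to @llvm.x86.avx512.mask.add.pd.128; the sub/mul/div variants follow the same pattern.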