From: Craig Topper
Date: Tue, 17 May 2016 04:41:38 +0000 (+0000)
Subject: [AVX512] Add parentheses around macro arguments in AVX512ER intrinsics. Remove leadin...
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=79f24a4aa3f7a85d673e674f75caf696e130df13;p=clang

[AVX512] Add parentheses around macro arguments in AVX512ER intrinsics.
Remove leading underscores from macro argument names. Add explicit typecasts
to all macro arguments and return values. And finally reformat after all the
adjustments.

This is a mostly mechanical change accomplished with a script. I tried to
split out any changes to the typecasts that already existed into separate
commits.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@269741 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Headers/avx512erintrin.h b/lib/Headers/avx512erintrin.h
index 8c57c727af..b24ba7b182 100644
--- a/lib/Headers/avx512erintrin.h
+++ b/lib/Headers/avx512erintrin.h
@@ -31,66 +31,66 @@
 #define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
                                       (__v8df)_mm512_setzero_pd(), \
-                                      (__mmask8)-1, (R)); })
+                                      (__mmask8)-1, (int)(R)); })
 
 #define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
-                                      (__v8df)(__m512d)(S), \
-                                      (__mmask8)(M), (R)); })
+                                      (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                      (int)(R)); })
 
 #define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
                                       (__v8df)_mm512_setzero_pd(), \
-                                      (__mmask8)(M), (R)); })
+                                      (__mmask8)(M), (int)(R)); })
 
 #define _mm512_exp2a23_pd(A) \
-   _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_mask_exp2a23_pd(S, M, A) \
-   _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_maskz_exp2a23_pd(M, A) \
-   _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \
   (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
                                      (__v16sf)_mm512_setzero_ps(), \
-                                     (__mmask16)-1, (R)); })
+                                     (__mmask16)-1, (int)(R)); })
 
 #define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
-                                     (__v16sf)(__m512)(S), \
-                                     (__mmask16)(M), (R)); })
+                                     (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                     (int)(R)); })
 
 #define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
                                      (__v16sf)_mm512_setzero_ps(), \
-                                     (__mmask16)(M), (R)); })
+                                     (__mmask16)(M), (int)(R)); })
 
 #define _mm512_exp2a23_ps(A) \
-   _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_mask_exp2a23_ps(S, M, A) \
-   _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
 
 #define _mm512_maskz_exp2a23_ps(M, A) \
-   _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
 
 // rsqrt28
 #define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
                                          (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)-1, (R)); })
+                                         (__mmask8)-1, (int)(R)); })
 
 #define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(S), \
-                                         (__mmask8)(M), (R)); })
+                                         (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                         (int)(R)); })
 
 #define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
                                          (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)(M), (R)); })
+                                         (__mmask8)(M), (int)(R)); })
 
 #define _mm512_rsqrt28_pd(A) \
   _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -104,17 +104,17 @@
 #define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
                                         (__v16sf)_mm512_setzero_ps(), \
-                                        (__mmask16)-1, (R)); })
+                                        (__mmask16)-1, (int)(R)); })
 
 #define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
-                                        (__v16sf)(__m512)(S), \
-                                        (__mmask16)(M), (R)); })
+                                        (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                        (int)(R)); })
 
 #define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
                                         (__v16sf)_mm512_setzero_ps(), \
-                                        (__mmask16)(M), (R)); })
+                                        (__mmask16)(M), (int)(R)); })
 
 #define _mm512_rsqrt28_ps(A) \
   _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -127,21 +127,21 @@
 
 #define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4sf)_mm_setzero_ps(), \
-                                         (__mmask8)-1, (R)); })
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)-1, (int)(R)); })
 
 #define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4sf)(__m128)(S), \
-                                         (__mmask8)(M), (R)); })
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)(__m128)(S), \
+                                              (__mmask8)(M), (int)(R)); })
 
 #define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
-                                         (__v4sf)(__m128)(B), \
-                                         (__v4sf)_mm_setzero_ps(), \
-                                         (__mmask8)(M), (R)); })
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)(M), (int)(R)); })
 
 #define _mm_rsqrt28_ss(A, B) \
   _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -154,21 +154,21 @@
 
 #define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2df)_mm_setzero_pd(), \
-                                          (__mmask8)-1, (R)); })
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)-1, (int)(R)); })
 
 #define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2df)(__m128d)(S), \
-                                          (__mmask8)(M), (R)); })
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)(__m128d)(S), \
+                                               (__mmask8)(M), (int)(R)); })
 
 #define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
-                                          (__v2df)(__m128d)(B), \
-                                          (__v2df)_mm_setzero_pd(), \
-                                          (__mmask8)(M), (R)); })
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)(M), (int)(R)); })
 
 #define _mm_rsqrt28_sd(A, B) \
   _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -183,17 +183,17 @@
 #define _mm512_rcp28_round_pd(A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
                                        (__v8df)_mm512_setzero_pd(), \
-                                       (__mmask8)-1, (R)); })
+                                       (__mmask8)-1, (int)(R)); })
 
 #define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
-                                       (__v8df)(__m512d)(S), \
-                                       (__mmask8)(M), (R)); })
+                                       (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                       (int)(R)); })
 
 #define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
                                        (__v8df)_mm512_setzero_pd(), \
-                                       (__mmask8)(M), (R)); })
+                                       (__mmask8)(M), (int)(R)); })
 
 #define _mm512_rcp28_pd(A) \
   _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -207,17 +207,17 @@
 #define _mm512_rcp28_round_ps(A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
                                       (__v16sf)_mm512_setzero_ps(), \
-                                      (__mmask16)-1, (R)); })
+                                      (__mmask16)-1, (int)(R)); })
 
 #define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
-                                      (__v16sf)(__m512)(S), \
-                                      (__mmask16)(M), (R)); })
+                                      (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                      (int)(R)); })
 
 #define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
                                       (__v16sf)_mm512_setzero_ps(), \
-                                      (__mmask16)(M), (R)); })
+                                      (__mmask16)(M), (int)(R)); })
 
 #define _mm512_rcp28_ps(A) \
   _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -230,21 +230,21 @@
 
 #define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
-                                       (__v4sf)(__m128)(B), \
-                                       (__v4sf)_mm_setzero_ps(), \
-                                       (__mmask8)-1, (R)); })
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)-1, (int)(R)); })
 
 #define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
-                                       (__v4sf)(__m128)(B), \
-                                       (__v4sf)(__m128)(S), \
-                                       (__mmask8)(M), (R)); })
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)(__m128)(S), \
+                                            (__mmask8)(M), (int)(R)); })
 
 #define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
-                                       (__v4sf)(__m128)(B), \
-                                       (__v4sf)_mm_setzero_ps(), \
-                                       (__mmask8)(M), (R)); })
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)(M), (int)(R)); })
 
 #define _mm_rcp28_ss(A, B) \
   _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -257,21 +257,21 @@
 
 #define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)-1, (R)); })
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)-1, (int)(R)); })
 
 #define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)(__m128d)(S), \
-                                        (__mmask8)(M), (R)); })
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)(__m128d)(S), \
+                                             (__mmask8)(M), (int)(R)); })
 
 #define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)(M), (R)); })
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)(M), (int)(R)); })
 
 #define _mm_rcp28_sd(A, B) \
   _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
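
For readers wondering why the mechanical change above matters: a macro
argument that is not parenthesized can re-associate with operators at the
expansion site, and without casts the macro performs none of the type
conversion a prototyped function would. A minimal sketch of the
parenthesization hazard follows (the SQUARE macros are hypothetical,
illustration only; they do not appear in avx512erintrin.h):

#include <stdio.h>

/* Hypothetical macros, illustration only. */
#define SQUARE_BAD(x)  x * x        /* argument expanded without parentheses */
#define SQUARE_GOOD(x) ((x) * (x))  /* argument and result parenthesized */

int main(void) {
  printf("%d\n", SQUARE_BAD(1 + 2));  /* expands to 1 + 2 * 1 + 2, prints 5 */
  printf("%d\n", SQUARE_GOOD(1 + 2)); /* expands to ((1 + 2) * (1 + 2)), prints 9 */
  return 0;
}

The (int)(R) casts added by the patch serve the type half of the same goal:
the rounding-control argument is converted to int at the macro boundary,
matching the type the underlying builtin expects.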