From: Simon Pilgrim Date: Wed, 1 Jun 2016 21:46:51 +0000 (+0000) Subject: [X86][SSE] Replace (V)CVTTPS2DQ and VCVTTPD2DQ truncating (round to zero) f32/f64... X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=83f2064bfa57ba77297f618cbaa45e825dc5f119;p=clang [X86][SSE] Replace (V)CVTTPS2DQ and VCVTTPD2DQ truncating (round to zero) f32/f64 to i32 with generic IR (clang) The 'cvtt' truncation (round to zero) conversions can be safely represented as generic __builtin_convertvector (fptosi) calls instead of x86 intrinsics. We already do this (implicitly) for the scalar equivalents. Note: I looked at updating _mm_cvttpd_epi32 as well but this still requires a lot more backend work to correctly lower (both for debug and optimized builds). Differential Revision: http://reviews.llvm.org/D20859 git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@271436 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/include/clang/Basic/BuiltinsX86.def b/include/clang/Basic/BuiltinsX86.def index b4c44437d9..7f95c3f1f2 100644 --- a/include/clang/Basic/BuiltinsX86.def +++ b/include/clang/Basic/BuiltinsX86.def @@ -339,7 +339,6 @@ TARGET_BUILTIN(__builtin_ia32_cvttpd2dq, "V4iV2d", "", "sse2") TARGET_BUILTIN(__builtin_ia32_cvtsd2si, "iV2d", "", "sse2") TARGET_BUILTIN(__builtin_ia32_cvtsd2si64, "LLiV2d", "", "sse2") TARGET_BUILTIN(__builtin_ia32_cvtps2dq, "V4iV4f", "", "sse2") -TARGET_BUILTIN(__builtin_ia32_cvttps2dq, "V4iV4f", "", "sse2") TARGET_BUILTIN(__builtin_ia32_clflush, "vvC*", "", "sse2") TARGET_BUILTIN(__builtin_ia32_lfence, "v", "", "sse2") TARGET_BUILTIN(__builtin_ia32_mfence, "v", "", "sse2") @@ -462,9 +461,7 @@ TARGET_BUILTIN(__builtin_ia32_cmpps256, "V8fV8fV8fIc", "", "avx") TARGET_BUILTIN(__builtin_ia32_cvtdq2ps256, "V8fV8i", "", "avx") TARGET_BUILTIN(__builtin_ia32_cvtpd2ps256, "V4fV4d", "", "avx") TARGET_BUILTIN(__builtin_ia32_cvtps2dq256, "V8iV8f", "", "avx") -TARGET_BUILTIN(__builtin_ia32_cvttpd2dq256, "V4iV4d", "", "avx") 
 TARGET_BUILTIN(__builtin_ia32_cvtpd2dq256, "V4iV4d", "", "avx")
-TARGET_BUILTIN(__builtin_ia32_cvttps2dq256, "V8iV8f", "", "avx")
 TARGET_BUILTIN(__builtin_ia32_vperm2f128_pd256, "V4dV4dV4dIc", "", "avx")
 TARGET_BUILTIN(__builtin_ia32_vperm2f128_ps256, "V8fV8fV8fIc", "", "avx")
 TARGET_BUILTIN(__builtin_ia32_vperm2f128_si256, "V8iV8iV8iIc", "", "avx")
diff --git a/lib/Headers/avxintrin.h b/lib/Headers/avxintrin.h
index 6a7a4b903a..e46f8f4ac4 100644
--- a/lib/Headers/avxintrin.h
+++ b/lib/Headers/avxintrin.h
@@ -2108,7 +2108,7 @@ _mm256_cvtps_pd(__m128 __a)
 static __inline __m128i __DEFAULT_FN_ATTRS
 _mm256_cvttpd_epi32(__m256d __a)
 {
-  return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
+  return (__m128i)__builtin_convertvector((__v4df) __a, __v4si);
 }
 
 static __inline __m128i __DEFAULT_FN_ATTRS
@@ -2120,7 +2120,7 @@ _mm256_cvtpd_epi32(__m256d __a)
 static __inline __m256i __DEFAULT_FN_ATTRS
 _mm256_cvttps_epi32(__m256 __a)
 {
-  return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) __a);
+  return (__m256i)__builtin_convertvector((__v8sf) __a, __v8si);
 }
 
 static __inline double __DEFAULT_FN_ATTRS
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index 08ee06b1c6..04cca74a42 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -1744,7 +1744,7 @@ _mm_cvtps_epi32(__m128 __a)
 static __inline__ __m128i __DEFAULT_FN_ATTRS
 _mm_cvttps_epi32(__m128 __a)
 {
-  return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
+  return (__m128i)__builtin_convertvector((__v4sf)__a, __v4si);
 }
 
 /// \brief Returns a vector of [4 x i32] where the lowest element is the input
diff --git a/test/CodeGen/avx-builtins.c b/test/CodeGen/avx-builtins.c
index c1b8cf2c91..f1f211e54d 100644
--- a/test/CodeGen/avx-builtins.c
+++ b/test/CodeGen/avx-builtins.c
@@ -286,13 +286,13 @@ __m256d test_mm256_cvtps_pd(__m128 A) {
 
 __m128i test_mm256_cvttpd_epi32(__m256d A) {
   // CHECK-LABEL: test_mm256_cvttpd_epi32
-  // CHECK: call <4 x i32> @llvm.x86.avx.cvtt.pd2dq.256(<4 x double> %{{.*}})
+  // CHECK: fptosi <4 x double> %{{.*}} to <4 x i32>
   return _mm256_cvttpd_epi32(A);
 }
 
 __m256i test_mm256_cvttps_epi32(__m256 A) {
   // CHECK-LABEL: test_mm256_cvttps_epi32
-  // CHECK: call <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float> %{{.*}})
+  // CHECK: fptosi <8 x float> %{{.*}} to <8 x i32>
   return _mm256_cvttps_epi32(A);
 }
 
diff --git a/test/CodeGen/builtins-x86.c b/test/CodeGen/builtins-x86.c
index 6bfff11b78..24e491c026 100644
--- a/test/CodeGen/builtins-x86.c
+++ b/test/CodeGen/builtins-x86.c
@@ -335,7 +335,6 @@ void f0() {
   tmp_LLi = __builtin_ia32_cvtsd2si64(tmp_V2d);
 #endif
   tmp_V4i = __builtin_ia32_cvtps2dq(tmp_V4f);
-  tmp_V4i = __builtin_ia32_cvttps2dq(tmp_V4f);
   (void) __builtin_ia32_clflush(tmp_vCp);
   (void) __builtin_ia32_lfence();
   (void) __builtin_ia32_mfence();
@@ -415,9 +414,7 @@ void f0() {
   tmp_V8f = __builtin_ia32_cvtdq2ps256(tmp_V8i);
   tmp_V4f = __builtin_ia32_cvtpd2ps256(tmp_V4d);
   tmp_V8i = __builtin_ia32_cvtps2dq256(tmp_V8f);
-  tmp_V4i = __builtin_ia32_cvttpd2dq256(tmp_V4d);
   tmp_V4i = __builtin_ia32_cvtpd2dq256(tmp_V4d);
-  tmp_V8i = __builtin_ia32_cvttps2dq256(tmp_V8f);
   tmp_V4d = __builtin_ia32_vperm2f128_pd256(tmp_V4d, tmp_V4d, 0x7);
   tmp_V8f = __builtin_ia32_vperm2f128_ps256(tmp_V8f, tmp_V8f, 0x7);
   tmp_V8i = __builtin_ia32_vperm2f128_si256(tmp_V8i, tmp_V8i, 0x7);
diff --git a/test/CodeGen/sse2-builtins.c b/test/CodeGen/sse2-builtins.c
index 7d2cbc20e3..f08feb0e3a 100644
--- a/test/CodeGen/sse2-builtins.c
+++ b/test/CodeGen/sse2-builtins.c
@@ -533,7 +533,7 @@ __m128i test_mm_cvttpd_epi32(__m128d A) {
 
 __m128i test_mm_cvttps_epi32(__m128 A) {
   // CHECK-LABEL: test_mm_cvttps_epi32
-  // CHECK: call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %{{.*}})
+  // CHECK: fptosi <4 x float> %{{.*}} to <4 x i32>
   return _mm_cvttps_epi32(A);
 }