From: Sanjay Patel
Date: Tue, 10 Mar 2015 15:19:26 +0000 (+0000)
Subject: [X86, AVX] Replace vinsertf128 intrinsics with generic shuffles.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=a283317accbab76cb811ffda01dcf0aecbc21e42;p=clang

[X86, AVX] Replace vinsertf128 intrinsics with generic shuffles.

We want to replace as much custom x86 shuffling via intrinsics as
possible because pushing the code down the generic shuffle optimization
path allows for better codegen and less complexity in LLVM.

This is the sibling patch for the LLVM half of this change:
http://reviews.llvm.org/D8086
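
As an illustration (the variables here are hypothetical, not part of the
patch), a call such as

  __m256 r = _mm256_insertf128_ps(a, b, 1); /* insert b as the high lane */

now expands in avxintrin.h to a generic __builtin_shufflevector instead of
__builtin_ia32_vinsertf128_ps256: b is first widened to 256 bits with
_mm256_castps128_ps256 (high half undefined), then combined with a constant
mask such as <0, 1, 2, 3, 8, 9, 10, 11>, which the generic shuffle lowering
can still match back to a single vinsertf128 instruction.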

Differential Revision: http://reviews.llvm.org/D8088

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@231792 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/clang/Basic/BuiltinsX86.def b/include/clang/Basic/BuiltinsX86.def
index acf2eb6acb..7cb50b9c03 100644
--- a/include/clang/Basic/BuiltinsX86.def
+++ b/include/clang/Basic/BuiltinsX86.def
@@ -450,9 +450,6 @@ BUILTIN(__builtin_ia32_cvttps2dq256, "V8iV8f", "")
 BUILTIN(__builtin_ia32_vperm2f128_pd256, "V4dV4dV4dIc", "")
 BUILTIN(__builtin_ia32_vperm2f128_ps256, "V8fV8fV8fIc", "")
 BUILTIN(__builtin_ia32_vperm2f128_si256, "V8iV8iV8iIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_pd256, "V4dV4dV2dIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_ps256, "V8fV8fV4fIc", "")
-BUILTIN(__builtin_ia32_vinsertf128_si256, "V8iV8iV4iIc", "")
 BUILTIN(__builtin_ia32_sqrtpd256, "V4dV4d", "")
 BUILTIN(__builtin_ia32_sqrtps256, "V8fV8f", "")
 BUILTIN(__builtin_ia32_rsqrtps256, "V8fV8f", "")
diff --git a/lib/Headers/avxintrin.h b/lib/Headers/avxintrin.h
index d7c7f4645c..2d1735ec19 100644
--- a/lib/Headers/avxintrin.h
+++ b/lib/Headers/avxintrin.h
@@ -472,22 +472,6 @@ _mm256_extract_epi64(__m256i __a, const int __imm)
 }
 #endif
 
-/* Vector insert */
-#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
-  __m256d __V1 = (V1); \
-  __m128d __V2 = (V2); \
-  (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })
-
-#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
-  __m256 __V1 = (V1); \
-  __m128 __V2 = (V2); \
-  (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })
-
-#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
-  __m256i __V1 = (V1); \
-  __m128i __V2 = (V2); \
-  (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })
-
 static __inline __m256i __attribute__((__always_inline__, __nodebug__))
 _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
 {
@@ -1166,6 +1150,42 @@ _mm256_castsi128_si256(__m128i __a)
   return __builtin_shufflevector(__a, __a, 0, 1, -1, -1);
 }
 
+/*
+   Vector insert.
+   We use macros rather than inlines because we only want to accept
+   invocations where the immediate M is a constant expression.
+*/
+#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
+  (__m256)__builtin_shufflevector( \
+    (__v8sf)(V1), \
+    (__v8sf)_mm256_castps128_ps256((__m128)(V2)), \
+    (((M) & 1) ? 0 : 8), \
+    (((M) & 1) ? 1 : 9), \
+    (((M) & 1) ? 2 : 10), \
+    (((M) & 1) ? 3 : 11), \
+    (((M) & 1) ? 8 : 4), \
+    (((M) & 1) ? 9 : 5), \
+    (((M) & 1) ? 10 : 6), \
+    (((M) & 1) ? 11 : 7) );})
+
+#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
+  (__m256d)__builtin_shufflevector( \
+    (__v4df)(V1), \
+    (__v4df)_mm256_castpd128_pd256((__m128d)(V2)), \
+    (((M) & 1) ? 0 : 4), \
+    (((M) & 1) ? 1 : 5), \
+    (((M) & 1) ? 4 : 2), \
+    (((M) & 1) ? 5 : 3) );})
+
+#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
+  (__m256i)__builtin_shufflevector( \
+    (__v4di)(V1), \
+    (__v4di)_mm256_castsi128_si256((__m128i)(V2)), \
+    (((M) & 1) ? 0 : 4), \
+    (((M) & 1) ? 1 : 5), \
+    (((M) & 1) ? 4 : 2), \
+    (((M) & 1) ? 5 : 3) );})
+
 /* SIMD load ops (unaligned) */
 static __inline __m256 __attribute__((__always_inline__, __nodebug__))
 _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index a421ee6108..8ba9c685cd 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -882,9 +882,6 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
   case X86::BI__builtin_ia32_vextractf128_ps256:
   case X86::BI__builtin_ia32_vextractf128_si256:
   case X86::BI__builtin_ia32_extract128i256: i = 1, l = 0, u = 1; break;
-  case X86::BI__builtin_ia32_vinsertf128_pd256:
-  case X86::BI__builtin_ia32_vinsertf128_ps256:
-  case X86::BI__builtin_ia32_vinsertf128_si256:
   case X86::BI__builtin_ia32_insert128i256: i = 2, l = 0; u = 1; break;
   case X86::BI__builtin_ia32_sha1rnds4: i = 2, l = 0; u = 3; break;
   case X86::BI__builtin_ia32_vpermil2pd:
diff --git a/test/CodeGen/avx-shuffle-builtins.c b/test/CodeGen/avx-shuffle-builtins.c
index 76e2395fe8..3273b1ea2f 100644
--- a/test/CodeGen/avx-shuffle-builtins.c
+++ b/test/CodeGen/avx-shuffle-builtins.c
@@ -97,3 +97,42 @@ test_mm256_broadcast_ss(float const *__a) {
   // CHECK: insertelement <8 x float> {{.*}}, i32 7
   return _mm256_broadcast_ss(__a);
 }
+
+// Make sure we have the correct mask for each insertf128 case.
+
+__m256 test_mm256_insertf128_ps_0(__m256 a, __m128 b) {
+  // CHECK-LABEL: @test_mm256_insertf128_ps_0
+  // CHECK: shufflevector{{.*}}<i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
+  return _mm256_insertf128_ps(a, b, 0);
+}
+
+__m256d test_mm256_insertf128_pd_0(__m256d a, __m128d b) {
+  // CHECK-LABEL: @test_mm256_insertf128_pd_0
+  // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
+  return _mm256_insertf128_pd(a, b, 0);
+}
+
+__m256i test_mm256_insertf128_si256_0(__m256i a, __m128i b) {
+  // CHECK-LABEL: @test_mm256_insertf128_si256_0
+  // CHECK: shufflevector{{.*}}<i32 4, i32 5, i32 2, i32 3>
+  return _mm256_insertf128_si256(a, b, 0);
+}
+
+__m256 test_mm256_insertf128_ps_1(__m256 a, __m128 b) {
+  // CHECK-LABEL: @test_mm256_insertf128_ps_1
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
+  return _mm256_insertf128_ps(a, b, 1);
+}
+
+__m256d test_mm256_insertf128_pd_1(__m256d a, __m128d b) {
+  // CHECK-LABEL: @test_mm256_insertf128_pd_1
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+  return _mm256_insertf128_pd(a, b, 1);
+}
+
+__m256i test_mm256_insertf128_si256_1(__m256i a, __m128i b) {
+  // CHECK-LABEL: @test_mm256_insertf128_si256_1
+  // CHECK: shufflevector{{.*}}<i32 0, i32 1, i32 4, i32 5>
+  return _mm256_insertf128_si256(a, b, 1);
+}
+
diff --git a/test/CodeGen/builtins-x86.c b/test/CodeGen/builtins-x86.c
index c77be2cd7e..811bef2881 100644
--- a/test/CodeGen/builtins-x86.c
+++ b/test/CodeGen/builtins-x86.c
@@ -419,9 +419,6 @@ void f0() {
   tmp_V4d = __builtin_ia32_vperm2f128_pd256(tmp_V4d, tmp_V4d, 0x7);
   tmp_V8f = __builtin_ia32_vperm2f128_ps256(tmp_V8f, tmp_V8f, 0x7);
   tmp_V8i = __builtin_ia32_vperm2f128_si256(tmp_V8i, tmp_V8i, 0x7);
-  tmp_V4d = __builtin_ia32_vinsertf128_pd256(tmp_V4d, tmp_V2d, 0x1);
-  tmp_V8f = __builtin_ia32_vinsertf128_ps256(tmp_V8f, tmp_V4f, 0x1);
-  tmp_V8i = __builtin_ia32_vinsertf128_si256(tmp_V8i, tmp_V4i, 0x1);
  tmp_V4d = __builtin_ia32_sqrtpd256(tmp_V4d);
  tmp_V8f = __builtin_ia32_sqrtps256(tmp_V8f);
  tmp_V8f = __builtin_ia32_rsqrtps256(tmp_V8f);
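
For local verification, a standalone program along these lines exercises
both lane values of the rewritten _mm256_insertf128_ps (a sketch only; the
file name and build invocation are assumptions, not part of this commit):

/* insertf128_demo.c - hypothetical smoke test for the rewritten macros.
   Assumed build: clang -mavx insertf128_demo.c -o insertf128_demo */
#include <immintrin.h>
#include <stdio.h>

int main(void) {
  __m256 a = _mm256_set1_ps(1.0f);           /* all eight lanes = 1.0 */
  __m128 b = _mm_set1_ps(2.0f);              /* four lanes = 2.0 */

  __m256 lo = _mm256_insertf128_ps(a, b, 0); /* replace low 128 bits */
  __m256 hi = _mm256_insertf128_ps(a, b, 1); /* replace high 128 bits */

  float out[8];
  _mm256_storeu_ps(out, lo);
  for (int i = 0; i < 8; ++i) printf("%.0f ", out[i]);
  printf("\n");                              /* expected: 2 2 2 2 1 1 1 1 */

  _mm256_storeu_ps(out, hi);
  for (int i = 0; i < 8; ++i) printf("%.0f ", out[i]);
  printf("\n");                              /* expected: 1 1 1 1 2 2 2 2 */
  return 0;
}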