granicus.if.org Git - clang/commitdiff
[X86][AVX512] Converted the VSHUFPD intrinsics to generic IR
authorSimon Pilgrim <llvm-dev@redking.me.uk>
Mon, 4 Jul 2016 21:30:47 +0000 (21:30 +0000)
committerSimon Pilgrim <llvm-dev@redking.me.uk>
Mon, 4 Jul 2016 21:30:47 +0000 (21:30 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@274523 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Headers/avx512fintrin.h
lib/Headers/avx512vlintrin.h
test/CodeGen/avx512f-builtins.c
test/CodeGen/avx512vl-builtins.c

index c0d44984fd74cd11246db29381831ea288539007..4bd5a8d7dbb5a0c2d7398f9f1a6ec8d842ff12b2 100644 (file)
@@ -5950,6 +5950,7 @@ _mm512_kmov (__mmask16 __A)
 
 #define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \
   (long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I,
          __mmask16 __U, __m512i __B)
@@ -7166,23 +7167,27 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
                                           (__v8di)_mm512_setzero_si512(), \
                                           (__mmask8)(U)); })
 
-#define _mm512_shuffle_pd(M, V, imm) __extension__ ({ \
-  (__m512d)__builtin_ia32_shufpd512_mask((__v8df)(__m512d)(M), \
-                                         (__v8df)(__m512d)(V), (int)(imm), \
-                                         (__v8df)_mm512_undefined_pd(), \
-                                         (__mmask8)-1); })
-
-#define _mm512_mask_shuffle_pd(W, U, M, V, imm) __extension__ ({ \
-  (__m512d)__builtin_ia32_shufpd512_mask((__v8df)(__m512d)(M), \
-                                         (__v8df)(__m512d)(V), (int)(imm), \
-                                         (__v8df)(__m512d)(W), \
-                                         (__mmask8)(U)); })
+#define _mm512_shuffle_pd(A, B, M) __extension__ ({ \
+  (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
+                                   (__v8df)(__m512d)(B), \
+                                   (((M) & 0x01) >> 0) +  0, \
+                                   (((M) & 0x02) >> 1) +  8, \
+                                   (((M) & 0x04) >> 2) +  2, \
+                                   (((M) & 0x08) >> 3) + 10, \
+                                   (((M) & 0x10) >> 4) +  4, \
+                                   (((M) & 0x20) >> 5) + 12, \
+                                   (((M) & 0x40) >> 6) +  6, \
+                                   (((M) & 0x80) >> 7) + 14); })
+
+#define _mm512_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                       (__v8df)(__m512d)(W)); })
 
-#define _mm512_maskz_shuffle_pd(U, M, V, imm) __extension__ ({ \
-  (__m512d)__builtin_ia32_shufpd512_mask((__v8df)(__m512d)(M), \
-                                         (__v8df)(__m512d)(V), (int)(imm), \
-                                         (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)(U)); })
+#define _mm512_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+                                       (__v8df)_mm512_shuffle_pd((A), (B), (M)), \
+                                       (__v8df)_mm512_setzero_pd()); })
 
 #define _mm512_shuffle_ps(M, V, imm) __extension__ ({ \
   (__m512)__builtin_ia32_shufps512_mask((__v16sf)(__m512)(M), \
index b5b371823a36c4a224925ea9523d827a14de9bd2..2e6f9fc83ba4f6fe321370a1a1817c5274a8e554 100644 (file)
@@ -7374,51 +7374,45 @@ _mm256_maskz_sra_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
                                               (__v4di)_mm256_setzero_si256(), \
                                               (__mmask8)(U)); })
 
-#define _mm_mask_shuffle_pd(W, U, A, B, imm) __extension__ ({ \
-  (__m128d)__builtin_ia32_shufpd128_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (int)(imm), \
-                                         (__v2df)(__m128d)(W), \
-                                         (__mmask8)(U)); })
+#define _mm_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                       (__v2df)(__m128d)(W)); })
 
-#define _mm_maskz_shuffle_pd(U, A, B, imm) __extension__ ({ \
-  (__m128d)__builtin_ia32_shufpd128_mask((__v2df)(__m128d)(A), \
-                                         (__v2df)(__m128d)(B), (int)(imm), \
-                                         (__v2df)_mm_setzero_pd(), \
-                                         (__mmask8)(U)); })
+#define _mm_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+                                       (__v2df)_mm_shuffle_pd((A), (B), (M)), \
+                                       (__v2df)_mm_setzero_pd()); })
 
-#define _mm256_mask_shuffle_pd(W, U, A, B, imm) __extension__ ({ \
-  (__m256d)__builtin_ia32_shufpd256_mask((__v4df)(__m256d)(A), \
-                                         (__v4df)(__m256d)(B), (int)(imm), \
-                                         (__v4df)(__m256d)(W), \
-                                         (__mmask8)(U)); })
+#define _mm256_mask_shuffle_pd(W, U, A, B, M) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                       (__v4df)(__m256d)(W)); })
 
-#define _mm256_maskz_shuffle_pd(U, A, B, imm) __extension__ ({ \
-  (__m256d)__builtin_ia32_shufpd256_mask((__v4df)(__m256d)(A), \
-                                         (__v4df)(__m256d)(B), (int)(imm), \
-                                         (__v4df)_mm256_setzero_pd(), \
-                                         (__mmask8)(U)); })
+#define _mm256_maskz_shuffle_pd(U, A, B, M) __extension__ ({ \
+  (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+                                       (__v4df)_mm256_shuffle_pd((A), (B), (M)), \
+                                       (__v4df)_mm256_setzero_pd()); })
 
-#define _mm_mask_shuffle_ps(W, U, A, B, imm) __extension__ ({ \
-  (__m128)__builtin_ia32_shufps128_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (int)(imm), \
-                                        (__v4sf)(__m128)(W), (__mmask8)(U)); })
+#define _mm_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                      (__v4sf)(__m128)(W)); })
 
-#define _mm_maskz_shuffle_ps(U, A, B, imm) __extension__ ({ \
-  (__m128)__builtin_ia32_shufps128_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), (int)(imm), \
-                                        (__v4sf)_mm_setzero_ps(), \
-                                        (__mmask8)(U)); })
+#define _mm_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+  (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+                                      (__v4sf)_mm_shuffle_ps((A), (B), (M)), \
+                                      (__v4sf)_mm_setzero_ps()); })
 
-#define _mm256_mask_shuffle_ps(W, U, A, B, imm) __extension__ ({ \
-  (__m256)__builtin_ia32_shufps256_mask((__v8sf)(__m256)(A), \
-                                        (__v8sf)(__m256)(B), (int)(imm), \
-                                        (__v8sf)(__m256)(W), (__mmask8)(U)); })
+#define _mm256_mask_shuffle_ps(W, U, A, B, M) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                      (__v8sf)(__m256)(W)); })
 
-#define _mm256_maskz_shuffle_ps(U, A, B, imm) __extension__ ({ \
-  (__m256)__builtin_ia32_shufps256_mask((__v8sf)(__m256)(A), \
-                                        (__v8sf)(__m256)(B), (int)(imm), \
-                                        (__v8sf)_mm256_setzero_ps(), \
-                                        (__mmask8)(U)); })
+#define _mm256_maskz_shuffle_ps(U, A, B, M) __extension__ ({ \
+  (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+                                      (__v8sf)_mm256_shuffle_ps((A), (B), (M)), \
+                                      (__v8sf)_mm256_setzero_ps()); })
 
 static __inline__ __m128d __DEFAULT_FN_ATTRS
 _mm_rsqrt14_pd (__m128d __A)
index 1a24cafa5739aa72d09cbd82b6f3c154ed360773..a475e0eaaad89dfde4ba8cb0bcf9457d493257f9 100644 (file)
@@ -4220,19 +4220,21 @@ __m512i test_mm512_maskz_shuffle_i64x2(__mmask8 __U, __m512i __A, __m512i __B) {
 
 __m512d test_mm512_shuffle_pd(__m512d __M, __m512d __V) {
   // CHECK-LABEL: @test_mm512_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.512
+  // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
   return _mm512_shuffle_pd(__M, __V, 4); 
 }
 
 __m512d test_mm512_mask_shuffle_pd(__m512d __W, __mmask8 __U, __m512d __M, __m512d __V) {
   // CHECK-LABEL: @test_mm512_mask_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.512
+  // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
   return _mm512_mask_shuffle_pd(__W, __U, __M, __V, 4); 
 }
 
 __m512d test_mm512_maskz_shuffle_pd(__mmask8 __U, __m512d __M, __m512d __V) {
   // CHECK-LABEL: @test_mm512_maskz_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.512
+  // CHECK: shufflevector <8 x double> %{{.*}}, <8 x double> %{{.*}}, <8 x i32> <i32 0, i32 8, i32 3, i32 10, i32 4, i32 12, i32 6, i32 14>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x double> %{{.*}}, <8 x double> %{{.*}}
   return _mm512_maskz_shuffle_pd(__U, __M, __V, 4); 
 }
 
index 7e1c99087431757ce4d2fcc2f1224c1f4b170af0..b4024ba8c64f22a3517b0391f1268203580812d5 100644 (file)
@@ -5189,49 +5189,57 @@ __m256i test_mm256_maskz_shuffle_i64x2(__mmask8 __U, __m256i __A, __m256i __B) {
 
 __m128d test_mm_mask_shuffle_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
   // CHECK-LABEL: @test_mm_mask_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.128
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
   return _mm_mask_shuffle_pd(__W, __U, __A, __B, 3); 
 }
 
 __m128d test_mm_maskz_shuffle_pd(__mmask8 __U, __m128d __A, __m128d __B) {
   // CHECK-LABEL: @test_mm_maskz_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.128
+  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
+  // CHECK: select <2 x i1> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}
   return _mm_maskz_shuffle_pd(__U, __A, __B, 3); 
 }
 
 __m256d test_mm256_mask_shuffle_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
   // CHECK-LABEL: @test_mm256_mask_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.256
+  // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
   return _mm256_mask_shuffle_pd(__W, __U, __A, __B, 3); 
 }
 
 __m256d test_mm256_maskz_shuffle_pd(__mmask8 __U, __m256d __A, __m256d __B) {
   // CHECK-LABEL: @test_mm256_maskz_shuffle_pd
-  // CHECK: @llvm.x86.avx512.mask.shuf.pd.256
+  // CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x i32> <i32 1, i32 5, i32 2, i32 6>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}
   return _mm256_maskz_shuffle_pd(__U, __A, __B, 3); 
 }
 
 __m128 test_mm_mask_shuffle_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm_mask_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.128
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_mask_shuffle_ps(__W, __U, __A, __B, 4); 
 }
 
 __m128 test_mm_maskz_shuffle_ps(__mmask8 __U, __m128 __A, __m128 __B) {
   // CHECK-LABEL: @test_mm_maskz_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.128
+  // CHECK: shufflevector <4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 4, i32 4>
+  // CHECK: select <4 x i1> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}
   return _mm_maskz_shuffle_ps(__U, __A, __B, 4); 
 }
 
 __m256 test_mm256_mask_shuffle_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
   // CHECK-LABEL: @test_mm256_mask_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.256
+  // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_mask_shuffle_ps(__W, __U, __A, __B, 4); 
 }
 
 __m256 test_mm256_maskz_shuffle_ps(__mmask8 __U, __m256 __A, __m256 __B) {
   // CHECK-LABEL: @test_mm256_maskz_shuffle_ps
-  // CHECK: @llvm.x86.avx512.mask.shuf.ps.256
+  // CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x i32> <i32 0, i32 1, i32 8, i32 8, i32 4, i32 5, i32 12, i32 12>
+  // CHECK: select <8 x i1> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}
   return _mm256_maskz_shuffle_ps(__U, __A, __B, 4); 
 }