From: Michael Zuckerman
Date: Mon, 23 May 2016 08:01:48 +0000 (+0000)
Subject: [Clang][AVX512][BUILTIN] Add missing intrinsics for the movdqa instruction set
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=54f24ebb272ec058546d0075117873ed055b0cfe;p=clang

[Clang][AVX512][BUILTIN] Add missing intrinsics for the movdqa instruction set

Differential Revision: http://reviews.llvm.org/D20514

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@270401 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/clang/Basic/BuiltinsX86.def b/include/clang/Basic/BuiltinsX86.def
index 91454b823f..723df2ff8f 100644
--- a/include/clang/Basic/BuiltinsX86.def
+++ b/include/clang/Basic/BuiltinsX86.def
@@ -1723,6 +1723,11 @@ TARGET_BUILTIN(__builtin_ia32_psrlw128_mask, "V8sV8sV8sV8sUc","","avx512bw,avx51
 TARGET_BUILTIN(__builtin_ia32_psrlw256_mask, "V16sV16sV8sV16sUs","","avx512bw,avx512vl")
 TARGET_BUILTIN(__builtin_ia32_psrlwi128_mask, "V8sV8sIiV8sUc","","avx512bw,avx512vl")
 TARGET_BUILTIN(__builtin_ia32_psrlwi256_mask, "V16sV16sIiV16sUs","","avx512bw,avx512vl")
+TARGET_BUILTIN(__builtin_ia32_movdqa32_128_mask, "V4iV4iV4iUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_movdqa32_256_mask, "V8iV8iV8iUc","","avx512vl")
+TARGET_BUILTIN(__builtin_ia32_movdqa32_512_mask, "V16iV16iV16iUs","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa32load128_mask, "V4iV4i*V4iUc","","avx512f")
+TARGET_BUILTIN(__builtin_ia32_movdqa32load256_mask, "V8iV8i*V8iUc","","avx512f")
 TARGET_BUILTIN(__builtin_ia32_movdqa32load512_mask, "V16iV16iC*V16iUs","","avx512f")
 TARGET_BUILTIN(__builtin_ia32_movdqa32store512_mask, "vV16i*V16iUs","","avx512f")
 TARGET_BUILTIN(__builtin_ia32_movdqa64_512_mask, "V8LLiV8LLiV8LLiUc","","avx512f")
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index b97308ffce..57a1de00b2 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -4925,6 +4925,23 @@ _mm512_mask_store_epi32 (void *__P, __mmask16 __U, __m512i __A)
                                          (__mmask16) __U);
 }
 
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_mov_epi32 (__m512i __W, __mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_movdqa32_512_mask ((__v16si) __A,
+                                                     (__v16si) __W,
+                                                     (__mmask16) __U);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_mov_epi32 (__mmask16 __U, __m512i __A)
+{
+  return (__m512i) __builtin_ia32_movdqa32_512_mask ((__v16si) __A,
+                                                     (__v16si)
+                                                     _mm512_setzero_si512 (),
+                                                     (__mmask16) __U);
+}
+
 static __inline__ __m512i __DEFAULT_FN_ATTRS
 _mm512_mask_mov_epi64 (__m512i __W, __mmask8 __U, __m512i __A)
 {
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
index 41c4bc80ff..a431418c8b 100644
--- a/lib/Headers/avx512vlintrin.h
+++ b/lib/Headers/avx512vlintrin.h
@@ -5834,7 +5834,78 @@ _mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
                                          (__mmask8) __U);
 }
 
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_mov_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdqa32_128_mask ((__v4si) __A,
+                                                     (__v4si) __W,
+                                                     (__mmask8) __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_mov_epi32 (__mmask8 __U, __m128i __A)
+{
+  return (__m128i) __builtin_ia32_movdqa32_128_mask ((__v4si) __A,
+                                                     (__v4si)
+                                                     _mm_setzero_si128 (),
+                                                     (__mmask8) __U);
+}
+
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_mov_epi32 (__m256i __W, __mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdqa32_256_mask ((__v8si) __A,
+                                                     (__v8si) __W,
+                                                     (__mmask8) __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_mov_epi32 (__mmask8 __U, __m256i __A)
+{
+  return (__m256i) __builtin_ia32_movdqa32_256_mask ((__v8si) __A,
+                                                     (__v8si)
+                                                     _mm256_setzero_si256 (),
+                                                     (__mmask8) __U);
+}
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_load_epi32 (__m128i __W, __mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+                                                        (__v4si) __W,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m128i) __builtin_ia32_movdqa32load128_mask ((__v4si *) __P,
+                                                        (__v4si)
+                                                        _mm_setzero_si128 (),
+                                                        (__mmask8)
+                                                        __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_load_epi32 (__m256i __W, __mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+                                                        (__v8si) __W,
+                                                        (__mmask8)
+                                                        __U);
+}
+
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_load_epi32 (__mmask8 __U, void const *__P)
+{
+  return (__m256i) __builtin_ia32_movdqa32load256_mask ((__v8si *) __P,
+                                                        (__v8si)
+                                                        _mm256_setzero_si256 (),
+                                                        (__mmask8)
+                                                        __U);
+}
 
 static __inline__ void __DEFAULT_FN_ATTRS
 _mm_mask_store_epi32 (void *__P, __mmask8 __U, __m128i __A)
diff --git a/test/CodeGen/avx512f-builtins.c b/test/CodeGen/avx512f-builtins.c
index b5de5a660b..b98be5ff03 100644
--- a/test/CodeGen/avx512f-builtins.c
+++ b/test/CodeGen/avx512f-builtins.c
@@ -2542,6 +2542,18 @@ __m512i test_mm512_maskz_load_epi32(__mmask16 __U, void const *__P) {
   return _mm512_maskz_load_epi32(__U, __P);
 }
 
+__m512i test_mm512_mask_mov_epi32(__m512i __W, __mmask16 __U, __m512i __A) {
+  // CHECK-LABEL: @test_mm512_mask_mov_epi32
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm512_mask_mov_epi32(__W, __U, __A);
+}
+
+__m512i test_mm512_maskz_mov_epi32(__mmask16 __U, __m512i __A) {
+  // CHECK-LABEL: @test_mm512_maskz_mov_epi32
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm512_maskz_mov_epi32(__U, __A);
+}
+
 __m512i test_mm512_mask_mov_epi64(__m512i __W, __mmask8 __U, __m512i __A) {
   // CHECK-LABEL: @test_mm512_mask_mov_epi64
   // CHECK: @llvm.x86.avx512.mask.mov
diff --git a/test/CodeGen/avx512vl-builtins.c b/test/CodeGen/avx512vl-builtins.c
index 07aa9f0f23..71e7799060 100644
--- a/test/CodeGen/avx512vl-builtins.c
+++ b/test/CodeGen/avx512vl-builtins.c
@@ -3948,6 +3948,30 @@ void test_mm256_mask_store_epi32(void *__P, __mmask8 __U, __m256i __A) {
   return _mm256_mask_store_epi32(__P, __U, __A);
 }
 
+__m128i test_mm_mask_mov_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
+  // CHECK-LABEL: @test_mm_mask_mov_epi32
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm_mask_mov_epi32(__W, __U, __A);
+}
+
+__m128i test_mm_maskz_mov_epi32(__mmask8 __U, __m128i __A) {
+  // CHECK-LABEL: @test_mm_maskz_mov_epi32
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm_maskz_mov_epi32(__U, __A);
+}
+
+__m256i test_mm256_mask_mov_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
+  // CHECK-LABEL: @test_mm256_mask_mov_epi32
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm256_mask_mov_epi32(__W, __U, __A);
+}
+
+__m256i test_mm256_maskz_mov_epi32(__mmask8 __U, __m256i __A) {
+  // CHECK-LABEL: @test_mm256_maskz_mov_epi32
+  // CHECK: @llvm.x86.avx512.mask.mov
+  return _mm256_maskz_mov_epi32(__U, __A);
+}
+
 __m128i test_mm_mask_mov_epi64(__m128i __W, __mmask8 __U, __m128i __A) {
   // CHECK-LABEL: @test_mm_mask_mov_epi64
   // CHECK: @llvm.x86.avx512.mask.mov
@@ -3972,6 +3996,30 @@ __m256i test_mm256_maskz_mov_epi64(__mmask8 __U, __m256i __A) {
   return _mm256_maskz_mov_epi64(__U, __A);
 }
 
+__m128i test_mm_mask_load_epi32(__m128i __W, __mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm_mask_load_epi32
+  // CHECK: @llvm.x86.avx512.mask.load.d.128
+  return _mm_mask_load_epi32(__W, __U, __P);
+}
+
+__m128i test_mm_maskz_load_epi32(__mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm_maskz_load_epi32
+  // CHECK: @llvm.x86.avx512.mask.load.d.128
+  return _mm_maskz_load_epi32(__U, __P);
+}
+
+__m256i test_mm256_mask_load_epi32(__m256i __W, __mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm256_mask_load_epi32
+  // CHECK: @llvm.x86.avx512.mask.load.d.256
+  return _mm256_mask_load_epi32(__W, __U, __P);
+}
+
+__m256i test_mm256_maskz_load_epi32(__mmask8 __U, void const *__P) {
+  // CHECK-LABEL: @test_mm256_maskz_load_epi32
+  // CHECK: @llvm.x86.avx512.mask.load.d.256
+  return _mm256_maskz_load_epi32(__U, __P);
+}
+
 __m128i test_mm_mask_load_epi64(__m128i __W, __mmask8 __U, void const *__P) {
   // CHECK-LABEL: @test_mm_mask_load_epi64
   // CHECK: @llvm.x86.avx512.mask.load.q.128
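
Usage note (editor's addition, not part of the patch): a minimal sketch of how the masked move/load intrinsics added above are meant to be used. It assumes the updated headers and a target with AVX512F and AVX512VL (e.g. compile with -mavx512f -mavx512vl); the buffers, masks, and expected values below are illustrative only.

#include <immintrin.h>
#include <stdio.h>

int main(void) {
  int src[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};

  /* 512-bit masked move: lanes whose mask bit is 0 keep the value from w. */
  __m512i w = _mm512_set1_epi32(-1);
  __m512i a = _mm512_loadu_si512((void const *)src);
  __m512i moved = _mm512_mask_mov_epi32(w, 0x00FF, a); /* low 8 lanes from a, high 8 from w */

  /* 128-bit zero-masked aligned load (vmovdqa32): unselected lanes become 0.
     The pointer must be 16-byte aligned, hence _Alignas. */
  _Alignas(16) int buf[4] = {10, 20, 30, 40};
  __m128i lo = _mm_maskz_load_epi32(0x5, buf); /* lanes 0 and 2 loaded, lanes 1 and 3 zeroed */

  int out[16], out4[4];
  _mm512_storeu_si512((void *)out, moved);
  _mm_storeu_si128((__m128i *)out4, lo);
  printf("%d %d\n", out[0], out[15]);                          /* expect: 0 -1 */
  printf("%d %d %d %d\n", out4[0], out4[1], out4[2], out4[3]); /* expect: 10 0 30 0 */
  return 0;
}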