From: Craig Topper
Date: Sun, 14 Jan 2018 19:23:50 +0000 (+0000)
Subject: [X86] Implement old kunpck intrinsics using vector ops on vXi1 instead of integer shift/and/or
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=4647e409dfdcda90c9ea357d16f30f65ffcf1bb6;p=clang

[X86] Implement old kunpck intrinsics using vector ops on vXi1 instead of integer shift/and/or

Summary:
The kunpck intrinsics were removed in favor of native IR a few months ago. That implementation lowered them by operating on the integer types passed to the intrinsic, shifting, masking, and OR-ing the two halves together. A special X86 DAG combine was then added to recognize this pattern and turn it back into a concat_vectors operation.

I think it makes more sense to keep the IR implementation closer to vector operations on vXi1, given that we expect these builtins to be used around other builtins that operate on k-registers, which we try to represent in IR with vXi1. InstCombine should be able to get rid of the bitcasts between integers and vXi1, leaving only the vector operations.

Reviewers: RKSimon, spatel, zvi, jina.nahias

Reviewed By: RKSimon

Subscribers: cfe-commits

Differential Revision: https://reviews.llvm.org/D42016

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@322461 91177308-0d34-0410-b5e6-96231b3b80d8
---
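For reference, the scalar semantics the three unpacks implement, written as plain C in the style of the header code this patch replaces. This is an illustrative sketch, not part of the patch: the ref_* helper names are invented here, and it assumes compilation with -mavx512bw so the mask types are available. The new vXi1 shuffle sequence computes the same result.

    #include <immintrin.h>

    /* Each unpack keeps the low half of each operand: the first operand
       supplies the low half of the result and the second, shifted up,
       supplies the high half. */
    static inline __mmask16 ref_kunpackb(__mmask16 a, __mmask16 b) {
      return (__mmask16)((a & 0xFF) | ((unsigned)b << 8));
    }
    static inline __mmask32 ref_kunpackw(__mmask32 a, __mmask32 b) {
      return (a & 0xFFFF) | (b << 16);
    }
    static inline __mmask64 ref_kunpackd(__mmask64 a, __mmask64 b) {
      return (a & 0xFFFFFFFFULL) | (b << 32);
    }
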
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 9ffc7de4bc..51a3cea7d0 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -8456,6 +8456,28 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
                                 Builder.getInt16Ty());
   }
 
+  case X86::BI__builtin_ia32_kunpckdi:
+  case X86::BI__builtin_ia32_kunpcksi:
+  case X86::BI__builtin_ia32_kunpckhi: {
+    unsigned NumElts = Ops[0]->getType()->getScalarSizeInBits();
+    Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
+    Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
+    uint32_t Indices[64];
+    for (unsigned i = 0; i != NumElts; ++i)
+      Indices[i] = i;
+
+    // First extract half of each vector. This gives better codegen than
+    // doing it in a single shuffle.
+    LHS = Builder.CreateShuffleVector(LHS, LHS,
+                                      makeArrayRef(Indices, NumElts / 2));
+    RHS = Builder.CreateShuffleVector(RHS, RHS,
+                                      makeArrayRef(Indices, NumElts / 2));
+    // Concat the vectors.
+    Value *Res = Builder.CreateShuffleVector(LHS, RHS,
+                                             makeArrayRef(Indices, NumElts));
+    return Builder.CreateBitCast(Res, Ops[0]->getType());
+  }
+
   case X86::BI__builtin_ia32_vplzcntd_128_mask:
   case X86::BI__builtin_ia32_vplzcntd_256_mask:
   case X86::BI__builtin_ia32_vplzcntd_512_mask:
diff --git a/lib/Headers/avx512bwintrin.h b/lib/Headers/avx512bwintrin.h
index 3ff0e3aafd..064300a487 100644
--- a/lib/Headers/avx512bwintrin.h
+++ b/lib/Headers/avx512bwintrin.h
@@ -1854,13 +1854,15 @@ _mm512_maskz_set1_epi8 (__mmask64 __M, char __A)
 static __inline__ __mmask64 __DEFAULT_FN_ATTRS
 _mm512_kunpackd (__mmask64 __A, __mmask64 __B)
 {
-  return (__mmask64) (( __A & 0xFFFFFFFF) | ( __B << 32));
+  return (__mmask64) __builtin_ia32_kunpckdi ((__mmask64) __A,
+                                              (__mmask64) __B);
 }
 
 static __inline__ __mmask32 __DEFAULT_FN_ATTRS
 _mm512_kunpackw (__mmask32 __A, __mmask32 __B)
 {
-return (__mmask32) (( __A & 0xFFFF) | ( __B << 16));
+  return (__mmask32) __builtin_ia32_kunpcksi ((__mmask32) __A,
+                                              (__mmask32) __B);
 }
 
 static __inline__ __m512i __DEFAULT_FN_ATTRS
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index d34f0b1327..f5137428ba 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -8787,7 +8787,7 @@ _mm512_kortestz (__mmask16 __A, __mmask16 __B)
 static __inline__ __mmask16 __DEFAULT_FN_ATTRS
 _mm512_kunpackb (__mmask16 __A, __mmask16 __B)
 {
-  return (__mmask16) (( __A & 0xFF) | ( __B << 8));
+  return (__mmask16) __builtin_ia32_kunpckhi ((__mmask16) __A, (__mmask16) __B);
 }
 
 static __inline__ __mmask16 __DEFAULT_FN_ATTRS
diff --git a/test/CodeGen/avx512bw-builtins.c b/test/CodeGen/avx512bw-builtins.c
index f84df5c06b..bfdf46d261 100644
--- a/test/CodeGen/avx512bw-builtins.c
+++ b/test/CodeGen/avx512bw-builtins.c
@@ -1628,23 +1628,22 @@ __m512i test_mm512_maskz_set1_epi8(__mmask64 __M, char __A) {
 __mmask64 test_mm512_kunpackd(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: @test_mm512_kunpackd
-  // CHECK: bitcast <64 x i1> %{{.*}} to i64
-  // CHECK: bitcast <64 x i1> %{{.*}} to i64
-  // CHECK: and i64 %{{.*}}, 4294967295
-  // CHECK: shl i64 %{{.*}}, 32
-  // CHECK: or i64 %{{.*}}, %{{.*}}
-  // CHECK: bitcast i64 %{{.*}} to <64 x i1>
+  // CHECK: [[LHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+  // CHECK: [[RHS:%.*]] = bitcast i64 %{{.*}} to <64 x i1>
+  // CHECK: [[LHS2:%.*]] = shufflevector <64 x i1> [[LHS]], <64 x i1> [[LHS]], <32 x i32>
+  // CHECK: [[RHS2:%.*]] = shufflevector <64 x i1> [[RHS]], <64 x i1> [[RHS]], <32 x i32>
+  // CHECK: [[CONCAT:%.*]] = shufflevector <32 x i1> [[LHS2]], <32 x i1> [[RHS2]], <64 x i32>
+  // CHECK: bitcast <64 x i1> [[CONCAT]] to i64
   return _mm512_mask_cmpneq_epu8_mask(_mm512_kunpackd(_mm512_cmpneq_epu8_mask(__B, __A),_mm512_cmpneq_epu8_mask(__C, __D)), __E, __F);
 }
 
 __mmask32 test_mm512_kunpackw(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: @test_mm512_kunpackw
-  // CHECK: bitcast <32 x i1> %{{.*}} to i32
-  // CHECK: bitcast <32 x i1> %{{.*}} to i32
-  // CHECK: and i32 %{{.*}}, 65535
-  // CHECK: shl i32 %{{.*}}, 16
-  // CHECK: or i32 %{{.*}}, %{{.*}}
-  // CHECK: bitcast i32 %{{.*}} to <32 x i1>
+  // CHECK: [[LHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+  // CHECK: [[RHS:%.*]] = bitcast i32 %{{.*}} to <32 x i1>
+  // CHECK: [[LHS2:%.*]] = shufflevector <32 x i1> [[LHS]], <32 x i1> [[LHS]], <16 x i32>
+  // CHECK: [[RHS2:%.*]] = shufflevector <32 x i1> [[RHS]], <32 x i1> [[RHS]], <16 x i32>
+  // CHECK: [[CONCAT:%.*]] = shufflevector <16 x i1> [[LHS2]], <16 x i1> [[RHS2]], <32 x i32>
   return _mm512_mask_cmpneq_epu16_mask(_mm512_kunpackw(_mm512_cmpneq_epu16_mask(__B, __A),_mm512_cmpneq_epu16_mask(__C, __D)), __E, __F);
 }
diff --git a/test/CodeGen/avx512f-builtins.c b/test/CodeGen/avx512f-builtins.c
index ce831d690e..35a97912c7 100644
--- a/test/CodeGen/avx512f-builtins.c
+++ b/test/CodeGen/avx512f-builtins.c
@@ -6261,12 +6261,12 @@ int test_mm512_kortestz(__mmask16 __A, __mmask16 __B) {
 __mmask16 test_mm512_kunpackb(__m512i __A, __m512i __B, __m512i __C, __m512i __D, __m512i __E, __m512i __F) {
   // CHECK-LABEL: @test_mm512_kunpackb
-  // CHECK: bitcast <16 x i1> %{{.*}} to i16
-  // CHECK: bitcast <16 x i1> %{{.*}} to i16
-  // CHECK: and i32 %{{.*}}, 255
-  // CHECK: shl i32 %{{.*}}, 8
-  // CHECK: or i32 %{{.*}}, %{{.*}}
-  // CHECK: bitcast i16 %{{.*}} to <16 x i1>
+  // CHECK: [[LHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+  // CHECK: [[RHS:%.*]] = bitcast i16 %{{.*}} to <16 x i1>
+  // CHECK: [[LHS2:%.*]] = shufflevector <16 x i1> [[LHS]], <16 x i1> [[LHS]], <8 x i32>
+  // CHECK: [[RHS2:%.*]] = shufflevector <16 x i1> [[RHS]], <16 x i1> [[RHS]], <8 x i32>
+  // CHECK: [[CONCAT:%.*]] = shufflevector <8 x i1> [[LHS2]], <8 x i1> [[RHS2]], <16 x i32>
+  // CHECK: bitcast <16 x i1> [[CONCAT]] to i16
   return _mm512_mask_cmpneq_epu32_mask(_mm512_kunpackb(_mm512_cmpneq_epu32_mask(__A, __B), _mm512_cmpneq_epu32_mask(__C, __D)), __E, __F);
 }
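As a usage sketch, the tests above exercise the pattern we expect in user code: the unpacked mask directly predicates a further masked compare. The function and variable names below are illustrative only (not from the patch), and it assumes compilation with -mavx512bw:

    #include <immintrin.h>

    /* Combine the low 16 lanes of two 32-lane compare results into one
       32-bit mask, then use it as the write-mask of another compare,
       mirroring test_mm512_kunpackw above. */
    __mmask32 combine_and_compare(__m512i a, __m512i b, __m512i c,
                                  __m512i d, __m512i e, __m512i f) {
      __mmask32 lo = _mm512_cmpneq_epu16_mask(a, b);
      __mmask32 hi = _mm512_cmpneq_epu16_mask(c, d);
      return _mm512_mask_cmpneq_epu16_mask(_mm512_kunpackw(lo, hi), e, f);
    }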