From: Eric Christopher
Date: Thu, 15 Apr 2010 01:43:08 +0000 (+0000)
Subject: Rewrite handling of 64-bit palignr intrinsics to be vector shuffles.
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=e57aa9e367af227214140e1a174cc561aabaa0b9;p=clang

Rewrite handling of 64-bit palignr intrinsics to be vector shuffles.

Accordingly, stop multiplying the shift constant by 8 in the header, and change
the intrinsic definition to the vector types we now expect. Extend the existing
palignr test to check that we emit the correct code.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@101332 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/clang/Basic/BuiltinsX86.def b/include/clang/Basic/BuiltinsX86.def
index 5c75d3799b..a878dd1bd1 100644
--- a/include/clang/Basic/BuiltinsX86.def
+++ b/include/clang/Basic/BuiltinsX86.def
@@ -245,7 +245,7 @@ BUILTIN(__builtin_ia32_monitor, "vv*UiUi", "")
 BUILTIN(__builtin_ia32_mwait, "vUiUi", "")
 BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
 BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cc", "")
-BUILTIN(__builtin_ia32_palignr, "V1LLiV1LLiV1LLic", "")
+BUILTIN(__builtin_ia32_palignr, "V8cV8cV8cc", "")
 BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
 BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 85ab1dceba..95c41db86e 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -982,8 +982,38 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     return Builder.CreateStore(Ops[1], Ops[0]);
   }
   case X86::BI__builtin_ia32_palignr: {
-    Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
-    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+    unsigned shiftVal = cast<ConstantInt>(Ops[2])->getZExtValue();
+
+    // If palignr is shifting the pair of input vectors less than 9 bytes,
+    // emit a shuffle instruction.
+    if (shiftVal <= 8) {
+      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+      llvm::SmallVector<llvm::Constant*, 8> Indices;
+      for (unsigned i = 0; i != 8; ++i)
+        Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+
+      Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+    }
+
+    // If palignr is shifting the pair of input vectors more than 8 but less
+    // than 16 bytes, emit a logical right shift of the destination.
+    if (shiftVal < 16) {
+      // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+      const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+      const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1);
+
+      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+      Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+      // The psrlq shift amount is in bits, hence the multiply by 8 above.
+      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
+      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+    }
+
+    // If palignr is shifting the pair of vectors 16 or more bytes, emit zero.
+    return llvm::Constant::getNullValue(ConvertType(E->getType()));
   }
   case X86::BI__builtin_ia32_palignr128: {
     unsigned shiftVal = cast<ConstantInt>(Ops[2])->getZExtValue();
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index 09ebc23780..07fea1c98b 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -67,7 +67,7 @@ _mm_abs_epi32(__m128i a)
 }
 
 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
 
 static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_hadd_epi16(__m128i a, __m128i b)
diff --git a/test/CodeGen/palignr.c b/test/CodeGen/palignr.c
index 627e309bd9..6297b2e990 100644
--- a/test/CodeGen/palignr.c
+++ b/test/CodeGen/palignr.c
@@ -1,12 +1,8 @@
 // RUN: %clang_cc1 %s -triple=i686-apple-darwin -target-feature +ssse3 -O1 -S -o - | FileCheck %s
 
 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
-typedef __attribute__((vector_size(8))) int int2;
 typedef __attribute__((vector_size(16))) int int4;
 
-// CHECK: palignr
-int2 mmx_align1(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
 // CHECK: palignr
 int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
 // CHECK: ret
@@ -17,3 +13,18 @@ int4 align2(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 16); }
 int4 align3(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 17); }
 // CHECK: xor
 int4 align4(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 32); }
+
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+typedef __attribute__((vector_size(8))) int int2;
+
+// CHECK-NOT: palignr
+int2 align5(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 8); }
+
+// CHECK: psrlq
+int2 align6(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 9); }
+
+// CHECK: xor
+int2 align7(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 16); }
+
+// CHECK: palignr
+int2 align8(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
\ No newline at end of file
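For reference, the shuffle indices emitted in CGBuiltin.cpp follow the
architectural definition of 64-bit palignr: result byte i is byte (n + i) of
the 16-byte concatenation {b, a}, a count between 9 and 15 only pulls bytes
from a (hence the psrlq), and a count of 16 or more yields zero. Below is a
minimal scalar sketch of that semantics; it is an illustration only, not part
of the patch, and palignr64_model is a hypothetical name.

/* Illustration only: scalar model of the 64-bit palignr semantics that the
 * shuffle/psrlq/zero lowering above implements. */
#include <string.h>

static void palignr64_model(const unsigned char a[8], const unsigned char b[8],
                            unsigned n, unsigned char out[8]) {
  unsigned char concat[16];
  memcpy(concat, b, 8);      /* bytes 0-7:  second operand (low half)  */
  memcpy(concat + 8, a, 8);  /* bytes 8-15: first operand (high half)  */
  for (unsigned i = 0; i != 8; ++i)
    out[i] = (n + i < 16) ? concat[n + i] : 0;  /* n >= 16 zeroes the result */
}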