From c3420ffb282c6ffc0192013bf8045b6c21eddece Mon Sep 17 00:00:00 2001
From: Nate Begeman
Date: Mon, 14 Dec 2009 05:15:02 +0000
Subject: [PATCH] Revert mmx palignr to use an intrinsic, since mmx shuffle
 patterns are missing.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@91269 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/clang/Basic/BuiltinsX86.def | 2 +-
 lib/CodeGen/CGBuiltin.cpp           | 5 ++++-
 lib/Headers/tmmintrin.h             | 2 +-
 test/CodeGen/palignr.c              | 4 ++++
 4 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/include/clang/Basic/BuiltinsX86.def b/include/clang/Basic/BuiltinsX86.def
index adb1b7643f..6315c16dd8 100644
--- a/include/clang/Basic/BuiltinsX86.def
+++ b/include/clang/Basic/BuiltinsX86.def
@@ -251,7 +251,7 @@ BUILTIN(__builtin_ia32_monitor, "vv*UiUi", "")
 BUILTIN(__builtin_ia32_mwait, "vUiUi", "")
 BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
 BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cc", "")
-BUILTIN(__builtin_ia32_palignr, "V8cV8cV8cc", "")
+BUILTIN(__builtin_ia32_palignr, "V1LLiV1LLiV1LLic", "")
 BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
 BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
 
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 5d5caa2e9a..c70443245c 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -805,8 +805,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
     return Builder.CreateStore(Ops[1], Ops[0]);
   }
-  case X86::BI__builtin_ia32_palignr128:
   case X86::BI__builtin_ia32_palignr: {
+    Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+  }
+  case X86::BI__builtin_ia32_palignr128: {
     unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
 
     // If palignr is shifting the pair of input vectors less than 17 bytes,
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index 374a27ecd7..7adb776fef 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -67,7 +67,7 @@ _mm_abs_epi32(__m128i a)
 }
 
 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
-#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n)))
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
 
 static inline __m128i __attribute__((__always_inline__, __nodebug__))
 _mm_hadd_epi16(__m128i a, __m128i b)
diff --git a/test/CodeGen/palignr.c b/test/CodeGen/palignr.c
index c0c7e77384..41e48bd285 100644
--- a/test/CodeGen/palignr.c
+++ b/test/CodeGen/palignr.c
@@ -1,8 +1,12 @@
 // RUN: clang-cc %s -triple=i686-apple-darwin -target-feature +ssse3 -O1 -S -o - | FileCheck %s
 
 #define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
+#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
+typedef __attribute__((vector_size(8))) int int2;
 typedef __attribute__((vector_size(16))) int int4;
 
+// CHECK: palignr
+int2 mmx_align1(int2 a, int2 b) { return _mm_alignr_pi8(a, b, 7); }
 // CHECK: palignr
 int4 align1(int4 a, int4 b) { return _mm_alignr_epi8(a, b, 15); }
 // CHECK: ret
-- 
2.40.0
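
Usage note (not part of the patch): with this change, the _mm_alignr_pi8 macro in
tmmintrin.h multiplies its byte count by 8 before handing it to
__builtin_ia32_palignr, which is now emitted as a call to the
llvm.x86.ssse3.palign.r intrinsic rather than lowered to a vector shuffle. Below is
a minimal caller sketch; it assumes only the documented
_mm_alignr_pi8(__m64, __m64, int) interface, the function name and shift value are
illustrative, and it needs SSSE3 enabled (e.g. -mssse3) to compile.

    #include <tmmintrin.h>

    /* Returns the 8 bytes starting at byte offset 3 of the 16-byte
     * concatenation {hi:lo}.  The shift count must be a compile-time
     * constant; the macro passes n*8 (a bit count) to the builtin. */
    static __m64 align_by_3(__m64 hi, __m64 lo)
    {
        return _mm_alignr_pi8(hi, lo, 3);
    }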