From: Dale Johannesen Date: Thu, 30 Sep 2010 23:57:50 +0000 (+0000) Subject: Clang part of MMX rewrite (goes with 115243). X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=d47e262935f25f704afd03e26482641ca21a1df8;p=clang Clang part of MMX rewrite (goes with 115243). git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@115244 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Headers/mmintrin.h b/lib/Headers/mmintrin.h index bad9e1c059..fefb42fd74 100644 --- a/lib/Headers/mmintrin.h +++ b/lib/Headers/mmintrin.h @@ -43,14 +43,13 @@ _mm_empty(void) static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_cvtsi32_si64(int __i) { - return (__m64)(__v2si){__i, 0}; + return (__m64)__builtin_ia32_vec_init_v2si(__i, 0); } static __inline__ int __attribute__((__always_inline__, __nodebug__)) _mm_cvtsi64_si32(__m64 __m) { - __v2si __mmx_var2 = (__v2si)__m; - return __mmx_var2[0]; + return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) @@ -86,59 +85,55 @@ _mm_packs_pu16(__m64 __m1, __m64 __m2) static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_unpackhi_pi8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 4, 8+4, 5, - 8+5, 6, 8+6, 7, 8+7); + return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_unpackhi_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 2, 4+2, 3, - 4+3); + return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_unpackhi_pi32(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 1, 2+1); + return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2); } static __inline__ __m64 
__attribute__((__always_inline__, __nodebug__)) _mm_unpacklo_pi8(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 0, 8+0, 1, - 8+1, 2, 8+2, 3, 8+3); + return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_unpacklo_pi16(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 0, 4+0, 1, - 4+1); + return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_unpacklo_pi32(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 0, 2+0); + return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_add_pi8(__m64 __m1, __m64 __m2) { - return (__m64)((__v8qi)__m1 + (__v8qi)__m2); + return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_add_pi16(__m64 __m1, __m64 __m2) { - return (__m64)((__v4hi)__m1 + (__v4hi)__m2); + return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_add_pi32(__m64 __m1, __m64 __m2) { - return (__m64)((__v2si)__m1 + (__v2si)__m2); + return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) @@ -168,19 +163,19 @@ _mm_adds_pu16(__m64 __m1, __m64 __m2) static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_sub_pi8(__m64 __m1, __m64 __m2) { - return (__m64)((__v8qi)__m1 - (__v8qi)__m2); + return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_sub_pi16(__m64 __m1, __m64 __m2) { - return (__m64)((__v4hi)__m1 - (__v4hi)__m2); + 
return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_sub_pi32(__m64 __m1, __m64 __m2) { - return (__m64)((__v2si)__m1 - (__v2si)__m2); + return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) @@ -222,7 +217,7 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2) static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_mullo_pi16(__m64 __m1, __m64 __m2) { - return (__m64)((__v4hi)__m1 * (__v4hi)__m2); + return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) @@ -252,13 +247,13 @@ _mm_slli_pi32(__m64 __m, int __count) static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_sll_si64(__m64 __m, __m64 __count) { - return __builtin_ia32_psllq(__m, __count); + return (__m64)__builtin_ia32_psllq(__m, __count); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_slli_si64(__m64 __m, int __count) { - return __builtin_ia32_psllqi(__m, __count); + return (__m64)__builtin_ia32_psllqi(__m, __count); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) @@ -318,67 +313,67 @@ _mm_srl_si64(__m64 __m, __m64 __count) static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_srli_si64(__m64 __m, int __count) { - return __builtin_ia32_psrlqi(__m, __count); + return (__m64)__builtin_ia32_psrlqi(__m, __count); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_and_si64(__m64 __m1, __m64 __m2) { - return __m1 & __m2; + return __builtin_ia32_pand(__m1, __m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_andnot_si64(__m64 __m1, __m64 __m2) { - return ~__m1 & __m2; + return __builtin_ia32_pandn(__m1, __m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) 
_mm_or_si64(__m64 __m1, __m64 __m2) { - return __m1 | __m2; + return __builtin_ia32_por(__m1, __m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_xor_si64(__m64 __m1, __m64 __m2) { - return __m1 ^ __m2; + return __builtin_ia32_pxor(__m1, __m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) { - return (__m64)((__v8qi)__m1 == (__v8qi)__m2); + return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) { - return (__m64)((__v4hi)__m1 == (__v4hi)__m2); + return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) { - return (__m64)((__v2si)__m1 == (__v2si)__m2); + return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) { - return (__m64)((__v8qi)__m1 > (__v8qi)__m2); + return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) { - return (__m64)((__v4hi)__m1 > (__v4hi)__m2); + return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_cmpgt_pi32(__m64 __m1, __m64 __m2) { - return (__m64)((__v2si)__m1 > (__v2si)__m2); + return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) @@ -390,57 +385,58 @@ _mm_setzero_si64(void) static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_set_pi32(int __i1, int __i0) { - return (__m64)(__v2si){ __i0, __i1 }; + return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1); } static 
__inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_set_pi16(short __s3, short __s2, short __s1, short __s0) { - return (__m64)(__v4hi){ __s0, __s1, __s2, __s3 }; + return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0) { - return (__m64)(__v8qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7 }; + return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3, + __b4, __b5, __b6, __b7); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_set1_pi32(int __i) { - return (__m64)(__v2si){ __i, __i }; + return _mm_set_pi32(__i, __i); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) -_mm_set1_pi16(short __s) +_mm_set1_pi16(short __w) { - return (__m64)(__v4hi){ __s, __s, __s, __s }; + return _mm_set_pi16(__w, __w, __w, __w); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_set1_pi8(char __b) { - return (__m64)(__v8qi){ __b, __b, __b, __b, __b, __b, __b, __b }; + return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_setr_pi32(int __i1, int __i0) { - return (__m64)(__v2si){ __i1, __i0 }; + return _mm_set_pi32(__i1, __i0); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) -_mm_setr_pi16(short __s3, short __s2, short __s1, short __s0) +_mm_setr_pi16(short __w3, short __w2, short __w1, short __w0) { - return (__m64)(__v4hi){ __s3, __s2, __s1, __s0 }; + return _mm_set_pi16(__w3, __w2, __w1, __w0); } static __inline__ __m64 __attribute__((__always_inline__, __nodebug__)) _mm_setr_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0) { - return (__m64)(__v8qi){ __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0 }; + return _mm_set_pi8(__b7, __b6, __b5, __b4, 
__b3, __b2, __b1, __b0); } diff --git a/test/CodeGen/mmx-shift-with-immediate.c b/test/CodeGen/mmx-shift-with-immediate.c index d4f4d213f1..995a9fcc7d 100644 --- a/test/CodeGen/mmx-shift-with-immediate.c +++ b/test/CodeGen/mmx-shift-with-immediate.c @@ -2,25 +2,25 @@ #include <mmintrin.h> void shift(__m64 a, __m64 b, int c) { - // CHECK: <4 x i16> @llvm.x86.mmx.pslli.w(<4 x i16> %{{.*}}, i32 {{.*}}) + // CHECK: x86_mmx @llvm.x86.mmx.pslli.w(x86_mmx %{{.*}}, i32 {{.*}}) _mm_slli_pi16(a, c); - // CHECK: <2 x i32> @llvm.x86.mmx.pslli.d(<2 x i32> %{{.*}}, i32 {{.*}}) + // CHECK: x86_mmx @llvm.x86.mmx.pslli.d(x86_mmx %{{.*}}, i32 {{.*}}) _mm_slli_pi32(a, c); // FIXME: <1 x i64> @llvm.x86.mmx.pslli.q(<1 x i64> %{{.*}}, i32 {{.*}}) // This is currently lowered into non-intrinsic instructions. This may not be // correct once the MMX reworking is finished. _mm_slli_si64(a, c); - // CHECK: <4 x i16> @llvm.x86.mmx.psrli.w(<4 x i16> %{{.*}}, i32 {{.*}}) + // CHECK: x86_mmx @llvm.x86.mmx.psrli.w(x86_mmx %{{.*}}, i32 {{.*}}) _mm_srli_pi16(a, c); - // CHECK: <2 x i32> @llvm.x86.mmx.psrli.d(<2 x i32> %{{.*}}, i32 {{.*}}) + // CHECK: x86_mmx @llvm.x86.mmx.psrli.d(x86_mmx %{{.*}}, i32 {{.*}}) _mm_srli_pi32(a, c); // FIXME: <1 x i64> @llvm.x86.mmx.psrli.q(<1 x i64> %{{.*}}, i32 {{.*}}) // See above. _mm_srli_si64(a, c); - // CHECK: <4 x i16> @llvm.x86.mmx.psrai.w(<4 x i16> %{{.*}}, i32 {{.*}}) + // CHECK: x86_mmx @llvm.x86.mmx.psrai.w(x86_mmx %{{.*}}, i32 {{.*}}) _mm_srai_pi16(a, c); - // CHECK: <2 x i32> @llvm.x86.mmx.psrai.d(<2 x i32> %{{.*}}, i32 {{.*}}) + // CHECK: x86_mmx @llvm.x86.mmx.psrai.d(x86_mmx %{{.*}}, i32 {{.*}}) _mm_srai_pi32(a, c); }