From: Craig Topper Date: Mon, 4 Jun 2012 03:42:47 +0000 (+0000) Subject: Add fma3 intrinsic header file. X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=97075fb073ffc40aef4c2fdef393b715e514301e;p=clang Add fma3 intrinsic header file. git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@157913 91177308-0d34-0410-b5e6-96231b3b80d8 ---
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index 3c58a82d05..3043e03d7f 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -8,6 +8,7 @@ set(files
   emmintrin.h
   float.h
   fma4intrin.h
+  fmaintrin.h
   immintrin.h
   iso646.h
   limits.h
diff --git a/lib/Headers/fmaintrin.h b/lib/Headers/fmaintrin.h
new file mode 100644
index 0000000000..6bfd5a88b3
--- /dev/null
+++ b/lib/Headers/fmaintrin.h
@@ -0,0 +1,229 @@
+/*===---- fmaintrin.h - FMA3 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <fmaintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __FMAINTRIN_H
+#define __FMAINTRIN_H
+
+#ifndef __FMA__
+# error "FMA instruction set is not enabled"
+#else
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmadd_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_fmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+  return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_fnmadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+  return (__m128)__builtin_ia32_vfnmaddps(__A, 
__B, __C); +} + +static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) +_mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C); +} + +static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) +_mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C); +} + +static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) +_mm_fnmadd_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C); +} + +static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) +_mm_fnmsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C); +} + +static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) +_mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C); +} + +static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) +_mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C); +} + +static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) +_mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C); +} + +static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) +_mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C); +} + +static __inline__ __m128d __attribute__((__always_inline__, __nodebug__)) +_mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C); +} + +static __inline__ __m128 __attribute__((__always_inline__, __nodebug__)) +_mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C) +{ + return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C); +} + +static __inline__ __m128d 
__attribute__((__always_inline__, __nodebug__)) +_mm_fmsubadd_pd(__m128d __A, __m128d __B, __m128d __C) +{ + return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C); +} + +static __inline__ __m256 __attribute__((__always_inline__, __nodebug__)) +_mm256_fmadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C); +} + +static __inline__ __m256d __attribute__((__always_inline__, __nodebug__)) +_mm256_fmadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C); +} + +static __inline__ __m256 __attribute__((__always_inline__, __nodebug__)) +_mm256_fmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C); +} + +static __inline__ __m256d __attribute__((__always_inline__, __nodebug__)) +_mm256_fmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C); +} + +static __inline__ __m256 __attribute__((__always_inline__, __nodebug__)) +_mm256_fnmadd_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C); +} + +static __inline__ __m256d __attribute__((__always_inline__, __nodebug__)) +_mm256_fnmadd_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C); +} + +static __inline__ __m256 __attribute__((__always_inline__, __nodebug__)) +_mm256_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C); +} + +static __inline__ __m256d __attribute__((__always_inline__, __nodebug__)) +_mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) +{ + return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C); +} + +static __inline__ __m256 __attribute__((__always_inline__, __nodebug__)) +_mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C) +{ + return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C); +} + +static __inline__ __m256d 
__attribute__((__always_inline__, __nodebug__))
+_mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+  return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_fmsubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+  return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#endif /* __FMA__ */
+
+#endif /* __FMAINTRIN_H */
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index 16055251dd..f4258dc28e 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -72,4 +72,8 @@
 #include <popcntintrin.h>
 #endif
 
+#ifdef __FMA__
+#include <fmaintrin.h>
+#endif
+
 #endif /* __IMMINTRIN_H */
diff --git a/test/CodeGen/fma-builtins.c b/test/CodeGen/fma-builtins.c
new file mode 100644
index 0000000000..3424616b12
--- /dev/null
+++ b/test/CodeGen/fma-builtins.c
@@ -0,0 +1,166 @@
+// RUN: %clang_cc1 %s -O3 -triple=x86_64-apple-darwin -target-feature +fma -emit-llvm -o - | FileCheck %s
+
+// Don't include mm_malloc.h, it's system specific.
+#define __MM_MALLOC_H
+
+#include <immintrin.h>
+
+__m128 test_mm_fmadd_ps(__m128 a, __m128 b, __m128 c) {
+  // CHECK: @llvm.x86.fma.vfmadd.ps
+  return _mm_fmadd_ps(a, b, c);
+}
+
+__m128d test_mm_fmadd_pd(__m128d a, __m128d b, __m128d c) {
+  // CHECK: @llvm.x86.fma.vfmadd.pd
+  return _mm_fmadd_pd(a, b, c);
+}
+
+__m128 test_mm_fmadd_ss(__m128 a, __m128 b, __m128 c) {
+  // CHECK: @llvm.x86.fma.vfmadd.ss
+  return _mm_fmadd_ss(a, b, c);
+}
+
+__m128d test_mm_fmadd_sd(__m128d a, __m128d b, __m128d c) {
+  // CHECK: @llvm.x86.fma.vfmadd.sd
+  return _mm_fmadd_sd(a, b, c);
+}
+
+__m128 test_mm_fmsub_ps(__m128 a, __m128 b, __m128 c) {
+  // CHECK: @llvm.x86.fma.vfmsub.ps
+  return _mm_fmsub_ps(a, b, c);
+}
+
+__m128d test_mm_fmsub_pd(__m128d a, __m128d b, __m128d c) {
+  // CHECK: @llvm.x86.fma.vfmsub.pd
+  return _mm_fmsub_pd(a, b, c);
+}
+
+__m128 test_mm_fmsub_ss(__m128 a, __m128 b, __m128 c) {
+  // CHECK: @llvm.x86.fma.vfmsub.ss
+  return _mm_fmsub_ss(a, b, c);
+}
+
+__m128d test_mm_fmsub_sd(__m128d a, __m128d b, __m128d c) {
+  // CHECK: @llvm.x86.fma.vfmsub.sd
+  return _mm_fmsub_sd(a, b, c);
+}
+
+__m128 test_mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) {
+  // CHECK: @llvm.x86.fma.vfnmadd.ps
+  return _mm_fnmadd_ps(a, b, c);
+}
+
+__m128d test_mm_fnmadd_pd(__m128d a, __m128d b, __m128d c) {
+  // CHECK: @llvm.x86.fma.vfnmadd.pd
+  return _mm_fnmadd_pd(a, b, c);
+}
+
+__m128 test_mm_fnmadd_ss(__m128 a, __m128 b, __m128 c) {
+  // CHECK: @llvm.x86.fma.vfnmadd.ss
+  return _mm_fnmadd_ss(a, b, c);
+}
+
+__m128d test_mm_fnmadd_sd(__m128d a, __m128d b, __m128d c) {
+  // CHECK: @llvm.x86.fma.vfnmadd.sd
+  return _mm_fnmadd_sd(a, b, c);
+}
+
+__m128 test_mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) {
+  // CHECK: @llvm.x86.fma.vfnmsub.ps
+  return _mm_fnmsub_ps(a, b, c);
+}
+
+__m128d test_mm_fnmsub_pd(__m128d a, __m128d b, __m128d c) {
+  // CHECK: @llvm.x86.fma.vfnmsub.pd
+  return _mm_fnmsub_pd(a, b, c);
+}
+
+__m128 test_mm_fnmsub_ss(__m128 a, __m128 b, __m128 c) {
+  // CHECK: @llvm.x86.fma.vfnmsub.ss
+  
return _mm_fnmsub_ss(a, b, c); +} + +__m128d test_mm_fnmsub_sd(__m128d a, __m128d b, __m128d c) { + // CHECK: @llvm.x86.fma.vfnmsub.sd + return _mm_fnmsub_sd(a, b, c); +} + +__m128 test_mm_fmaddsub_ps(__m128 a, __m128 b, __m128 c) { + // CHECK: @llvm.x86.fma.vfmaddsub.ps + return _mm_fmaddsub_ps(a, b, c); +} + +__m128d test_mm_fmaddsub_pd(__m128d a, __m128d b, __m128d c) { + // CHECK: @llvm.x86.fma.vfmaddsub.pd + return _mm_fmaddsub_pd(a, b, c); +} + +__m128 test_mm_fmsubadd_ps(__m128 a, __m128 b, __m128 c) { + // CHECK: @llvm.x86.fma.vfmsubadd.ps + return _mm_fmsubadd_ps(a, b, c); +} + +__m128d test_mm_fmsubadd_pd(__m128d a, __m128d b, __m128d c) { + // CHECK: @llvm.x86.fma.vfmsubadd.pd + return _mm_fmsubadd_pd(a, b, c); +} + +__m256 test_mm256_fmadd_ps(__m256 a, __m256 b, __m256 c) { + // CHECK: @llvm.x86.fma.vfmadd.ps.256 + return _mm256_fmadd_ps(a, b, c); +} + +__m256d test_mm256_fmadd_pd(__m256d a, __m256d b, __m256d c) { + // CHECK: @llvm.x86.fma.vfmadd.pd.256 + return _mm256_fmadd_pd(a, b, c); +} + +__m256 test_mm256_fmsub_ps(__m256 a, __m256 b, __m256 c) { + // CHECK: @llvm.x86.fma.vfmsub.ps.256 + return _mm256_fmsub_ps(a, b, c); +} + +__m256d test_mm256_fmsub_pd(__m256d a, __m256d b, __m256d c) { + // CHECK: @llvm.x86.fma.vfmsub.pd.256 + return _mm256_fmsub_pd(a, b, c); +} + +__m256 test_mm256_fnmadd_ps(__m256 a, __m256 b, __m256 c) { + // CHECK: @llvm.x86.fma.vfnmadd.ps.256 + return _mm256_fnmadd_ps(a, b, c); +} + +__m256d test_mm256_fnmadd_pd(__m256d a, __m256d b, __m256d c) { + // CHECK: @llvm.x86.fma.vfnmadd.pd.256 + return _mm256_fnmadd_pd(a, b, c); +} + +__m256 test_mm256_fnmsub_ps(__m256 a, __m256 b, __m256 c) { + // CHECK: @llvm.x86.fma.vfnmsub.ps.256 + return _mm256_fnmsub_ps(a, b, c); +} + +__m256d test_mm256_fnmsub_pd(__m256d a, __m256d b, __m256d c) { + // CHECK: @llvm.x86.fma.vfnmsub.pd.256 + return _mm256_fnmsub_pd(a, b, c); +} + +__m256 test_mm256_fmaddsub_ps(__m256 a, __m256 b, __m256 c) { + // CHECK: @llvm.x86.fma.vfmaddsub.ps.256 + return 
_mm256_fmaddsub_ps(a, b, c); +} + +__m256d test_mm256_fmaddsub_pd(__m256d a, __m256d b, __m256d c) { + // CHECK: @llvm.x86.fma.vfmaddsub.pd.256 + return _mm256_fmaddsub_pd(a, b, c); +} + +__m256 test_mm256_fmsubadd_ps(__m256 a, __m256 b, __m256 c) { + // CHECK: @llvm.x86.fma.vfmsubadd.ps.256 + return _mm256_fmsubadd_ps(a, b, c); +} + +__m256d test_mm256_fmsubadd_pd(__m256d a, __m256d b, __m256d c) { + // CHECK: @llvm.x86.fma.vfmsubadd.pd.256 + return _mm256_fmsubadd_pd(a, b, c); +}