// f -> this is a libc/libm function without the '__builtin_' prefix. It can
// be followed by ':headername:' to state which header this function
// comes from.
+// h -> this function requires a specific header or an explicit declaration.
// i -> this is a runtime library implemented function without the
// '__builtin_' prefix. It will be implemented in compiler-rt or libgcc.
// p:N: -> this is a printf-like function whose Nth argument is the format
//         string.
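// For example (an illustrative reading; the exact entry may differ between
// versions of this file), a printf-style libc function is declared as
//   LIBBUILTIN(printf, "icC*.", "fp:0:", "stdio.h", ALL_LANGUAGES)
// where 'f' marks a libc function coming from <stdio.h> and 'p:0:' says that
// its 0th argument is the format string.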
// Microsoft builtins. These are only active with -fms-extensions.
LANGBUILTIN(_alloca, "v*z", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__assume, "vb", "n", ALL_MS_LANGUAGES)
+LIBBUILTIN(_byteswap_ushort, "UsUs", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
+LIBBUILTIN(_byteswap_ulong, "ULiULi", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
+LIBBUILTIN(_byteswap_uint64, "ULLiULLi", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
LANGBUILTIN(__debugbreak, "v", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__exception_code, "ULi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_exception_code, "ULi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor16, "ssD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__noop, "i.", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__popcnt16, "UsUs", "nc", ALL_MS_LANGUAGES)
+LANGBUILTIN(__popcnt, "UiUi", "nc", ALL_MS_LANGUAGES)
+LANGBUILTIN(__popcnt64, "ULLiULLi", "nc", ALL_MS_LANGUAGES)
LANGBUILTIN(__readfsdword, "ULiULi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotl8, "UcUcUc", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotl16, "UsUsUc", "n", ALL_MS_LANGUAGES)
return strchr(getRecord(ID).Attributes, 'f') != nullptr;
}
+ /// \brief Returns true if this builtin requires an appropriate header in
+ /// other compilers. In Clang it will work even without including the header,
+ /// but we can still emit a warning about the missing header.
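+ /// For example (a sketch of the intended use; the actual check lives in the
+ /// SemaDecl.cpp hunk later in this patch):
+ ///   if (Context.BuiltinInfo.isHeaderDependentFunction(ID))
+ ///     Diag(Loc, diag::ext_implicit_lib_function_decl) << ...;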
+ bool isHeaderDependentFunction(unsigned ID) const {
+ return strchr(getRecord(ID).Attributes, 'h') != nullptr;
+ }
+
/// \brief Determines whether this builtin is a predefined compiler-rt/libgcc
/// function, such as "__clear_cache", where we know the signature a
/// priori.
# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif
+#if defined(BUILTIN) && !defined(TARGET_HEADER_BUILTIN)
+# define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANG, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
// FIXME: Are these nothrow/const?
// Miscellaneous builtin for checking x86 cpu features.
TARGET_BUILTIN(__builtin_ia32_pabsd128, "V4iV4i", "", "ssse3")
TARGET_BUILTIN(__builtin_ia32_ldmxcsr, "vUi", "", "sse")
+TARGET_HEADER_BUILTIN(_mm_setcsr, "vUi", "h", "xmmintrin.h", ALL_LANGUAGES, "sse")
TARGET_BUILTIN(__builtin_ia32_stmxcsr, "Ui", "", "sse")
+TARGET_HEADER_BUILTIN(_mm_getcsr, "Ui", "h", "xmmintrin.h", ALL_LANGUAGES, "sse")
TARGET_BUILTIN(__builtin_ia32_cvtss2si, "iV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_cvttss2si, "iV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_cvtss2si64, "LLiV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_storelps, "vV2i*V4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_movmskps, "iV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_sfence, "v", "", "sse")
+TARGET_HEADER_BUILTIN(_mm_sfence, "v", "h", "xmmintrin.h", ALL_LANGUAGES, "sse")
TARGET_BUILTIN(__builtin_ia32_rcpps, "V4fV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_rcpss, "V4fV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_rsqrtps, "V4fV4f", "", "sse")
TARGET_BUILTIN(__builtin_ia32_cvtps2dq, "V4iV4f", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_cvttps2dq, "V4iV4f", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_clflush, "vvC*", "", "sse2")
+TARGET_HEADER_BUILTIN(_mm_clflush, "vvC*", "h", "emmintrin.h", ALL_LANGUAGES, "sse2")
TARGET_BUILTIN(__builtin_ia32_lfence, "v", "", "sse2")
+TARGET_HEADER_BUILTIN(_mm_lfence, "v", "h", "emmintrin.h", ALL_LANGUAGES, "sse2")
TARGET_BUILTIN(__builtin_ia32_mfence, "v", "", "sse2")
+TARGET_HEADER_BUILTIN(_mm_mfence, "v", "h", "emmintrin.h", ALL_LANGUAGES, "sse2")
TARGET_BUILTIN(__builtin_ia32_pause, "v", "", "sse2")
+TARGET_HEADER_BUILTIN(_mm_pause, "v", "h", "emmintrin.h", ALL_LANGUAGES, "sse2")
TARGET_BUILTIN(__builtin_ia32_pmuludq128, "V2LLiV4iV4i", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_psraw128, "V8sV8sV8s", "", "sse2")
TARGET_BUILTIN(__builtin_ia32_psrad128, "V4iV4iV4i", "", "sse2")
BUILTIN(__builtin_ia32_rdpmc, "ULLii", "")
BUILTIN(__builtin_ia32_rdtsc, "ULLi", "")
+BUILTIN(__rdtsc, "ULLi", "")
BUILTIN(__builtin_ia32_rdtscp, "ULLiUi*", "")
// PKU
TARGET_BUILTIN(__builtin_ia32_rdpkru, "Ui", "", "pku")
#undef BUILTIN
#undef TARGET_BUILTIN
+#undef TARGET_HEADER_BUILTIN
{ #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
{ #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE },
+#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
+ { #ID, TYPE, ATTRS, HEADER, LANGS, FEATURE },
#include "clang/Basic/BuiltinsX86.def"
};
"cast");
return RValue::get(Result);
}
+ case Builtin::BI__popcnt16:
+ case Builtin::BI__popcnt:
+ case Builtin::BI__popcnt64:
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll: {
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
+ case X86::BI_mm_clflush: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
+ Ops[0]);
+ }
+ case X86::BI_mm_lfence: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
+ }
+ case X86::BI_mm_mfence: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
+ }
+ case X86::BI_mm_sfence: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
+ }
+ case X86::BI_mm_pause: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
+ }
+ case X86::BI__rdtsc: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
+ }
case X86::BI__builtin_ia32_undef128:
case X86::BI__builtin_ia32_undef256:
case X86::BI__builtin_ia32_undef512:
case X86::BI__builtin_ia32_vec_ext_v2si:
return Builder.CreateExtractElement(Ops[0],
llvm::ConstantInt::get(Ops[1]->getType(), 0));
+ case X86::BI_mm_setcsr:
case X86::BI__builtin_ia32_ldmxcsr: {
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
}
+ case X86::BI_mm_getcsr:
case X86::BI__builtin_ia32_stmxcsr: {
Address Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
}
#endif
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/// \brief The cache line containing __p is flushed and invalidated from all
/// caches in the coherency domain.
///
/// \param __p
/// A pointer to the memory location used to identify the cache line to be
/// flushed.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_clflush(void const *__p)
-{
- __builtin_ia32_clflush(__p);
-}
+void _mm_clflush(void const *);
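+/* Illustrative usage only, not part of this header: after writing to memory,
+   flush the cache line that holds it (requires SSE2), e.g.
+     char buf[64];
+     buf[0] = 1;
+     _mm_clflush(buf);
+*/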
/// \brief Forces strong memory ordering (serialization) between load
/// instructions preceding this instruction and load instructions following
///
/// This intrinsic corresponds to the \c LFENCE instruction.
///
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_lfence(void)
-{
- __builtin_ia32_lfence();
-}
+void _mm_lfence(void);
/// \brief Forces strong memory ordering (serialization) between load and store
/// instructions preceding this instruction and load and store instructions
///
/// This intrinsic corresponds to the \c MFENCE instruction.
///
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_mfence(void)
-{
- __builtin_ia32_mfence();
-}
+void _mm_mfence(void);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
/// \brief Converts 16-bit signed integers from both 128-bit integer vector
/// operands into 8-bit signed integers, and packs the results into the
///
/// This intrinsic corresponds to the \c PAUSE instruction.
///
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_pause(void)
-{
- __builtin_ia32_pause();
-}
+#if defined(__cplusplus)
+extern "C"
+#endif
+void _mm_pause(void);
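+/* Illustrative usage only, not part of this header: _mm_pause is typically
+   placed in the body of a spin-wait loop, e.g.
+     while (!flag)
+       _mm_pause();
+*/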
#undef __DEFAULT_FN_ATTRS
return __builtin_ia32_rdpmc(__A);
}
-/* __rdtsc */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
-__rdtsc(void) {
- return __builtin_ia32_rdtsc();
-}
-
/* __rdtscp */
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__rdtscp(unsigned int *__A) {
*_Index = 31 - __builtin_clzl(_Mask);
return 1;
}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS
-__popcnt16(unsigned short _Value) {
- return __builtin_popcount((int)_Value);
-}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__popcnt(unsigned int _Value) {
- return __builtin_popcount(_Value);
-}
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_bittest(long const *_BitBase, long _BitPos) {
return (*_BitBase >> _BitPos) & 1;
*_Index = 63 - __builtin_clzll(_Mask);
return 1;
}
-static __inline__
-unsigned __int64 __DEFAULT_FN_ATTRS
-__popcnt64(unsigned __int64 _Value) {
- return __builtin_popcountll(_Value);
-}
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_bittest64(__int64 const *_BitBase, __int64 _BitPos) {
return (*_BitBase >> _BitPos) & 1;
__atomic_fetch_or(_BitBase, 1ll << _BitPos, __ATOMIC_SEQ_CST);
return (_PrevVal >> _BitPos) & 1;
}
-/*----------------------------------------------------------------------------*\\r
-|* Interlocked Exchange Add\r
-\*----------------------------------------------------------------------------*/\r
-static __inline__ __int64 __DEFAULT_FN_ATTRS\r
-_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {\r
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);\r
-}\r
-/*----------------------------------------------------------------------------*\\r
-|* Interlocked Exchange Sub\r
-\*----------------------------------------------------------------------------*/\r
-static __inline__ __int64 __DEFAULT_FN_ATTRS\r
-_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {\r
- return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);\r
-}\r
-/*----------------------------------------------------------------------------*\\r
-|* Interlocked Increment\r
-\*----------------------------------------------------------------------------*/\r
-static __inline__ __int64 __DEFAULT_FN_ATTRS\r
-_InterlockedIncrement64(__int64 volatile *_Value) {\r
- return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);\r
-}\r
-/*----------------------------------------------------------------------------*\\r
-|* Interlocked Decrement\r
-\*----------------------------------------------------------------------------*/\r
-static __inline__ __int64 __DEFAULT_FN_ATTRS\r
-_InterlockedDecrement64(__int64 volatile *_Value) {\r
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);\r
-}\r
-/*----------------------------------------------------------------------------*\\r
-|* Interlocked And\r
-\*----------------------------------------------------------------------------*/\r
-static __inline__ __int64 __DEFAULT_FN_ATTRS\r
-_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {\r
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);\r
-}\r
-/*----------------------------------------------------------------------------*\\r
-|* Interlocked Or\r
-\*----------------------------------------------------------------------------*/\r
-static __inline__ __int64 __DEFAULT_FN_ATTRS\r
-_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {\r
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);\r
-}\r
-/*----------------------------------------------------------------------------*\\r
-|* Interlocked Xor\r
-\*----------------------------------------------------------------------------*/\r
-static __inline__ __int64 __DEFAULT_FN_ATTRS\r
-_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {\r
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);\r
-}\r
-/*----------------------------------------------------------------------------*\\r
-|* Interlocked Exchange\r
-\*----------------------------------------------------------------------------*/\r
-static __inline__ __int64 __DEFAULT_FN_ATTRS\r
-_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {\r
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);\r
- return _Value;\r
-}\r
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Add
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Sub
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
+ return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Increment
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedIncrement64(__int64 volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Decrement
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedDecrement64(__int64 volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked And
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Or
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Xor
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+ return _Value;
+}
#endif
/*----------------------------------------------------------------------------*\
|* Barriers
///
/// This intrinsic corresponds to the \c SFENCE instruction.
///
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_sfence(void)
-{
- __builtin_ia32_sfence();
-}
+#if defined(__cplusplus)
+extern "C"
+#endif
+void _mm_sfence(void);
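+/* Illustrative usage only, not part of this header: a typical pattern is to
+   order non-temporal stores before publishing data to another thread, e.g.
+     _mm_stream_ps(dst, v);
+     _mm_sfence();
+*/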
/// \brief Extracts 16-bit element from a 64-bit vector of [4 x i16] and
/// returns it, as specified by the immediate integer operand.
return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
}
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/// \brief Returns the contents of the MXCSR register as a 32-bit unsigned
/// integer value. There are several groups of macros associated with this
/// intrinsic, including:
///
/// \returns A 32-bit unsigned integer containing the contents of the MXCSR
/// register.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm_getcsr(void)
-{
- return __builtin_ia32_stmxcsr();
-}
+unsigned int _mm_getcsr(void);
/// \brief Sets the MXCSR register with the 32-bit unsigned integer value. There
/// are several groups of macros associated with this intrinsic, including:
///
/// \param __i
/// A 32-bit unsigned integer value to be written to the MXCSR register.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_setcsr(unsigned int __i)
-{
- __builtin_ia32_ldmxcsr(__i);
-}
+void _mm_setcsr(unsigned int);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
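+/* Illustrative usage only, not part of this header: read-modify-write the
+   MXCSR register, e.g. to enable flush-to-zero mode (_MM_FLUSH_ZERO_ON is
+   0x8000):
+     unsigned int csr = _mm_getcsr();
+     _mm_setcsr(csr | 0x8000);
+*/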
/// \brief Selects 4 float values from the 128-bit operands of [4 x float], as
/// specified by the immediate value operand.
return nullptr;
}
- if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(ID)) {
+ if (!ForRedeclaration &&
+ (Context.BuiltinInfo.isPredefinedLibFunction(ID) ||
+ Context.BuiltinInfo.isHeaderDependentFunction(ID))) {
Diag(Loc, diag::ext_implicit_lib_function_decl)
<< Context.BuiltinInfo.getName(ID) << R;
if (Context.BuiltinInfo.getHeaderName(ID) &&
tmp_i = __builtin_ia32_vec_ext_v2si(tmp_V2i, 0);
(void) __builtin_ia32_ldmxcsr(tmp_Ui);
+ (void) _mm_setcsr(tmp_Ui);
tmp_Ui = __builtin_ia32_stmxcsr();
+ tmp_Ui = _mm_getcsr();
(void)__builtin_ia32_fxsave(tmp_vp);
(void)__builtin_ia32_fxsave64(tmp_vp);
(void)__builtin_ia32_fxrstor(tmp_vp);
tmp_i = __builtin_ia32_cvttss2si(tmp_V4f);
tmp_i = __builtin_ia32_rdtsc();
+ tmp_i = __rdtsc();
tmp_i = __builtin_ia32_rdtscp(&tmp_Ui);
tmp_LLi = __builtin_ia32_rdpmc(tmp_i);
#ifdef USE_64
tmp_i = __builtin_ia32_pmovmskb(tmp_V8c);
(void) __builtin_ia32_movntq(tmp_V1LLip, tmp_V1LLi);
(void) __builtin_ia32_sfence();
+ (void) _mm_sfence();
tmp_V4s = __builtin_ia32_psadbw(tmp_V8c, tmp_V8c);
tmp_V4f = __builtin_ia32_rcpps(tmp_V4f);
tmp_V4i = __builtin_ia32_cvtps2dq(tmp_V4f);
tmp_V4i = __builtin_ia32_cvttps2dq(tmp_V4f);
(void) __builtin_ia32_clflush(tmp_vCp);
+ (void) _mm_clflush(tmp_vCp);
(void) __builtin_ia32_lfence();
+ (void) _mm_lfence();
(void) __builtin_ia32_mfence();
+ (void) _mm_mfence();
+ (void) __builtin_ia32_pause();
+ (void) _mm_pause();
tmp_V4s = __builtin_ia32_psllwi(tmp_V4s, tmp_i);
tmp_V2i = __builtin_ia32_pslldi(tmp_V2i, tmp_i);
tmp_V1LLi = __builtin_ia32_psllqi(tmp_V1LLi, tmp_i);
--- /dev/null
+// RUN: %clang_cc1 -fsyntax-only -ffreestanding %s -verify
+// expected-no-diagnostics
+
+#if defined(i386) || defined(__x86_64__)
+
+// Include the metaheader that includes all x86 intrinsic headers.
+extern "C++" {
+#include <x86intrin.h>
+}
+
+#endif
--- /dev/null
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-feature +sse2 -fsyntax-only -verify %s
+// RUN: %clang_cc1 -triple x86_64-unknown-unknown -target-feature +sse2 -fsyntax-only -verify %s -x c++
+
+void f() {
+ (void)_mm_getcsr(); // expected-warning{{implicitly declaring library function '_mm_getcsr'}} \
+ // expected-note{{include the header <xmmintrin.h> or explicitly provide a declaration for '_mm_getcsr'}}
+ _mm_setcsr(1); // expected-warning{{implicitly declaring library function '_mm_setcsr'}} \
+ // expected-note{{include the header <xmmintrin.h> or explicitly provide a declaration for '_mm_setcsr'}}
+ _mm_sfence(); // expected-warning{{implicitly declaring library function '_mm_sfence'}} \
+ // expected-note{{include the header <xmmintrin.h> or explicitly provide a declaration for '_mm_sfence'}}
+
+ _mm_clflush((void*)0); // expected-warning{{implicitly declaring library function '_mm_clflush'}} \
+ // expected-note{{include the header <emmintrin.h> or explicitly provide a declaration for '_mm_clflush'}}
+ _mm_lfence(); // expected-warning{{implicitly declaring library function '_mm_lfence'}} \
+ // expected-note{{include the header <emmintrin.h> or explicitly provide a declaration for '_mm_lfence'}}
+ _mm_mfence(); // expected-warning{{implicitly declaring library function '_mm_mfence'}} \
+ // expected-note{{include the header <emmintrin.h> or explicitly provide a declaration for '_mm_mfence'}}
+ _mm_pause(); // expected-warning{{implicitly declaring library function '_mm_pause'}} \
+ // expected-note{{include the header <emmintrin.h> or explicitly provide a declaration for '_mm_pause'}}
+}
+
+unsigned int _mm_getcsr();
+void _mm_setcsr(unsigned int);
+void _mm_sfence();
+
+void _mm_clflush(void const *);
+void _mm_lfence();
+void _mm_mfence();
+void _mm_pause();
+
+void g() {
+ (void)_mm_getcsr();
+ _mm_setcsr(1);
+ _mm_sfence();
+
+ _mm_clflush((void*)0);
+ _mm_lfence();
+ _mm_mfence();
+ _mm_pause();
+}
--- /dev/null
+// RUN: %clang_cc1 -fsyntax-only -verify %s -fms-extensions
+
+void f() {
+ (void)_byteswap_ushort(42); // expected-warning{{implicitly declaring library function '_byteswap_ushort'}} \
+ // expected-note{{include the header <stdlib.h> or explicitly provide a declaration for '_byteswap_ushort'}}
+ (void)_byteswap_uint64(42LL); // expected-warning{{implicitly declaring library function '_byteswap_uint64'}} \
+ // expected-note{{include the header <stdlib.h> or explicitly provide a declaration for '_byteswap_uint64'}}
+}
+
+void _byteswap_ulong(); // expected-warning{{incompatible redeclaration of library function '_byteswap_ulong'}} \
+// expected-note{{'_byteswap_ulong' is a builtin}}
+
+unsigned short _byteswap_ushort(unsigned short);
+unsigned long long _byteswap_uint64(unsigned long long);
+
+void g() {
+ (void)_byteswap_ushort(42);
+ (void)_byteswap_uint64(42LL);
+}