TARGET_HEADER_BUILTIN(_InterlockedOr64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedOr64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_acq, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_nf, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_rel, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
TARGET_HEADER_BUILTIN(_ReadWriteBarrier, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(__getReg, "ULLii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_ReadStatusReg, "ii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedOr64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedOr64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_acq, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_nf, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor8_rel, "ccD*c", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_acq, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_nf, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor16_rel, "ssD*s", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_acq, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_nf, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor_rel, "LiLiD*Li", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_acq, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-TARGET_HEADER_BUILTIN(_InterlockedXor64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
-
#undef BUILTIN
#undef LANGBUILTIN
#undef TARGET_HEADER_BUILTIN
_InterlockedOr_acq,
_InterlockedOr_rel,
_InterlockedOr_nf,
- _InterlockedXor_acq,
- _InterlockedXor_rel,
- _InterlockedXor_nf,
__fastfail,
};
case MSVCIntrin::_InterlockedOr_nf:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
AtomicOrdering::Monotonic);
- case MSVCIntrin::_InterlockedXor_acq:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
- AtomicOrdering::Acquire);
- case MSVCIntrin::_InterlockedXor_rel:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
- AtomicOrdering::Release);
- case MSVCIntrin::_InterlockedXor_nf:
- return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
- AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedDecrement: {
llvm::Type *IntTy = ConvertType(E->getType());
case ARM::BI_InterlockedOr_nf:
case ARM::BI_InterlockedOr64_nf:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
- case ARM::BI_InterlockedXor8_acq:
- case ARM::BI_InterlockedXor16_acq:
- case ARM::BI_InterlockedXor_acq:
- case ARM::BI_InterlockedXor64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
- case ARM::BI_InterlockedXor8_rel:
- case ARM::BI_InterlockedXor16_rel:
- case ARM::BI_InterlockedXor_rel:
- case ARM::BI_InterlockedXor64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
- case ARM::BI_InterlockedXor8_nf:
- case ARM::BI_InterlockedXor16_nf:
- case ARM::BI_InterlockedXor_nf:
- case ARM::BI_InterlockedXor64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
}
// Get the last argument, which specifies the vector type.
case AArch64::BI_InterlockedOr_nf:
case AArch64::BI_InterlockedOr64_nf:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
- case AArch64::BI_InterlockedXor8_acq:
- case AArch64::BI_InterlockedXor16_acq:
- case AArch64::BI_InterlockedXor_acq:
- case AArch64::BI_InterlockedXor64_acq:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
- case AArch64::BI_InterlockedXor8_rel:
- case AArch64::BI_InterlockedXor16_rel:
- case AArch64::BI_InterlockedXor_rel:
- case AArch64::BI_InterlockedXor64_rel:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
- case AArch64::BI_InterlockedXor8_nf:
- case AArch64::BI_InterlockedXor16_nf:
- case AArch64::BI_InterlockedXor_nf:
- case AArch64::BI_InterlockedXor64_nf:
- return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
case AArch64::BI_InterlockedAdd: {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
|* Interlocked Xor
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-char _InterlockedXor8_acq(char volatile *_Value, char _Mask);
-char _InterlockedXor8_nf(char volatile *_Value, char _Mask);
-char _InterlockedXor8_rel(char volatile *_Value, char _Mask);
-short _InterlockedXor16_acq(short volatile *_Value, short _Mask);
-short _InterlockedXor16_nf(short volatile *_Value, short _Mask);
-short _InterlockedXor16_rel(short volatile *_Value, short _Mask);
-long _InterlockedXor_acq(long volatile *_Value, long _Mask);
-long _InterlockedXor_nf(long volatile *_Value, long _Mask);
-long _InterlockedXor_rel(long volatile *_Value, long _Mask);
-__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask);
-__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask);
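+/* The suffix of each _InterlockedXor variant maps directly onto a C11 memory
+ * order: _acq uses __ATOMIC_ACQUIRE, _rel uses __ATOMIC_RELEASE, and _nf
+ * ("no fence") uses __ATOMIC_RELAXED. Each function returns the value that
+ * was stored at *_Value before the xor, matching MSVC _InterlockedXor
+ * semantics. */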
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedXor8_acq(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedXor8_nf(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedXor8_rel(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedXor16_acq(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedXor16_nf(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedXor16_rel(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedXor_acq(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedXor_nf(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedXor_rel(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
+}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Exchange
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask monotonic
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
-
-char test_InterlockedXor8_acq(char volatile *value, char mask) {
- return _InterlockedXor8_acq(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_acq(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask acquire
-// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-char test_InterlockedXor8_rel(char volatile *value, char mask) {
- return _InterlockedXor8_rel(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_rel(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask release
-// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-char test_InterlockedXor8_nf(char volatile *value, char mask) {
- return _InterlockedXor8_nf(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_nf(i8*{{[a-z_ ]*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i8* %value, i8 %mask monotonic
-// CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-short test_InterlockedXor16_acq(short volatile *value, short mask) {
- return _InterlockedXor16_acq(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_acq(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask acquire
-// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-short test_InterlockedXor16_rel(short volatile *value, short mask) {
- return _InterlockedXor16_rel(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_rel(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask release
-// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-short test_InterlockedXor16_nf(short volatile *value, short mask) {
- return _InterlockedXor16_nf(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_nf(i16*{{[a-z_ ]*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i16* %value, i16 %mask monotonic
-// CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-long test_InterlockedXor_acq(long volatile *value, long mask) {
- return _InterlockedXor_acq(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_acq(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask acquire
-// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-long test_InterlockedXor_rel(long volatile *value, long mask) {
- return _InterlockedXor_rel(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_rel(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask release
-// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-long test_InterlockedXor_nf(long volatile *value, long mask) {
- return _InterlockedXor_nf(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_nf(i32*{{[a-z_ ]*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i32* %value, i32 %mask monotonic
-// CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-__int64 test_InterlockedXor64_acq(__int64 volatile *value, __int64 mask) {
- return _InterlockedXor64_acq(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_acq(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask acquire
-// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-__int64 test_InterlockedXor64_rel(__int64 volatile *value, __int64 mask) {
- return _InterlockedXor64_rel(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_rel(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask release
-// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
-
-__int64 test_InterlockedXor64_nf(__int64 volatile *value, __int64 mask) {
- return _InterlockedXor64_nf(value, mask);
-}
-// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_nf(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask monotonic
-// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
-// CHECK-ARM-ARM64: }
#endif
#if !defined(__aarch64__)