TARGET_HEADER_BUILTIN(_InterlockedExchange64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedExchange64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_acq, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_nf, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_rel, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_acq, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_nf, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_rel, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_acq, "LiLiD*LiLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_nf, "LiLiD*LiLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_rel, "LiLiD*LiLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_acq, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+
TARGET_HEADER_BUILTIN(_ReadWriteBarrier, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(__getReg, "ULLii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_ReadStatusReg, "ii", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedExchange64_nf, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
TARGET_HEADER_BUILTIN(_InterlockedExchange64_rel, "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_acq, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_nf, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange8_rel, "ccD*cc", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_acq, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_nf, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange16_rel, "ssD*ss", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_acq, "LiLiD*LiLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_nf, "LiLiD*LiLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange_rel, "LiLiD*LiLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_acq, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_nf, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(_InterlockedCompareExchange64_rel, "LLiLLiD*LLiLLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+
#undef BUILTIN
#undef LANGBUILTIN
#undef TARGET_HEADER_BUILTIN
/// cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
+///
+/// Note: To lower Microsoft's _InterlockedCompareExchange* intrinsics, use
+/// EmitAtomicCmpXchgForMSIntrin below instead of this function.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
bool ReturnBool) {
QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
ValueType);
}
+/// This function should be invoked to emit atomic cmpxchg for Microsoft's
+/// _InterlockedCompareExchange* intrinsics, which have the following
+/// signature:
+///   T _InterlockedCompareExchange(T volatile *Destination,
+///                                 T Exchange,
+///                                 T Comparand);
+///
+/// The LLVM 'cmpxchg' instruction, on the other hand, takes its operands in
+/// the order:
+///   cmpxchg *Destination, Comparand, Exchange
+/// so we need to swap Comparand and Exchange when invoking
+/// CreateAtomicCmpXchg. This is also why we cannot reuse the utility function
+/// MakeAtomicCmpXchgValue above, since it expects the arguments to be already
+/// swapped.
+static Value *EmitAtomicCmpXchgForMSIntrin(
+    CodeGenFunction &CGF, const CallExpr *E,
+    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
+ auto T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(2)->getType()));
+
+ auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
+ auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
+ auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
+
+  // For Release ordering, the failure ordering should be Monotonic:
+  // 'cmpxchg' does not accept Release (or AcquireRelease) as a failure
+  // ordering.
+  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
+                         AtomicOrdering::Monotonic :
+                         SuccessOrdering;
+
+ auto *Result = CGF.Builder.CreateAtomicCmpXchg(
+ Destination, Comparand, Exchange,
+ SuccessOrdering, FailureOrdering);
+ Result->setVolatile(true);
+ return CGF.Builder.CreateExtractValue(Result, 0);
+}
+
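+// A minimal sketch of the mapping implemented above (illustrative only; the
+// names %p, %pair and %r are placeholders, not taken from this change): a
+// call such as
+//   long r = _InterlockedCompareExchange_acq(p, Exchange, Comparand);
+// is expected to lower to IR along the lines of
+//   %pair = cmpxchg volatile i32* %p, i32 %Comparand, i32 %Exchange acquire acquire
+//   %r    = extractvalue { i32, i1 } %pair, 0
+// Note that the compare value precedes the new value in the instruction,
+// which is why Comparand and Exchange are swapped when calling
+// CreateAtomicCmpXchg.
+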
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
_InterlockedExchange_acq,
_InterlockedExchange_rel,
_InterlockedExchange_nf,
+ _InterlockedCompareExchange_acq,
+ _InterlockedCompareExchange_rel,
+ _InterlockedCompareExchange_nf,
__fastfail,
};
case MSVCIntrin::_InterlockedExchange_nf:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
AtomicOrdering::Monotonic);
+ case MSVCIntrin::_InterlockedCompareExchange_acq:
+ return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
+ case MSVCIntrin::_InterlockedCompareExchange_rel:
+ return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
+ case MSVCIntrin::_InterlockedCompareExchange_nf:
+ return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
case MSVCIntrin::_InterlockedDecrement: {
llvm::Type *IntTy = ConvertType(E->getType());
case Builtin::BI_InterlockedCompareExchange8:
case Builtin::BI_InterlockedCompareExchange16:
case Builtin::BI_InterlockedCompareExchange:
- case Builtin::BI_InterlockedCompareExchange64: {
- AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(2)),
- EmitScalarExpr(E->getArg(1)),
- AtomicOrdering::SequentiallyConsistent,
- AtomicOrdering::SequentiallyConsistent);
- CXI->setVolatile(true);
- return RValue::get(Builder.CreateExtractValue(CXI, 0));
- }
+ case Builtin::BI_InterlockedCompareExchange64:
+ return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
case Builtin::BI_InterlockedIncrement16:
case Builtin::BI_InterlockedIncrement:
return RValue::get(
case ARM::BI_InterlockedExchange_nf:
case ARM::BI_InterlockedExchange64_nf:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
+ case ARM::BI_InterlockedCompareExchange8_acq:
+ case ARM::BI_InterlockedCompareExchange16_acq:
+ case ARM::BI_InterlockedCompareExchange_acq:
+ case ARM::BI_InterlockedCompareExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
+ case ARM::BI_InterlockedCompareExchange8_rel:
+ case ARM::BI_InterlockedCompareExchange16_rel:
+ case ARM::BI_InterlockedCompareExchange_rel:
+ case ARM::BI_InterlockedCompareExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
+ case ARM::BI_InterlockedCompareExchange8_nf:
+ case ARM::BI_InterlockedCompareExchange16_nf:
+ case ARM::BI_InterlockedCompareExchange_nf:
+ case ARM::BI_InterlockedCompareExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
}
// Get the last argument, which specifies the vector type.
case AArch64::BI_InterlockedExchange_nf:
case AArch64::BI_InterlockedExchange64_nf:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
+ case AArch64::BI_InterlockedCompareExchange8_acq:
+ case AArch64::BI_InterlockedCompareExchange16_acq:
+ case AArch64::BI_InterlockedCompareExchange_acq:
+ case AArch64::BI_InterlockedCompareExchange64_acq:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
+ case AArch64::BI_InterlockedCompareExchange8_rel:
+ case AArch64::BI_InterlockedCompareExchange16_rel:
+ case AArch64::BI_InterlockedCompareExchange_rel:
+ case AArch64::BI_InterlockedCompareExchange64_rel:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
+ case AArch64::BI_InterlockedCompareExchange8_nf:
+ case AArch64::BI_InterlockedCompareExchange16_nf:
+ case AArch64::BI_InterlockedCompareExchange_nf:
+ case AArch64::BI_InterlockedCompareExchange64_nf:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
case AArch64::BI_InterlockedAdd: {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
|* Interlocked Compare Exchange
\*----------------------------------------------------------------------------*/
#if defined(__arm__) || defined(__aarch64__)
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_acq(char volatile *_Destination,
- char _Exchange, char _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_nf(char volatile *_Destination,
- char _Exchange, char _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8_rel(char volatile *_Destination,
- char _Exchange, char _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_acq(short volatile *_Destination,
- short _Exchange, short _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_nf(short volatile *_Destination,
- short _Exchange, short _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16_rel(short volatile *_Destination,
- short _Exchange, short _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_acq(long volatile *_Destination,
- long _Exchange, long _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_nf(long volatile *_Destination,
- long _Exchange, long _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_rel(long volatile *_Destination,
- long _Exchange, long _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
- return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
- return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
- __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_RELEASE);
- return _Comparand;
-}
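+/* These compare-exchange variants are implemented directly as compiler
+   builtins (see the TARGET_HEADER_BUILTIN entries for them), so only
+   declarations are needed here. */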
+char _InterlockedCompareExchange8_acq(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+char _InterlockedCompareExchange8_nf(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+char _InterlockedCompareExchange8_rel(char volatile *_Destination,
+ char _Exchange, char _Comparand);
+short _InterlockedCompareExchange16_acq(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+short _InterlockedCompareExchange16_nf(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+short _InterlockedCompareExchange16_rel(short volatile *_Destination,
+ short _Exchange, short _Comparand);
+long _InterlockedCompareExchange_acq(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_nf(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+long _InterlockedCompareExchange_rel(long volatile *_Destination,
+ long _Exchange, long _Comparand);
+__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
+__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand);
#endif
/*----------------------------------------------------------------------------*\
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask monotonic
// CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]]
// CHECK-ARM-ARM64: }
+
+char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Exchange, char Comperand) {
+ return _InterlockedCompareExchange8_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange acquire acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i8 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Exchange, char Comperand) {
+ return _InterlockedCompareExchange8_rel(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange release monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i8 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchange, char Comperand) {
+ return _InterlockedCompareExchange8_nf(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(i8*{{[a-z_ ]*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i8* %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i8 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedCompareExchange16_acq(short volatile *Destination, short Exchange, short Comperand) {
+ return _InterlockedCompareExchange16_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange acquire acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedCompareExchange16_rel(short volatile *Destination, short Exchange, short Comperand) {
+ return _InterlockedCompareExchange16_rel(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange release monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Exchange, short Comperand) {
+ return _InterlockedCompareExchange16_nf(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(i16*{{[a-z_ ]*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i16* %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i16 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchange, long Comperand) {
+ return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange acquire acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i32 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+long test_InterlockedCompareExchange_rel(long volatile *Destination, long Exchange, long Comperand) {
+ return _InterlockedCompareExchange_rel(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_rel(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange release monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i32 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+long test_InterlockedCompareExchange_nf(long volatile *Destination, long Exchange, long Comperand) {
+ return _InterlockedCompareExchange_nf(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_nf(i32*{{[a-z_ ]*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i32* %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i32 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+__int64 test_InterlockedCompareExchange64_acq(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
+ return _InterlockedCompareExchange64_acq(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_acq(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange acquire acquire
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i64 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+__int64 test_InterlockedCompareExchange64_rel(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
+ return _InterlockedCompareExchange64_rel(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_rel(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange release monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i64 [[RESULT]]
+// CHECK-ARM-ARM64: }
+
+__int64 test_InterlockedCompareExchange64_nf(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) {
+ return _InterlockedCompareExchange64_nf(Destination, Exchange, Comperand);
+}
+// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_nf(i64*{{[a-z_ ]*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{
+// CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile i64* %Destination, i64 %Comperand, i64 %Exchange monotonic monotonic
+// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0
+// CHECK-ARM-ARM64: ret i64 [[RESULT]]
+// CHECK-ARM-ARM64: }
#endif
#if !defined(__aarch64__)