LANGBUILTIN(_InterlockedAnd8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedAnd16, "ssD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedAnd, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedAnd64, "LLiLLiD*LLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedCompareExchange8, "ccD*cc", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedCompareExchange16, "ssD*ss", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedCompareExchange, "LiLiD*LiLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedCompareExchangePointer, "v*v*D*v*v*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedDecrement16, "ssD*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedDecrement, "LiLiD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedDecrement64, "LLiLLiD*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchange, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchange8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchange16, "ssD*s", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedExchange64, "LLiLLiD*LLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeAdd8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeAdd16, "ssD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeAdd, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedExchangeAdd64, "LLiLLiD*LLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangePointer, "v*v*D*v*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeSub8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeSub16, "ssD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeSub, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedExchangeSub64, "LLiLLiD*LLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedIncrement16, "ssD*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedIncrement, "LiLiD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedIncrement64, "LLiLLiD*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedOr8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedOr16, "ssD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedOr, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedOr64, "LLiLLiD*LLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor16, "ssD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedXor64, "LLiLLiD*LLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__noop, "i.", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__readfsdword, "ULiULi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotl8, "UcUcUc", "n", ALL_MS_LANGUAGES)
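
(Aside on the Builtins.def type strings, for readers decoding the removed
entries: the first type is the return type and the rest are parameters; `c` is
char, `s` is short, `Li` is long, `LLi` is long long (`__int64`); modifiers
follow the type they modify, so `LLiD*` is a pointer to volatile __int64, and
the "n" attribute marks the builtin nothrow. Each removed 64-bit entry thus
decodes to the prototype it regains in Intrin.h below; for example, taking the
parameter names from the header definitions added later in this patch:

  __int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value); /* "LLiLLiD*LLi" */
)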
case Builtin::BI_InterlockedExchange8:
case Builtin::BI_InterlockedExchange16:
case Builtin::BI_InterlockedExchange:
- case Builtin::BI_InterlockedExchange64:
case Builtin::BI_InterlockedExchangePointer:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI_InterlockedCompareExchangePointer: {
return RValue::get(Builder.CreateExtractValue(CXI, 0));
}
case Builtin::BI_InterlockedIncrement16:
- case Builtin::BI_InterlockedIncrement:
- case Builtin::BI_InterlockedIncrement64: {
+ case Builtin::BI_InterlockedIncrement: {
llvm::Type *IntTy = ConvertType(E->getType());
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Add, EmitScalarExpr(E->getArg(0)),
        ConstantInt::get(IntTy, 1),
        llvm::AtomicOrdering::SequentiallyConsistent);
return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1)));
}
case Builtin::BI_InterlockedDecrement16:
- case Builtin::BI_InterlockedDecrement:
- case Builtin::BI_InterlockedDecrement64: {
+ case Builtin::BI_InterlockedDecrement: {
llvm::Type *IntTy = ConvertType(E->getType());
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Sub, EmitScalarExpr(E->getArg(0)),
        ConstantInt::get(IntTy, 1),
        llvm::AtomicOrdering::SequentiallyConsistent);
    return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1)));
  }
case Builtin::BI_InterlockedAnd8:
case Builtin::BI_InterlockedAnd16:
case Builtin::BI_InterlockedAnd:
- case Builtin::BI_InterlockedAnd64:
return EmitBinaryAtomic(*this, AtomicRMWInst::And, E);
case Builtin::BI_InterlockedExchangeAdd8:
case Builtin::BI_InterlockedExchangeAdd16:
case Builtin::BI_InterlockedExchangeAdd:
- case Builtin::BI_InterlockedExchangeAdd64:
return EmitBinaryAtomic(*this, AtomicRMWInst::Add, E);
case Builtin::BI_InterlockedExchangeSub8:
case Builtin::BI_InterlockedExchangeSub16:
case Builtin::BI_InterlockedExchangeSub:
- case Builtin::BI_InterlockedExchangeSub64:
return EmitBinaryAtomic(*this, AtomicRMWInst::Sub, E);
case Builtin::BI_InterlockedOr8:
case Builtin::BI_InterlockedOr16:
case Builtin::BI_InterlockedOr:
- case Builtin::BI_InterlockedOr64:
return EmitBinaryAtomic(*this, AtomicRMWInst::Or, E);
case Builtin::BI_InterlockedXor8:
case Builtin::BI_InterlockedXor16:
case Builtin::BI_InterlockedXor:
- case Builtin::BI_InterlockedXor64:
return EmitBinaryAtomic(*this, AtomicRMWInst::Xor, E);
case Builtin::BI__readfsdword: {
llvm::Type *IntTy = ConvertType(E->getType());
  long long _PrevVal =
      __atomic_fetch_or(_BitBase, 1ll << _BitPos, __ATOMIC_SEQ_CST);
return (_PrevVal >> _BitPos) & 1;
}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Add
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
+  return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange Sub
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
+  return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Increment
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedIncrement64(__int64 volatile *_Value) {
+  return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Decrement
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedDecrement64(__int64 volatile *_Value) {
+  return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked And
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
+  return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Or
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
+  return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Xor
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
+  return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+}
+/*----------------------------------------------------------------------------*\
+|* Interlocked Exchange
+\*----------------------------------------------------------------------------*/
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
+  __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+  return _Value;
+}
#endif
/*----------------------------------------------------------------------------*\
|* Barriers
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
-__int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
- return _InterlockedExchange64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedExchange64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
char test_InterlockedExchangeAdd8(char volatile *value, char mask) {
return _InterlockedExchangeAdd8(value, mask);
}
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
-__int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
- return _InterlockedExchangeAdd64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
char test_InterlockedExchangeSub8(char volatile *value, char mask) {
return _InterlockedExchangeSub8(value, mask);
}
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
-__int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
- return _InterlockedExchangeSub64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
char test_InterlockedOr8(char volatile *value, char mask) {
return _InterlockedOr8(value, mask);
}
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
-__int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
- return _InterlockedOr64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedOr64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
char test_InterlockedXor8(char volatile *value, char mask) {
return _InterlockedXor8(value, mask);
}
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
-__int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
- return _InterlockedXor64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedXor64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
char test_InterlockedAnd8(char volatile *value, char mask) {
return _InterlockedAnd8(value, mask);
}
// CHECK: ret i32 [[RESULT:%[0-9]+]]
// CHECK: }
-__int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
- return _InterlockedAnd64(value, mask);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedAnd64(i64*{{[a-z_ ]*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
-// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and i64* %value, i64 %mask seq_cst
-// CHECK: ret i64 [[RESULT:%[0-9]+]]
-// CHECK: }
-
char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange, char Comperand) {
return _InterlockedCompareExchange8(Destination, Exchange, Comperand);
}
// CHECK: ret i32 [[RESULT]]
// CHECK: }
-__int64 test_InterlockedIncrement64(__int64 volatile *Addend) {
- return _InterlockedIncrement64(Addend);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedIncrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw add i64* %Addend, i64 1 seq_cst
-// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1
-// CHECK: ret i64 [[RESULT]]
-// CHECK: }
-
short test_InterlockedDecrement16(short volatile *Addend) {
return _InterlockedDecrement16(Addend);
}
// CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1
// CHECK: ret i32 [[RESULT]]
// CHECK: }
-
-__int64 test_InterlockedDecrement64(__int64 volatile *Addend) {
- return _InterlockedDecrement64(Addend);
-}
-// CHECK: define{{.*}}i64 @test_InterlockedDecrement64(i64*{{[a-z_ ]*}}%Addend){{.*}}{
-// CHECK: [[TMP:%[0-9]+]] = atomicrmw sub i64* %Addend, i64 1 seq_cst
-// CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1
-// CHECK: ret i64 [[RESULT]]
-// CHECK: }
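
(For reference, a minimal caller sketch, not part of the patch: after this
change the 64-bit Interlocked functions are ordinary inline `__atomic_*`
wrappers from Intrin.h, yet they still lower to the same seq_cst atomicrmw IR
that the deleted CHECK lines expected. The function name `bump_counter` is
illustrative only.

  #include <intrin.h>

  __int64 bump_counter(__int64 volatile *counter) {
    /* __atomic_add_fetch inside _InterlockedIncrement64 emits
       "atomicrmw add i64* %counter, i64 1 seq_cst" followed by an add of 1,
       matching the codegen of the removed builtin. */
    return _InterlockedIncrement64(counter);
  }
)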