class AtomicExpr : public Expr {
public:
enum AtomicOp { Load, Store, CmpXchgStrong, CmpXchgWeak, Xchg,
- Add, Sub, And, Or, Xor };
+ Add, Sub, And, Or, Xor, Init };
private:
enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, END_EXPR };
Stmt* SubExprs[END_EXPR];
void setOrder(Expr *E) {
  SubExprs[ORDER] = E;
}
Expr *getVal1() const {
+ if (Op == Init)
+ return cast<Expr>(SubExprs[ORDER]);
assert(NumSubExprs >= 3);
return cast<Expr>(SubExprs[VAL1]);
}
void setVal1(Expr *E) {
+ if (Op == Init) {
+ SubExprs[ORDER] = E;
+ return;
+ }
assert(NumSubExprs >= 3);
SubExprs[VAL1] = E;
}
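Because __atomic_init takes no memory-order argument, the patch reuses the ORDER slot of SubExprs to hold the initial value, which is why getVal1() and setVal1() redirect when the op is Init. A minimal sketch of the source form these accessors end up serving (illustrative, not part of the patch):

_Atomic(int) counter;

void reset_counter(void) {
  /* PTR holds &counter; the value 0 occupies SubExprs[ORDER]. */
  __atomic_init(&counter, 0);
}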
/// in ARC cause blocks to be copied; this is for cases where that
/// would not otherwise be guaranteed, such as when casting to a
/// non-block pointer type.
- CK_ARCExtendBlockObject
+ CK_ARCExtendBlockObject,
+
+ /// \brief Converts from _Atomic(T) to T.
+ CK_AtomicToNonAtomic,
+ /// \brief Converts from T to _Atomic(T).
+ CK_NonAtomicToAtomic
};
#define CK_Invalid ((CastKind) -1)
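For reference, a rough sketch of the implicit conversions that should produce the two new cast kinds once the Sema changes below hand them out (variable names are illustrative):

_Atomic(int) ai;

void convert(int i) {
  ai = i; /* int -> _Atomic(int): CK_NonAtomicToAtomic */
  i = ai; /* _Atomic(int) -> int: CK_AtomicToNonAtomic */
}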
BUILTIN(__atomic_fetch_xor, "v.", "t")
BUILTIN(__atomic_thread_fence, "vi", "n")
BUILTIN(__atomic_signal_fence, "vi", "n")
+BUILTIN(__atomic_init, "v.", "t")
BUILTIN(__atomic_is_lock_free, "iz", "n")
// Non-overloaded atomic builtins.
case CK_Dependent:
case CK_LValueToRValue:
case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_PointerToBoolean:
case CK_IntegralToBoolean:
case CK_FloatingToBoolean:
return "ARCReclaimReturnedObject";
case CK_ARCExtendBlockObject:
return "ARCCExtendBlockObject";
+ case CK_AtomicToNonAtomic:
+ return "AtomicToNonAtomic";
+ case CK_NonAtomicToAtomic:
+ return "NonAtomicToAtomic";
}
llvm_unreachable("Unhandled cast kind!");
case CStyleCastExprClass: {
const CastExpr *CE = cast<CastExpr>(this);
+ // If we're promoting an integer to an _Atomic type then this is constant
+ // if the integer is constant. We also need to check the converse in case
+ // someone does something like:
+ //
+ // int a = (_Atomic(int))42;
+ //
+ // I doubt anyone would write code like this directly, but it's quite
+ // possible as the result of macro expansions.
+ if (CE->getCastKind() == CK_NonAtomicToAtomic ||
+ CE->getCastKind() == CK_AtomicToNonAtomic)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
// Handle bitcasts of vector constants.
if (getType()->isVectorType() && CE->getCastKind() == CK_BitCast)
return CE->getSubExpr()->isConstantInitializer(Ctx, false);
default:
break;
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
return StmtVisitorTy::Visit(E->getSubExpr());
return Error(E);
case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
}
switch (cast<CastExpr>(E)->getCastKind()) {
case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_IntegralToBoolean:
case CK_IntegralCast:
void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
const char *Name = 0;
switch (Node->getOp()) {
+ case AtomicExpr::Init:
+ Name = "__atomic_init(";
+ break;
case AtomicExpr::Load:
Name = "__atomic_load(";
break;
PrintExpr(Node->getVal2());
OS << ", ";
}
- PrintExpr(Node->getOrder());
+ if (Node->getOp() != AtomicExpr::Init)
+ PrintExpr(Node->getOrder());
if (Node->isCmpXChg()) {
OS << ", ";
PrintExpr(Node->getOrderFail());
llvm::Value *value = EmitScalarExpr(init);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
- EmitStoreThroughLValue(RValue::get(value), lvalue);
+ EmitStoreThroughLValue(RValue::get(value), lvalue, /*isInit*/ true);
return;
}
// Otherwise just do a simple store.
else
- EmitStoreOfScalar(zero, tempLV);
+ EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
}
// Emit the initializer.
// both __weak and __strong, but __weak got filtered out above.
if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
- EmitStoreOfScalar(value, lvalue);
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
EmitARCRelease(oldValue, /*precise*/ false);
return;
}
- EmitStoreOfScalar(value, lvalue);
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
/// EmitScalarInit - Initialize the given lvalue with the given object.
void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
if (!lifetime)
- return EmitStoreThroughLValue(RValue::get(init), lvalue);
+ return EmitStoreThroughLValue(RValue::get(init), lvalue, /*isInit*/ true);
switch (lifetime) {
case Qualifiers::OCL_None:
break;
}
- EmitStoreOfScalar(init, lvalue);
+ EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
}
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
RValue rvalue = EmitReferenceBindingToExpr(init, D);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
- EmitStoreThroughLValue(rvalue, lvalue);
+ EmitStoreThroughLValue(rvalue, lvalue, /*isInit*/ true);
} else if (!hasAggregateLLVMType(type)) {
EmitScalarInit(init, D, lvalue, capturedByInit);
} else if (type->isAnyComplexType()) {
if (doStore) {
LValue lv = MakeAddrLValue(DeclPtr, Ty,
getContext().getDeclAlign(&D));
- EmitStoreOfScalar(Arg, lv);
+ EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
}
}
Load->setAlignment(Alignment);
if (TBAAInfo)
CGM.DecorateInstruction(Load, TBAAInfo);
+ // If this is an atomic type, all normal reads must be atomic.
+ if (Ty->isAtomicType())
+ Load->setAtomic(llvm::SequentiallyConsistent);
return EmitFromMemory(Load, Ty);
}
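With this change, an ordinary read of an _Atomic l-value is emitted as a sequentially consistent atomic load. An illustrative FileCheck-style sketch in the same spirit as the tests added below (not part of the patch):

// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
_Atomic(int) g;

int read_g(void) {
  return g; // CHECK: load atomic
}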
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment,
QualType Ty,
- llvm::MDNode *TBAAInfo) {
+ llvm::MDNode *TBAAInfo,
+ bool isInit) {
Value = EmitToMemory(Value, Ty);
llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
Store->setAlignment(Alignment);
if (TBAAInfo)
CGM.DecorateInstruction(Store, TBAAInfo);
+ if (!isInit && Ty->isAtomicType())
+ Store->setAtomic(llvm::SequentiallyConsistent);
}
-void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue) {
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
+ bool isInit) {
EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getAlignment().getQuantity(), lvalue.getType(),
- lvalue.getTBAAInfo());
+ lvalue.getTBAAInfo(), isInit);
}
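Conversely, a plain assignment through an _Atomic l-value now becomes an atomic store, while stores with isInit set stay non-atomic. A companion sketch (illustrative):

// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
_Atomic(int) g;

void write_g(int v) {
  g = v; // CHECK: store atomic
}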
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+                                             bool isInit) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
}
assert(Src.isScalar() && "Can't emit an agg store with this method");
- EmitStoreOfScalar(Src.getScalarVal(), Dst);
+ EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
}
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
case CK_Dependent:
llvm_unreachable("dependent cast kind in IR gen!");
+
+ // These two casts are currently treated as no-ops, although they could
+ // potentially be real operations depending on the target's ABI.
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic:
case CK_NoOp:
case CK_LValueToRValue:
case AtomicExpr::CmpXchgWeak:
case AtomicExpr::CmpXchgStrong:
case AtomicExpr::Store:
+ case AtomicExpr::Init:
case AtomicExpr::Load: assert(0 && "Already handled!");
case AtomicExpr::Add: Op = llvm::AtomicRMWInst::Add; break;
case AtomicExpr::Sub: Op = llvm::AtomicRMWInst::Sub; break;
getContext().getTargetInfo().getMaxAtomicInlineWidth();
bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
+
llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
Ptr = EmitScalarExpr(E->getPtr());
+
+ if (E->getOp() == AtomicExpr::Init) {
+ assert(!Dest && "Init does not return a value");
+ Val1 = EmitScalarExpr(E->getVal1());
+ llvm::StoreInst *Store = Builder.CreateStore(Val1, Ptr);
+ Store->setAlignment(Align);
+ Store->setVolatile(E->isVolatile());
+ return RValue::get(0);
+ }
+
Order = EmitScalarExpr(E->getOrder());
if (E->isCmpXChg()) {
Val1 = EmitScalarExpr(E->getVal1());
// enforce that in general.
break;
}
- if (E->getOp() == AtomicExpr::Store)
+ if (E->getOp() == AtomicExpr::Store || E->getOp() == AtomicExpr::Init)
return RValue::get(0);
return ConvertTempToRValue(*this, E->getType(), OrigDest);
}
case CK_LValueToRValue: // hope for downstream optimization
case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_UserDefinedConversion:
case CK_ConstructorConversion:
assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
switch (CK) {
case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+ // Atomic to non-atomic casts may be more than a no-op on some platforms
+ // and for some types.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_LValueToRValue:
case CK_UserDefinedConversion:
return CGM.getCXXABI().EmitMemberPointerConversion(C, E);
case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
return C;
Value *Src = Visit(const_cast<Expr*>(E));
return Builder.CreateBitCast(Src, ConvertType(DestTy));
}
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
case CK_NoOp:
case CK_UserDefinedConversion:
return Visit(const_cast<Expr*>(E));
QualType type = E->getSubExpr()->getType();
llvm::Value *value = EmitLoadOfLValue(LV);
llvm::Value *input = value;
+ llvm::PHINode *atomicPHI = 0;
int amount = (isInc ? 1 : -1);
+ if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(value->getType(), 2);
+ atomicPHI->addIncoming(value, startBB);
+ type = atomicTy->getValueType();
+ value = atomicPHI;
+ }
+
// Special case of integer increment that we have to check first: bool++.
// Due to promotion rules, we get:
// bool++ -> bool = bool + 1
value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LV.getAddress(), atomicPHI,
+ value, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ // For a post-increment, the result is the value the successful cmpxchg
+ // actually replaced, not the first value loaded.
+ return isPre ? value : old;
+ }
// Store the updated result through the lvalue.
if (LV.isBitField())
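For increment and decrement of an atomic l-value, the code above builds a compare-exchange retry loop out of the atomic_op and atomic_cont blocks. A hedged sketch of the resulting shape, with the IR spelled out in comments (register names invented):

/* Roughly, for `ai++`:
 *   entry:      %orig = load atomic i32* %ai seq_cst
 *   atomic_op:  %old  = phi i32 [ %orig, %entry ], [ %seen, %atomic_op ]
 *               %new  = add i32 %old, 1
 *               %seen = cmpxchg i32* %ai, i32 %old, i32 %new seq_cst
 *               br i1 (%seen == %old), label %atomic_cont, label %atomic_op
 */
_Atomic(int) ai;

void bump(void) {
  ai++;
}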
OpInfo.LHS = EmitLoadOfLValue(LHSLV);
OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
E->getComputationLHSType());
+
+ llvm::PHINode *atomicPHI = 0;
+ if (const AtomicType *atomicTy = OpInfo.Ty->getAs<AtomicType>()) {
+ // FIXME: For floating point types, we should be saving and restoring the
+ // floating point environment in the loop.
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
+ atomicPHI->addIncoming(OpInfo.LHS, startBB);
+ OpInfo.Ty = atomicTy->getValueType();
+ OpInfo.LHS = atomicPHI;
+ }
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
// Convert the result back to the LHS type.
Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LHSLV.getAddress(), atomicPHI,
+ Result, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return LHSLV;
+ }
// Store the result value into the LHS lvalue. Bit-fields are handled
// specially because the result is altered by the store, i.e., [C99 6.5.16p1]
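Compound assignments on atomics reuse the same retry-loop strategy (see the codegen test added near the end of this patch). A small sketch of a source form that should now take this path:

_Atomic(int) ai;

void scale(void) {
  ai *= 3; /* lowers to the atomic_op / atomic_cont cmpxchg loop */
}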
/// the LLVM value representation.
void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment, QualType Ty,
- llvm::MDNode *TBAAInfo = 0);
+ llvm::MDNode *TBAAInfo = 0, bool isInit = false);
/// EmitStoreOfScalar - Store a scalar value to an address, taking
/// care to appropriately convert from the memory representation to
/// the LLVM value representation. The l-value must be a simple
- /// l-value.
- void EmitStoreOfScalar(llvm::Value *value, LValue lvalue);
+ /// l-value. The isInit flag indicates whether this is an initialization.
+ /// If so, atomic qualifiers are ignored and the store is always non-atomic.
+ void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit = false);
/// EmitLoadOfLValue - Given an expression that represents a value lvalue,
/// this method emits the address of the lvalue, then loads the result as an
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
- void EmitStoreThroughLValue(RValue Src, LValue Dst);
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit = false);
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
/// EmitStoreThroughLValue - Store Src into Dst with same constraints as
Builder.defineMacro("__GNUC__", "4");
Builder.defineMacro("__GXX_ABI_VERSION", "1002");
+ // Define macros for the C11 / C++11 memory orderings.
+ Builder.defineMacro("__ATOMIC_RELAXED", "0");
+ Builder.defineMacro("__ATOMIC_CONSUME", "1");
+ Builder.defineMacro("__ATOMIC_ACQUIRE", "2");
+ Builder.defineMacro("__ATOMIC_RELEASE", "3");
+ Builder.defineMacro("__ATOMIC_ACQ_REL", "4");
+ Builder.defineMacro("__ATOMIC_SEQ_CST", "5");
+
// As sad as it is, enough software depends on the __VERSION__ for version
// checks that it is necessary to report 4.2.1 (the base GCC version we claim
// compatibility with) first.
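These macro values line up with the integer order arguments the __atomic_* builtins accept, so user code can write, for example (illustrative):

void fence_example(void) {
  __atomic_thread_fence(__ATOMIC_SEQ_CST);
  __atomic_signal_fence(__ATOMIC_ACQUIRE);
}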
.Case("arc_cf_code_audited", true)
// C11 features
.Case("c_alignas", LangOpts.C11)
+ .Case("c_atomic", LangOpts.C11)
.Case("c_generic_selections", LangOpts.C11)
.Case("c_static_assert", LangOpts.C11)
// C++0x features
.Case("cxx_access_control_sfinae", LangOpts.CPlusPlus0x)
.Case("cxx_alias_templates", LangOpts.CPlusPlus0x)
.Case("cxx_alignas", LangOpts.CPlusPlus0x)
+ .Case("cxx_atomic", LangOpts.CPlusPlus0x)
.Case("cxx_attributes", LangOpts.CPlusPlus0x)
.Case("cxx_auto_type", LangOpts.CPlusPlus0x)
//.Case("cxx_constexpr", false);
return llvm::StringSwitch<bool>(II->getName())
// C11 features supported by other languages as extensions.
.Case("c_alignas", true)
+ .Case("c_atomic", true)
.Case("c_generic_selections", true)
.Case("c_static_assert", true)
// C++0x features supported by other languages as extensions.
+ .Case("cxx_atomic", LangOpts.CPlusPlus)
.Case("cxx_deleted_functions", LangOpts.CPlusPlus)
.Case("cxx_explicit_conversions", LangOpts.CPlusPlus)
.Case("cxx_inline_namespaces", LangOpts.CPlusPlus)
if (SrcExpr.isInvalid())
return;
QualType SrcType = SrcExpr.get()->getType();
+
+ // You can cast an _Atomic(T) to anything you can cast a T to.
+ if (const AtomicType *AtomicSrcType = SrcType->getAs<AtomicType>())
+ SrcType = AtomicSrcType->getValueType();
+
assert(!SrcType->isPlaceholderType());
if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
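In other words, an explicit cast looks through the _Atomic wrapper on its source operand. A hedged sketch of what this permits:

_Atomic(int) ai;

void cast_example(void) {
  double d = (double)ai; /* checked as if casting from a plain int */
  (void)d;
}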
return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Load);
case Builtin::BI__atomic_store:
return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Store);
+ case Builtin::BI__atomic_init:
+ return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Init);
case Builtin::BI__atomic_exchange:
return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xchg);
case Builtin::BI__atomic_compare_exchange_strong:
NumVals = 2;
NumOrders = 2;
}
+ if (Op == AtomicExpr::Init)
+ NumOrders = 0;
if (TheCall->getNumArgs() < NumVals+NumOrders+1) {
Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
}
QualType ResultType = ValType;
- if (Op == AtomicExpr::Store)
+ if (Op == AtomicExpr::Store || Op == AtomicExpr::Init)
ResultType = Context.VoidTy;
else if (Op == AtomicExpr::CmpXchgWeak || Op == AtomicExpr::CmpXchgStrong)
ResultType = Context.BoolTy;
SubExprs.push_back(Ptr);
if (Op == AtomicExpr::Load) {
SubExprs.push_back(TheCall->getArg(1)); // Order
+ } else if (Op == AtomicExpr::Init) {
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
} else if (Op != AtomicExpr::CmpXchgWeak && Op != AtomicExpr::CmpXchgStrong) {
SubExprs.push_back(TheCall->getArg(2)); // Order
SubExprs.push_back(TheCall->getArg(1)); // Val1
// pointers. Everything else should be possible.
QualType SrcTy = Src.get()->getType();
+ if (const AtomicType *SrcAtomicTy = SrcTy->getAs<AtomicType>())
+ SrcTy = SrcAtomicTy->getValueType();
+ if (const AtomicType *DestAtomicTy = DestTy->getAs<AtomicType>())
+ DestTy = DestAtomicTy->getValueType();
+
if (Context.hasSameUnqualifiedType(SrcTy, DestTy))
return CK_NoOp;
LHSType = Context.getCanonicalType(LHSType).getUnqualifiedType();
RHSType = Context.getCanonicalType(RHSType).getUnqualifiedType();
- // We can't do assignment from/to atomics yet.
- if (LHSType->isAtomicType())
- return Incompatible;
// Common case: no conversion required.
if (LHSType == RHSType) {
return Compatible;
}
+ if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) {
+ if (AtomicTy->getValueType() == RHSType) {
+ Kind = CK_NonAtomicToAtomic;
+ return Compatible;
+ }
+ }
+
+ if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(RHSType)) {
+ if (AtomicTy->getValueType() == LHSType) {
+ Kind = CK_AtomicToNonAtomic;
+ return Compatible;
+ }
+ }
+
// If the left-hand side is a reference type, then we are in a
// (rare!) case where we've allowed the use of references in C,
// e.g., as a parameter type in a built-in function. In this case,
if (LHS.isInvalid() || RHS.isInvalid())
return QualType();
+
if (!LHS.get()->getType()->isArithmeticType() ||
- !RHS.get()->getType()->isArithmeticType())
+ !RHS.get()->getType()->isArithmeticType()) {
+ if (IsCompAssign &&
+ LHS.get()->getType()->isAtomicType() &&
+ RHS.get()->getType()->isArithmeticType())
+ return compType;
return InvalidOperands(Loc, LHS, RHS);
+ }
// Check for division by zero.
if (IsDiv &&
return compType;
}
+ if (LHS.get()->getType()->isAtomicType() &&
+     RHS.get()->getType()->isArithmeticType()) {
+   // CompLHSTy is null unless this is a compound assignment.
+   if (CompLHSTy)
+     *CompLHSTy = LHS.get()->getType();
+   return compType;
+ }
+
// Put any potential pointer into PExp
Expr* PExp = LHS.get(), *IExp = RHS.get();
if (IExp->getType()->isAnyPointerType())
return compType;
}
+ if (LHS.get()->getType()->isAtomicType() &&
+     RHS.get()->getType()->isArithmeticType()) {
+   // CompLHSTy is null unless this is a compound assignment.
+   if (CompLHSTy)
+     *CompLHSTy = LHS.get()->getType();
+   return compType;
+ }
+
// Either ptr - int or ptr - ptr.
if (LHS.get()->getType()->isAnyPointerType()) {
QualType lpointee = LHS.get()->getType()->getPointeeType();
return S.Context.DependentTy;
QualType ResType = Op->getType();
+ // Atomic types can be used for increment / decrement wherever their
+ // non-atomic versions can, so ignore the _Atomic() specifier for the
+ // purposes of this check.
+ if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
+ ResType = ResAtomicType->getValueType();
+
assert(!ResType.isNull() && "no type for increment/decrement expression");
if (S.getLangOptions().CPlusPlus && ResType->isBooleanType()) {
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject: // Fall-through.
+ // The analyser can ignore atomic casts for now, although some future
+ // checkers may want to make certain that you're not modifying the same
+ // value through atomic and nonatomic pointers.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
// True no-ops.
case CK_NoOp:
case CK_FunctionToPointerDecay: {
--- /dev/null
+// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
+
+// Check that no atomic operations are used in any initialisation of _Atomic
+// types.
+
+_Atomic(int) i = 42;
+
+void foo()
+{
+ _Atomic(int) j = 12; // CHECK: store
+ // CHECK-NOT: atomic
+ __atomic_init(&j, 42); // CHECK: store
+ // CHECK-NOT: atomic
+}
--- /dev/null
+// RUN: %clang_cc1 -emit-llvm %s -o - | FileCheck %s
+
+void foo(void)
+{
+ _Atomic(int) i = 0;
+ // Check that multiplies / divides on atomics produce a cmpxchg loop.
+ i *= 2; // CHECK: cmpxchg
+ i /= 2; // CHECK: cmpxchg
+ // These should be emitting atomicrmw instructions, but they aren't yet
+ i += 2; // CHECK: cmpxchg
+ i -= 2; // CHECK: cmpxchg
+ i++; // CHECK: cmpxchg
+ i--; // CHECK: cmpxchg
+}
// RUN: %clang_cc1 -E -std=c1x %s -o - | FileCheck --check-prefix=CHECK-1X %s
// RUN: %clang_cc1 -E %s -o - | FileCheck --check-prefix=CHECK-NO-1X %s
+#if __has_feature(c_atomic)
+int has_atomic();
+#else
+int no_atomic();
+#endif
+
+// CHECK-1X: has_atomic
+// CHECK-NO-1X: no_atomic
+
#if __has_feature(c_static_assert)
int has_static_assert();
#else
// RUN: %clang_cc1 -E -std=c++11 %s -o - | FileCheck --check-prefix=CHECK-0X %s
// RUN: %clang_cc1 -E %s -o - | FileCheck --check-prefix=CHECK-NO-0X %s
+#if __has_feature(cxx_atomic)
+int has_atomic();
+#else
+int no_atomic();
+#endif
+
+// CHECK-0X: has_atomic
+// CHECK-NO-0X: no_atomic
+
#if __has_feature(cxx_lambdas)
int has_lambdas();
#else