granicus.if.org Git - clang/commitdiff
Initial implementation of __atomic_* (everything except __atomic_is_lock_free).
author Eli Friedman <eli.friedman@gmail.com>
Tue, 11 Oct 2011 02:20:01 +0000 (02:20 +0000)
committer Eli Friedman <eli.friedman@gmail.com>
Tue, 11 Oct 2011 02:20:01 +0000 (02:20 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@141632 91177308-0d34-0410-b5e6-96231b3b80d8

27 files changed:
include/clang/AST/Expr.h
include/clang/AST/RecursiveASTVisitor.h
include/clang/Basic/Builtins.def
include/clang/Basic/DiagnosticSemaKinds.td
include/clang/Basic/StmtNodes.td
include/clang/Sema/Sema.h
include/clang/Serialization/ASTBitCodes.h
lib/AST/Expr.cpp
lib/AST/ExprClassification.cpp
lib/AST/ExprConstant.cpp
lib/AST/ItaniumMangle.cpp
lib/AST/StmtPrinter.cpp
lib/AST/StmtProfile.cpp
lib/CodeGen/CGBuiltin.cpp
lib/CodeGen/CGExpr.cpp
lib/CodeGen/CGExprAgg.cpp
lib/CodeGen/CGExprComplex.cpp
lib/CodeGen/CGExprScalar.cpp
lib/CodeGen/CodeGenFunction.h
lib/Sema/SemaChecking.cpp
lib/Sema/TreeTransform.h
lib/Serialization/ASTReaderStmt.cpp
lib/Serialization/ASTWriterStmt.cpp
lib/StaticAnalyzer/Core/ExprEngine.cpp
test/CodeGen/atomic-ops.c [new file with mode: 0644]
test/Sema/atomic-ops.c [new file with mode: 0644]
tools/libclang/CXCursor.cpp

index 882124bfdd6ec13ec58107992ea2fc1d929dc04d..d7b6247b63cc65d51a1dcc11f522edab77dce35d 100644 (file)
@@ -4162,6 +4162,152 @@ public:
   // Iterators
   child_range children() { return child_range(&SrcExpr, &SrcExpr+1); }
 };
+
+/// AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*,
+/// __atomic_load, __atomic_store, and __atomic_compare_exchange_*, which
+/// correspond to the similarly-named C++0x functions.  All of these builtins
+/// take one primary pointer and at least one memory order.
+class AtomicExpr : public Expr {
+public:
+  enum AtomicOp { Load, Store, CmpXchgStrong, CmpXchgWeak, Xchg,
+                  Add, Sub, And, Or, Xor };
+private:
+  enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, END_EXPR };
+  Stmt* SubExprs[END_EXPR];
+  unsigned NumSubExprs;
+  SourceLocation BuiltinLoc, RParenLoc;
+  AtomicOp Op;
+
+public:
+  // Constructor for Load
+  AtomicExpr(SourceLocation BLoc, Expr *ptr, Expr *order, QualType t,
+             AtomicOp op, SourceLocation RP,
+             bool TypeDependent, bool ValueDependent)
+    : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
+           TypeDependent, ValueDependent,
+           ptr->isInstantiationDependent(),
+           ptr->containsUnexpandedParameterPack()),
+      BuiltinLoc(BLoc), RParenLoc(RP), Op(op) {
+      assert(op == Load && "single-argument atomic must be load");
+      SubExprs[PTR] = ptr;
+      SubExprs[ORDER] = order;
+      NumSubExprs = 2;
+    }
+
+  // Constructor for Store, Xchg, Add, Sub, And, Or, Xor
+  AtomicExpr(SourceLocation BLoc, Expr *ptr, Expr *val, Expr *order,
+             QualType t, AtomicOp op, SourceLocation RP,
+             bool TypeDependent, bool ValueDependent)
+      : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
+             TypeDependent, ValueDependent,
+             (ptr->isInstantiationDependent() ||
+              val->isInstantiationDependent()),
+             (ptr->containsUnexpandedParameterPack() ||
+              val->containsUnexpandedParameterPack())),
+        BuiltinLoc(BLoc), RParenLoc(RP), Op(op) {
+        assert(!isCmpXChg() && op != Load &&
+               "two-argument atomic store or binop");
+        SubExprs[PTR] = ptr;
+        SubExprs[ORDER] = order;
+        SubExprs[VAL1] = val;
+        NumSubExprs = 3;
+      }
+
+  // Constructor for CmpXchgStrong, CmpXchgWeak
+  AtomicExpr(SourceLocation BLoc, Expr *ptr, Expr *val1, Expr *val2,
+             Expr *order, Expr *order_fail, QualType t, AtomicOp op,
+             SourceLocation RP, bool TypeDependent, bool ValueDependent)
+    : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
+           TypeDependent, ValueDependent,
+           (ptr->isInstantiationDependent() ||
+            val1->isInstantiationDependent() ||
+            val2->isInstantiationDependent()),
+           (ptr->containsUnexpandedParameterPack() ||
+            val1->containsUnexpandedParameterPack() ||
+            val2->containsUnexpandedParameterPack())),
+      BuiltinLoc(BLoc), RParenLoc(RP), Op(op) {
+      assert(isCmpXChg() && "three-argument atomic must be cmpxchg");
+      SubExprs[PTR] = ptr;
+      SubExprs[ORDER] = order;
+      SubExprs[VAL1] = val1;
+      SubExprs[VAL2] = val2;
+      SubExprs[ORDER_FAIL] = order_fail;
+      NumSubExprs = 5;
+    }
+
+  /// \brief Build an empty AtomicExpr.
+  explicit AtomicExpr(EmptyShell Empty) : Expr(AtomicExprClass, Empty) { }
+
+  Expr *getPtr() const {
+    return cast<Expr>(SubExprs[PTR]);
+  }
+  void setPtr(Expr *E) {
+    SubExprs[PTR] = E;
+  }
+  Expr *getOrder() const {
+    return cast<Expr>(SubExprs[ORDER]);
+  }
+  void setOrder(Expr *E) {
+    SubExprs[ORDER] = E;
+  }
+  Expr *getVal1() const {
+    assert(NumSubExprs >= 3);
+    return cast<Expr>(SubExprs[VAL1]);
+  }
+  void setVal1(Expr *E) {
+    assert(NumSubExprs >= 3);
+    SubExprs[VAL1] = E;
+  }
+  Expr *getOrderFail() const {
+    assert(NumSubExprs == 5);
+    return cast<Expr>(SubExprs[ORDER_FAIL]);
+  }
+  void setOrderFail(Expr *E) {
+    assert(NumSubExprs == 5);
+    SubExprs[ORDER_FAIL] = E;
+  }
+  Expr *getVal2() const {
+    assert(NumSubExprs == 5);
+    return cast<Expr>(SubExprs[VAL2]);
+  }
+  void setVal2(Expr *E) {
+    assert(NumSubExprs == 5);
+    SubExprs[VAL2] = E;
+  }
+
+  AtomicOp getOp() const { return Op; }
+  void setOp(AtomicOp op) { Op = op; }
+  unsigned getNumSubExprs() const { return NumSubExprs; }
+  void setNumSubExprs(unsigned num) { NumSubExprs = num; }
+
+  bool isVolatile() const {
+    return getPtr()->getType()->getPointeeType().isVolatileQualified();
+  }
+
+  bool isCmpXChg() const {
+    return getOp() == AtomicExpr::CmpXchgStrong ||
+           getOp() == AtomicExpr::CmpXchgWeak;
+  }
+
+  SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
+  void setBuiltinLoc(SourceLocation L) { BuiltinLoc = L; }
+
+  SourceLocation getRParenLoc() const { return RParenLoc; }
+  void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+  SourceRange getSourceRange() const {
+    return SourceRange(BuiltinLoc, RParenLoc);
+  }
+  static bool classof(const Stmt *T) {
+    return T->getStmtClass() == AtomicExprClass;
+  }
+  static bool classof(const AtomicExpr *) { return true; }
+
+  // Iterators
+  child_range children() {
+    return child_range(SubExprs, SubExprs+NumSubExprs);
+  }
+};
 }  // end namespace clang
 
 #endif
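
The operand shapes the three AtomicExpr constructors distinguish map onto the builtin call forms below; a minimal C sketch, assuming the memory_order enum defined in the test files added by this commit:

    _Atomic(int) a;
    int expected = 0;
    int v   = __atomic_load(&a, memory_order_seq_cst);          // PTR, ORDER
    __atomic_store(&a, 1, memory_order_seq_cst);                // PTR, VAL1, ORDER
    int old = __atomic_fetch_add(&a, 1, memory_order_relaxed);  // PTR, VAL1, ORDER
    _Bool ok = __atomic_compare_exchange_strong(&a, &expected, 1,
                   memory_order_seq_cst, memory_order_seq_cst); // PTR, VAL1, VAL2, ORDER, ORDER_FAIL
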
index a224bb0a40c747c5f7e0fd0f6ce9e07ecf613b41..0ec09c9b09e06cfef3cb6afd38014afa7f4603a0 100644 (file)
@@ -1992,6 +1992,7 @@ DEF_TRAVERSE_STMT(SizeOfPackExpr, { })
 DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmPackExpr, { })
 DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmExpr, { })
 DEF_TRAVERSE_STMT(MaterializeTemporaryExpr, { })
+DEF_TRAVERSE_STMT(AtomicExpr, { })
 
 // These literals (all of them) do not need any action.
 DEF_TRAVERSE_STMT(IntegerLiteral, { })
index 50b51c7a4f7b0732be1cb41d0a91302903336909..60bcde372ac95c652756cddf044fd49438a53fcd 100644 (file)
@@ -585,7 +585,18 @@ BUILTIN(__sync_swap_4, "iiD*i.", "n")
 BUILTIN(__sync_swap_8, "LLiLLiD*LLi.", "n")
 BUILTIN(__sync_swap_16, "LLLiLLLiD*LLLi.", "n")
 
-
+BUILTIN(__atomic_load, "v.", "t")
+BUILTIN(__atomic_store, "v.", "t")
+BUILTIN(__atomic_exchange, "v.", "t")
+BUILTIN(__atomic_compare_exchange_strong, "v.", "t")
+BUILTIN(__atomic_compare_exchange_weak, "v.", "t")
+BUILTIN(__atomic_fetch_add, "v.", "t")
+BUILTIN(__atomic_fetch_sub, "v.", "t")
+BUILTIN(__atomic_fetch_and, "v.", "t")
+BUILTIN(__atomic_fetch_or, "v.", "t")
+BUILTIN(__atomic_fetch_xor, "v.", "t")
+BUILTIN(__atomic_thread_fence, "vi", "t")
+BUILTIN(__atomic_signal_fence, "vi", "t")
 
 // Non-overloaded atomic builtins.
 BUILTIN(__sync_synchronize, "v.", "n")
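
Decoding one of the new table entries, per the signature/attribute legend at the top of Builtins.def (a reader's aid, not part of the patch):

    // BUILTIN(__atomic_fetch_add, "v.", "t")
    //   "v." : returns void, variadic -- a placeholder signature only
    //   "t"  : the signature is meaningless; Sema performs custom type
    //          checking (SemaAtomicOpsOverloaded, added below)
    // The fences declare a real "vi" (void(int)) signature, where the int
    // argument is the memory order.
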
index a1be656fac35e97618d69599e1d00b8cae2b5a5a..240401b3939a8385df247a673439f04720cd250f 100644 (file)
@@ -4012,6 +4012,15 @@ def err_atomic_builtin_must_be_pointer_intptr : Error<
 def err_atomic_builtin_pointer_size : Error<
   "first argument to atomic builtin must be a pointer to 1,2,4,8 or 16 byte "
   "type (%0 invalid)">;
+def err_atomic_op_needs_atomic : Error<
+  "first argument to atomic operation must be a pointer to _Atomic "
+  "type (%0 invalid)">;
+def err_atomic_op_needs_atomic_int_or_ptr : Error<
+  "first argument to atomic operation must be a pointer to atomic "
+  "integer or pointer (%0 invalid)">;
+def err_atomic_op_logical_needs_atomic_int : Error<
+  "first argument to logical atomic operation must be a pointer to atomic "
+  "integer (%0 invalid)">;
 
 def err_deleted_function_use : Error<"attempt to use a deleted function">;
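
Each new diagnostic corresponds to a rejection path in SemaAtomicOpsOverloaded; sketches of code that should trigger them, mirroring test/Sema/atomic-ops.c below:

    int *plain;
    _Atomic(float) *d;
    __atomic_store(plain, 0, 0);   // err_atomic_op_needs_atomic: pointee is not _Atomic
    __atomic_fetch_add(d, 1, 0);   // err_atomic_op_needs_atomic_int_or_ptr: float is neither
    __atomic_fetch_and(d, 1, 0);   // err_atomic_op_logical_needs_atomic_int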
 
index 73996e43d5dabf80f7c72fb8280a2ac247002965..7b3d7762c24aa551a8540285a7a616582d466a64 100644 (file)
@@ -78,6 +78,9 @@ def ParenListExpr : DStmt<Expr>;
 def VAArgExpr : DStmt<Expr>;
 def GenericSelectionExpr : DStmt<Expr>;
 
+// Atomic expressions
+def AtomicExpr : DStmt<Expr>;
+
 // GNU Extensions.
 def AddrLabelExpr : DStmt<Expr>;
 def StmtExpr : DStmt<Expr>;
index 39c989976114f2767f623d649138c7233a64aac8..fec14c58260afadcb35e8db41f2febc5bbc3b619 100644 (file)
@@ -6084,6 +6084,8 @@ private:
   bool SemaBuiltinObjectSize(CallExpr *TheCall);
   bool SemaBuiltinLongjmp(CallExpr *TheCall);
   ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
+  ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
+                                     AtomicExpr::AtomicOp Op);
   bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                               llvm::APSInt &Result);
 
index 9a8eba2427499eab97bca9b77792542f46eebf9b..1efd7542177d48d3570ff6a36d745f2f331d7bf1 100644 (file)
@@ -973,7 +973,9 @@ namespace clang {
       EXPR_BLOCK_DECL_REF,
       /// \brief A GenericSelectionExpr record.
       EXPR_GENERIC_SELECTION,
-      
+      /// \brief An AtomicExpr record.
+      EXPR_ATOMIC,
+
       // Objective-C
 
       /// \brief An ObjCStringLiteral record.
index dc37ac9226ac713fa863277398c0cfa7c998a124..465b490d477cd109af72885360521a3256674b38 100644 (file)
@@ -1538,6 +1538,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
   }
   case CompoundAssignOperatorClass:
   case VAArgExprClass:
+  case AtomicExprClass:
     return false;
 
   case ConditionalOperatorClass: {
index 624e9d29447723e4a70f46fa4db4069e02e346b4..49c68213aa43b5b567544e304b0ecd8536d3813f 100644 (file)
@@ -162,6 +162,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
   case Expr::SubstNonTypeTemplateParmPackExprClass:
   case Expr::AsTypeExprClass:
   case Expr::ObjCIndirectCopyRestoreExprClass:
+  case Expr::AtomicExprClass:
     return Cl::CL_PRValue;
 
     // Next come the complicated cases.
index c0f913d7ed667eb7da33bfa6ecc6be219cd737eb..85cb40f9e08478dec0937d10586adc41e24c7dd1 100644 (file)
@@ -2857,6 +2857,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
   case Expr::AsTypeExprClass:
   case Expr::ObjCIndirectCopyRestoreExprClass:
   case Expr::MaterializeTemporaryExprClass:
+  case Expr::AtomicExprClass:
     return ICEDiag(2, E->getLocStart());
 
   case Expr::InitListExprClass:
index 2b93250fadf3c87769d1b24354db3c463eba0b85..d4ac7229b253dd3459cc6ae72ee159419099ab96 100644 (file)
@@ -2255,6 +2255,7 @@ recurse:
   case Expr::CXXNoexceptExprClass:
   case Expr::CUDAKernelCallExprClass:
   case Expr::AsTypeExprClass:
+  case Expr::AtomicExprClass:
   {
     // As bad as this diagnostic is, it's better than crashing.
     DiagnosticsEngine &Diags = Context.getDiags();
index dfa2612152e172c824685a741870762945743028..78d9a596fbcaa31633572b1fa8111d4329cac921 100644 (file)
@@ -1011,6 +1011,59 @@ void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
   OS << ")";
 }
 
+void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
+  const char *Name;
+  switch (Node->getOp()) {
+    case AtomicExpr::Load:
+      Name = "__atomic_load(";
+      break;
+    case AtomicExpr::Store:
+      Name = "__atomic_store(";
+      break;
+    case AtomicExpr::CmpXchgStrong:
+      Name = "__atomic_compare_exchange_strong(";
+      break;
+    case AtomicExpr::CmpXchgWeak:
+      Name = "__atomic_compare_exchange_weak(";
+      break;
+    case AtomicExpr::Xchg:
+      Name = "__atomic_exchange(";
+      break;
+    case AtomicExpr::Add:
+      Name = "__atomic_fetch_add(";
+      break;
+    case AtomicExpr::Sub:
+      Name = "__atomic_fetch_sub(";
+      break;
+    case AtomicExpr::And:
+      Name = "__atomic_fetch_and(";
+      break;
+    case AtomicExpr::Or:
+      Name = "__atomic_fetch_or(";
+      break;
+    case AtomicExpr::Xor:
+      Name = "__atomic_fetch_xor(";
+      break;
+  }
+  OS << Name;
+  PrintExpr(Node->getPtr());
+  OS << ", ";
+  if (Node->getOp() != AtomicExpr::Load) {
+    PrintExpr(Node->getVal1());
+    OS << ", ";
+  }
+  if (Node->isCmpXChg()) {
+    PrintExpr(Node->getVal2());
+    OS << ", ";
+  }
+  PrintExpr(Node->getOrder());
+  if (Node->isCmpXChg()) {
+    OS << ", ";
+    PrintExpr(Node->getOrderFail());
+  }
+  OS << ")";
+}
+
 // C++
 void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
   const char *OpStrings[NUM_OVERLOADED_OPERATORS] = {
index 12321ef0d6f25b48ce745c8b5dbe044926dcf3e5..df49e843f9692594b914232a10769410033bc0cb 100644 (file)
@@ -468,6 +468,10 @@ void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
   }
 }
 
+void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) {
+  VisitExpr(S);
+}
+
 static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
                                           UnaryOperatorKind &UnaryOp,
                                           BinaryOperatorKind &BinaryOp) {
index f7179befeb47c6dfa7ff25a7b5d5be130c1d01c5..ec0ca424220ea34960979a74bd123f31686d57bc 100644 (file)
@@ -948,6 +948,72 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     return RValue::get(0);
   }
 
+  case Builtin::BI__atomic_thread_fence:
+  case Builtin::BI__atomic_signal_fence: {
+    llvm::SynchronizationScope Scope;
+    if (BuiltinID == Builtin::BI__atomic_signal_fence)
+      Scope = llvm::SingleThread;
+    else
+      Scope = llvm::CrossThread;
+    Value *Order = EmitScalarExpr(E->getArg(0));
+    if (isa<llvm::ConstantInt>(Order)) {
+      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+      switch (ord) {
+      case 0:  // memory_order_relaxed
+      default: // invalid order
+        break;
+      case 1:  // memory_order_consume
+      case 2:  // memory_order_acquire
+        Builder.CreateFence(llvm::Acquire, Scope);
+        break;
+      case 3:  // memory_order_release
+        Builder.CreateFence(llvm::Release, Scope);
+        break;
+      case 4:  // memory_order_acq_rel
+        Builder.CreateFence(llvm::AcquireRelease, Scope);
+        break;
+      case 5:  // memory_order_seq_cst
+        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+        break;
+      }
+      return RValue::get(0);
+    }
+
+    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
+    AcquireBB = createBasicBlock("acquire", CurFn);
+    ReleaseBB = createBasicBlock("release", CurFn);
+    AcqRelBB = createBasicBlock("acqrel", CurFn);
+    SeqCstBB = createBasicBlock("seqcst", CurFn);
+    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
+
+    Builder.SetInsertPoint(AcquireBB);
+    Builder.CreateFence(llvm::Acquire, Scope);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(1), AcquireBB);
+    SI->addCase(Builder.getInt32(2), AcquireBB);
+
+    Builder.SetInsertPoint(ReleaseBB);
+    Builder.CreateFence(llvm::Release, Scope);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(3), ReleaseBB);
+
+    Builder.SetInsertPoint(AcqRelBB);
+    Builder.CreateFence(llvm::AcquireRelease, Scope);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(4), AcqRelBB);
+
+    Builder.SetInsertPoint(SeqCstBB);
+    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+    Builder.SetInsertPoint(ContBB);
+    return RValue::get(0);
+  }
+
     // Library functions with special handling.
   case Builtin::BIsqrt:
   case Builtin::BIsqrtf:
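
When the order operand folds to a constant, the fence lowering above should reduce to a single fence instruction; a sketch, with LLVM 3.0-era IR in the comments (illustrative only, assuming the memory_order enum from the tests):

    void fences(void) {
      __atomic_thread_fence(memory_order_acquire);  // -> fence acquire
      __atomic_signal_fence(memory_order_seq_cst);  // -> fence singlethread seq_cst
      __atomic_thread_fence(memory_order_relaxed);  // -> nothing; relaxed needs no fence
    }

With a non-constant order, the emitted switch dispatches to the four fence-emitting blocks built above, falling through to no fence for relaxed or invalid values.
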
index b242061cf1098d101a5abb5cbfa44966596d534d..cb60df181d0a29bbd10e9496defc6176a28da4b3 100644 (file)
@@ -2478,3 +2478,280 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
 
   return MakeAddrLValue(AddV, MPT->getPointeeType());
 }
+
+static void
+EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
+             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
+             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
+  if (E->isCmpXChg()) {
+    // Note that cmpxchg only supports specifying one ordering and
+    // doesn't support weak cmpxchg, at least at the moment.
+    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+    LoadVal1->setAlignment(Align);
+    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
+    LoadVal2->setAlignment(Align);
+    llvm::AtomicCmpXchgInst *CXI =
+        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
+    CXI->setVolatile(E->isVolatile());
+    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
+    StoreVal1->setAlignment(Align);
+    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
+    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+    return;
+  }
+
+  if (E->getOp() == AtomicExpr::Load) {
+    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
+    Load->setAtomic(Order);
+    Load->setAlignment(Size);
+    Load->setVolatile(E->isVolatile());
+    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
+    StoreDest->setAlignment(Align);
+    return;
+  }
+
+  if (E->getOp() == AtomicExpr::Store) {
+    assert(!Dest && "Store does not return a value");
+    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+    LoadVal1->setAlignment(Align);
+    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
+    Store->setAtomic(Order);
+    Store->setAlignment(Size);
+    Store->setVolatile(E->isVolatile());
+    return;
+  }
+
+  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+  switch (E->getOp()) {
+    case AtomicExpr::CmpXchgWeak:
+    case AtomicExpr::CmpXchgStrong:
+    case AtomicExpr::Store:
+    case AtomicExpr::Load:  assert(0 && "Already handled!");
+    case AtomicExpr::Add:   Op = llvm::AtomicRMWInst::Add;  break;
+    case AtomicExpr::Sub:   Op = llvm::AtomicRMWInst::Sub;  break;
+    case AtomicExpr::And:   Op = llvm::AtomicRMWInst::And;  break;
+    case AtomicExpr::Or:    Op = llvm::AtomicRMWInst::Or;   break;
+    case AtomicExpr::Xor:   Op = llvm::AtomicRMWInst::Xor;  break;
+    case AtomicExpr::Xchg:  Op = llvm::AtomicRMWInst::Xchg; break;
+  }
+  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+  LoadVal1->setAlignment(Align);
+  llvm::AtomicRMWInst *RMWI =
+      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
+  RMWI->setVolatile(E->isVolatile());
+  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(RMWI, Dest);
+  StoreDest->setAlignment(Align);
+}
+
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static llvm::Value *
+EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
+  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
+  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
+                       /*Init*/ true);
+  return DeclPtr;
+}
+
+static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
+                                  llvm::Value *Dest) {
+  if (Ty->isAnyComplexType())
+    return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
+  if (CGF.hasAggregateLLVMType(Ty))
+    return RValue::getAggregate(Dest);
+  return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
+}
+
+RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
+  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
+  QualType MemTy = AtomicTy->getAs<AtomicType>()->getValueType();
+  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
+  uint64_t Size = sizeChars.getQuantity();
+  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
+  unsigned Align = alignChars.getQuantity();
+  // FIXME: Bound on Size should not be hardcoded.
+  bool UseLibcall = (sizeChars != alignChars || !llvm::isPowerOf2_64(Size) ||
+                     Size > 8);
+
+  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
+  Ptr = EmitScalarExpr(E->getPtr());
+  Order = EmitScalarExpr(E->getOrder());
+  if (E->isCmpXChg()) {
+    Val1 = EmitScalarExpr(E->getVal1());
+    Val2 = EmitValToTemp(*this, E->getVal2());
+    OrderFail = EmitScalarExpr(E->getOrderFail());
+    (void)OrderFail; // OrderFail is unused at the moment
+  } else if ((E->getOp() == AtomicExpr::Add || E->getOp() == AtomicExpr::Sub) &&
+             MemTy->isPointerType()) {
+    // For pointers, we're required to do a bit of math: adding 1 to an int*
+    // is not the same as adding 1 to a uintptr_t.
+    QualType Val1Ty = E->getVal1()->getType();
+    llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+    uint64_t PointeeIncAmt =
+        getContext().getTypeSizeInChars(MemTy->getPointeeType()).getQuantity();
+    llvm::Value *PointeeIncAmtVal =
+        llvm::ConstantInt::get(Val1Scalar->getType(), PointeeIncAmt);
+    Val1Scalar = Builder.CreateMul(Val1Scalar, PointeeIncAmtVal);
+    Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+    EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+  } else if (E->getOp() != AtomicExpr::Load) {
+    Val1 = EmitValToTemp(*this, E->getVal1());
+  }
+
+  if (E->getOp() != AtomicExpr::Store && !Dest)
+    Dest = CreateMemTemp(E->getType(), ".atomicdst");
+
+  if (UseLibcall) {
+    // FIXME: Finalize what the libcalls are actually supposed to look like.
+    // See also http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
+    return EmitUnsupportedRValue(E, "atomic library call");
+  }
+#if 0
+  if (UseLibcall) {
+    const char* LibCallName;
+    switch (E->getOp()) {
+    case AtomicExpr::CmpXchgWeak:
+      LibCallName = "__atomic_compare_exchange_generic"; break;
+    case AtomicExpr::CmpXchgStrong:
+      LibCallName = "__atomic_compare_exchange_generic"; break;
+    case AtomicExpr::Add:   LibCallName = "__atomic_fetch_add_generic"; break;
+    case AtomicExpr::Sub:   LibCallName = "__atomic_fetch_sub_generic"; break;
+    case AtomicExpr::And:   LibCallName = "__atomic_fetch_and_generic"; break;
+    case AtomicExpr::Or:    LibCallName = "__atomic_fetch_or_generic"; break;
+    case AtomicExpr::Xor:   LibCallName = "__atomic_fetch_xor_generic"; break;
+    case AtomicExpr::Xchg:  LibCallName = "__atomic_exchange_generic"; break;
+    case AtomicExpr::Store: LibCallName = "__atomic_store_generic"; break;
+    case AtomicExpr::Load:  LibCallName = "__atomic_load_generic"; break;
+    }
+    llvm::SmallVector<QualType, 4> Params;
+    CallArgList Args;
+    QualType RetTy = getContext().VoidTy;
+    if (E->getOp() != AtomicExpr::Store && !E->isCmpXChg())
+      Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+               getContext().VoidPtrTy);
+    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
+             getContext().VoidPtrTy);
+    if (E->getOp() != AtomicExpr::Load)
+      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+               getContext().VoidPtrTy);
+    if (E->isCmpXChg()) {
+      Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
+               getContext().VoidPtrTy);
+      RetTy = getContext().IntTy;
+    }
+    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+             getContext().getSizeType());
+    const CGFunctionInfo &FuncInfo =
+        CGM.getTypes().getFunctionInfo(RetTy, Args, FunctionType::ExtInfo());
+    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo, false);
+    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+    if (E->isCmpXChg())
+      return Res;
+    if (E->getOp() == AtomicExpr::Store)
+      return RValue::get(0);
+    return ConvertTempToRValue(*this, E->getType(), Dest);
+  }
+#endif
+  llvm::Type *IPtrTy =
+      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
+  llvm::Value *OrigDest = Dest;
+  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
+  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
+  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
+  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
+
+  if (isa<llvm::ConstantInt>(Order)) {
+    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+    switch (ord) {
+    case 0:  // memory_order_relaxed
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::Monotonic);
+      break;
+    case 1:  // memory_order_consume
+    case 2:  // memory_order_acquire
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::Acquire);
+      break;
+    case 3:  // memory_order_release
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::Release);
+      break;
+    case 4:  // memory_order_acq_rel
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::AcquireRelease);
+      break;
+    case 5:  // memory_order_seq_cst
+      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                   llvm::SequentiallyConsistent);
+      break;
+    default: // invalid order
+      // We should never get here normally, but it's hard to
+      // enforce that in general.
+      break;
+    }
+    if (E->getOp() == AtomicExpr::Store)
+      return RValue::get(0);
+    return ConvertTempToRValue(*this, E->getType(), OrigDest);
+  }
+
+  // Long case, when Order isn't obviously constant.
+
+  // Create all the relevant BB's
+  llvm::BasicBlock *MonotonicBB, *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
+  MonotonicBB = createBasicBlock("monotonic", CurFn);
+  if (E->getOp() != AtomicExpr::Store)
+    AcquireBB = createBasicBlock("acquire", CurFn);
+  if (E->getOp() != AtomicExpr::Load)
+    ReleaseBB = createBasicBlock("release", CurFn);
+  if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store)
+    AcqRelBB = createBasicBlock("acqrel", CurFn);
+  SeqCstBB = createBasicBlock("seqcst", CurFn);
+  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+  // Create the switch for the split
+  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
+  // doesn't matter unless someone is crazy enough to use something that
+  // doesn't fold to a constant for the ordering.
+  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
+
+  // Emit all the different atomics
+  Builder.SetInsertPoint(MonotonicBB);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+               llvm::Monotonic);
+  Builder.CreateBr(ContBB);
+  if (E->getOp() != AtomicExpr::Store) {
+    Builder.SetInsertPoint(AcquireBB);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                 llvm::Acquire);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(1), AcquireBB);
+    SI->addCase(Builder.getInt32(2), AcquireBB);
+  }
+  if (E->getOp() != AtomicExpr::Load) {
+    Builder.SetInsertPoint(ReleaseBB);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                 llvm::Release);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(3), ReleaseBB);
+  }
+  if (E->getOp() != AtomicExpr::Load && E->getOp() != AtomicExpr::Store) {
+    Builder.SetInsertPoint(AcqRelBB);
+    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+                 llvm::AcquireRelease);
+    Builder.CreateBr(ContBB);
+    SI->addCase(Builder.getInt32(4), AcqRelBB);
+  }
+  Builder.SetInsertPoint(SeqCstBB);
+  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+               llvm::SequentiallyConsistent);
+  Builder.CreateBr(ContBB);
+  SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+  // Cleanup and return
+  Builder.SetInsertPoint(ContBB);
+  if (E->getOp() == AtomicExpr::Store)
+    return RValue::get(0);
+  return ConvertTempToRValue(*this, E->getType(), OrigDest);
+}
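
The multi-block path at the end of EmitAtomicExpr only fires when the order operand is not a compile-time constant; a sketch, assuming the tests' memory_order enum:

    int load_with(_Atomic(int) *p, memory_order mo) {
      return __atomic_load(p, mo);  // switch (mo): monotonic, acquire, seq_cst blocks
    }

For a load, only the monotonic, acquire, and seq_cst blocks are created; release and acq_rel are invalid load orderings, so those case values fall back to the monotonic default.
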
index ff82bfdc48a7df5a8c393538ef080f8a4f133fd0..97754d5c0ba699d451f57e0fc4d6d12f77688dce 100644 (file)
@@ -154,6 +154,9 @@ public:
   void EmitNullInitializationToLValue(LValue Address);
   //  case Expr::ChooseExprClass:
   void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
+  void VisitAtomicExpr(AtomicExpr *E) {
+    CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
+  }
 };
 }  // end anonymous namespace.
 
index e3f02acbda4ae2c16b28cdc55d739fcc118f98ee..4a31bcfbe9e08c678855b7300fd1a21447a6212c 100644 (file)
@@ -266,6 +266,10 @@ public:
   ComplexPairTy VisitInitListExpr(InitListExpr *E);
 
   ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+
+  ComplexPairTy VisitAtomicExpr(AtomicExpr *E) {
+    return CGF.EmitAtomicExpr(E).getComplexVal();
+  }
 };
 }  // end anonymous namespace.
 
index 9d9562014848f70402ac2e616f0d988e1d207f81..26a3e948efe9fc2c9a391bab39c80f2afc3a1b9b 100644 (file)
@@ -513,6 +513,7 @@ public:
     return CGF.EmitObjCStringLiteral(E);
   }
   Value *VisitAsTypeExpr(AsTypeExpr *CE);
+  Value *VisitAtomicExpr(AtomicExpr *AE);
 };
 }  // end anonymous namespace.
 
@@ -2637,6 +2638,10 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
   return Builder.CreateBitCast(Src, DstTy, "astype");
 }
 
+Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
+  return CGF.EmitAtomicExpr(E).getScalarVal();
+}
+
 //===----------------------------------------------------------------------===//
 //                         Entry Point into this File
 //===----------------------------------------------------------------------===//
index e53ed30256cbd5565c8fdbea4327cd74dfff104d..157623da8fdd5227f69e2ceb0f999f0d8359680c 100644 (file)
@@ -2283,6 +2283,8 @@ public:
 
   void EmitCXXThrowExpr(const CXXThrowExpr *E);
 
+  RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
+
   //===--------------------------------------------------------------------===//
   //                         Annotations Emission
   //===--------------------------------------------------------------------===//
index 412ed0ebb5f03affe944a4c6fb790f0054376ac2..a9d79bb70a0254505bf876b68f5f9ca0c81e029a 100644 (file)
@@ -15,6 +15,7 @@
 #include "clang/Sema/Initialization.h"
 #include "clang/Sema/Sema.h"
 #include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Initialization.h"
 #include "clang/Sema/ScopeInfo.h"
 #include "clang/Analysis/Analyses/FormatString.h"
 #include "clang/AST/ASTContext.h"
@@ -197,6 +198,28 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
   case Builtin::BI__sync_lock_release:
   case Builtin::BI__sync_swap:
     return SemaBuiltinAtomicOverloaded(move(TheCallResult));
+  case Builtin::BI__atomic_load:
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Load);
+  case Builtin::BI__atomic_store:
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Store);
+  case Builtin::BI__atomic_exchange:
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xchg);
+  case Builtin::BI__atomic_compare_exchange_strong:
+    return SemaAtomicOpsOverloaded(move(TheCallResult),
+                                   AtomicExpr::CmpXchgStrong);
+  case Builtin::BI__atomic_compare_exchange_weak:
+    return SemaAtomicOpsOverloaded(move(TheCallResult),
+                                   AtomicExpr::CmpXchgWeak);
+  case Builtin::BI__atomic_fetch_add:
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Add);
+  case Builtin::BI__atomic_fetch_sub:
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Sub);
+  case Builtin::BI__atomic_fetch_and:
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::And);
+  case Builtin::BI__atomic_fetch_or:
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Or);
+  case Builtin::BI__atomic_fetch_xor:
+    return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::Xor);
   case Builtin::BI__builtin_annotation:
     if (CheckBuiltinAnnotationString(*this, TheCall->getArg(1)))
       return ExprError();
@@ -414,6 +437,153 @@ bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
   return false;
 }
 
+ExprResult
+Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op) {
+  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
+  DeclRefExpr *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+  Expr *Ptr, *Order, *Val1, *Val2, *OrderFail;
+
+  // All these operations take one of the following four forms:
+  // T   __atomic_load(_Atomic(T)*, int)                              (loads)
+  // T*  __atomic_fetch_add(_Atomic(T*)*, ptrdiff_t, int)   (pointer add/sub)
+  // int __atomic_compare_exchange_strong(_Atomic(T)*, T*, T, int, int)
+  //                                                                (cmpxchg)
+  // T   __atomic_exchange(_Atomic(T)*, T, int)             (everything else)
+  // where T is an appropriate type, and the int parameters are for orderings.
+  unsigned NumVals = 1;
+  unsigned NumOrders = 1;
+  if (Op == AtomicExpr::Load) {
+    NumVals = 0;
+  } else if (Op == AtomicExpr::CmpXchgWeak || Op == AtomicExpr::CmpXchgStrong) {
+    NumVals = 2;
+    NumOrders = 2;
+  }
+
+  if (TheCall->getNumArgs() < NumVals+NumOrders+1) {
+    Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+      << 0 << NumVals+NumOrders+1 << TheCall->getNumArgs()
+      << TheCall->getCallee()->getSourceRange();
+    return ExprError();
+  } else if (TheCall->getNumArgs() > NumVals+NumOrders+1) {
+    Diag(TheCall->getArg(NumVals+NumOrders+1)->getLocStart(),
+         diag::err_typecheck_call_too_many_args)
+      << 0 << NumVals+NumOrders+1 << TheCall->getNumArgs()
+      << TheCall->getCallee()->getSourceRange();
+    return ExprError();
+  }
+
+  // Inspect the first argument of the atomic operation.  This should always be
+  // a pointer to an _Atomic type.
+  Ptr = TheCall->getArg(0);
+  Ptr = DefaultFunctionArrayLvalueConversion(Ptr).get();
+  const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
+  if (!pointerType) {
+    Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
+      << Ptr->getType() << Ptr->getSourceRange();
+    return ExprError();
+  }
+
+  QualType AtomTy = pointerType->getPointeeType();
+  if (!AtomTy->isAtomicType()) {
+    Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
+      << Ptr->getType() << Ptr->getSourceRange();
+    return ExprError();
+  }
+  QualType ValType = AtomTy->getAs<AtomicType>()->getValueType();
+
+  if ((Op == AtomicExpr::Add || Op == AtomicExpr::Sub) &&
+      !ValType->isIntegerType() && !ValType->isPointerType()) {
+    Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+      << Ptr->getType() << Ptr->getSourceRange();
+    return ExprError();
+  }
+
+  if (!ValType->isIntegerType() &&
+      (Op == AtomicExpr::And || Op == AtomicExpr::Or || Op == AtomicExpr::Xor)){
+    Diag(DRE->getLocStart(), diag::err_atomic_op_logical_needs_atomic_int)
+      << Ptr->getType() << Ptr->getSourceRange();
+    return ExprError();
+  }
+
+  switch (ValType.getObjCLifetime()) {
+  case Qualifiers::OCL_None:
+  case Qualifiers::OCL_ExplicitNone:
+    // okay
+    break;
+
+  case Qualifiers::OCL_Weak:
+  case Qualifiers::OCL_Strong:
+  case Qualifiers::OCL_Autoreleasing:
+    Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
+      << ValType << Ptr->getSourceRange();
+    return ExprError();
+  }
+
+  QualType ResultType = ValType;
+  if (Op == AtomicExpr::Store)
+    ResultType = Context.VoidTy;
+  else if (Op == AtomicExpr::CmpXchgWeak || Op == AtomicExpr::CmpXchgStrong)
+    ResultType = Context.BoolTy;
+
+  // The first argument --- the pointer --- has a fixed type; we
+  // deduce the types of the rest of the arguments accordingly.  Walk
+  // the remaining arguments, converting them to the deduced value type.
+  for (unsigned i = 1; i != NumVals+NumOrders+1; ++i) {
+    ExprResult Arg = TheCall->getArg(i);
+    QualType Ty;
+    if (i < NumVals+1) {
+      // The second argument to a cmpxchg is a pointer to the data which will
+      // be exchanged. The second argument to a pointer add/subtract is the
+      // amount to add/subtract, which must be a ptrdiff_t.  The third
+      // argument to a cmpxchg and the second argument in all other cases
+      // is the type of the value.
+      if (i == 1 && (Op == AtomicExpr::CmpXchgWeak ||
+                     Op == AtomicExpr::CmpXchgStrong))
+         Ty = Context.getPointerType(ValType.getUnqualifiedType());
+      else if (!ValType->isIntegerType() &&
+               (Op == AtomicExpr::Add || Op == AtomicExpr::Sub))
+        Ty = Context.getPointerDiffType();
+      else
+        Ty = ValType;
+    } else {
+      // The order(s) are always converted to int.
+      Ty = Context.IntTy;
+    }
+    InitializedEntity Entity =
+        InitializedEntity::InitializeParameter(Context, Ty, false);
+    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
+    if (Arg.isInvalid())
+      return ExprError();
+    TheCall->setArg(i, Arg.get());
+  }
+
+  if (Op == AtomicExpr::Load) {
+    Order = TheCall->getArg(1);
+    return Owned(new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
+                                          Ptr, Order, ResultType, Op,
+                                          TheCall->getRParenLoc(), false,
+                                          false));
+  } else if (Op != AtomicExpr::CmpXchgWeak && Op != AtomicExpr::CmpXchgStrong) {
+    Val1 = TheCall->getArg(1);
+    Order = TheCall->getArg(2);
+    return Owned(new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
+                                          Ptr, Val1, Order, ResultType, Op,
+                                          TheCall->getRParenLoc(), false,
+                                          false));
+  } else {
+    Val1 = TheCall->getArg(1);
+    Val2 = TheCall->getArg(2);
+    Order = TheCall->getArg(3);
+    OrderFail = TheCall->getArg(4);
+    return Owned(new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
+                                          Ptr, Val1, Val2, Order, OrderFail,
+                                          ResultType, Op, 
+                                          TheCall->getRParenLoc(), false,
+                                          false));
+  }
+}
+
+
 /// checkBuiltinArgument - Given a call to a builtin function, perform
 /// normal type-checking on the given argument, updating the call in
 /// place.  This is useful when a builtin function requires custom
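
The deduction loop in SemaAtomicOpsOverloaded assigns each value operand a different target type; a hedged illustration, again assuming the tests' memory_order enum:

    _Atomic(int*) *pp;
    int *expected = 0, *desired = 0;
    // '1' is converted to ptrdiff_t; the result has type int*
    int *old = __atomic_fetch_add(pp, 1, memory_order_relaxed);
    // arg 2 must be T* (here int**), arg 3 is T (int*); the result is _Bool
    _Bool ok = __atomic_compare_exchange_strong(pp, &expected, desired,
                   memory_order_seq_cst, memory_order_seq_cst);
    // the trailing order argument(s) are always converted to int
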
index 2a18afafd9bc62abc9b65ea7ba453ad532668ad4..4a2ad18e7126a57296ae6c1cf3fe56f3aab4a500 100644 (file)
@@ -8099,6 +8099,13 @@ ExprResult
 TreeTransform<Derived>::TransformAsTypeExpr(AsTypeExpr *E) {
   llvm_unreachable("Cannot transform asType expressions yet");
 }
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformAtomicExpr(AtomicExpr *E) {
+  assert(false && "Cannot transform atomic expressions yet");
+  return SemaRef.Owned(E);
+}
   
 //===----------------------------------------------------------------------===//
 // Type reconstruction
index 7653d5f2906c4c876e05c3b6deb813cfd178bea5..ab07b85bf4c37bbf750b69ee1abc91756456b454 100644 (file)
@@ -774,6 +774,25 @@ void ASTStmtReader::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
   E->RParenLoc = ReadSourceLocation(Record, Idx);
 }
 
+void ASTStmtReader::VisitAtomicExpr(AtomicExpr *E) {
+  VisitExpr(E);
+  E->setOp(AtomicExpr::AtomicOp(Record[Idx++]));
+  E->setPtr(Reader.ReadSubExpr());
+  E->setOrder(Reader.ReadSubExpr());
+  E->setNumSubExprs(2);
+  if (E->getOp() != AtomicExpr::Load) {
+    E->setVal1(Reader.ReadSubExpr());
+    E->setNumSubExprs(3);
+  }
+  if (E->isCmpXChg()) {
+    E->setOrderFail(Reader.ReadSubExpr());
+    E->setVal2(Reader.ReadSubExpr());
+    E->setNumSubExprs(5);
+  }
+  E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+  E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
 //===----------------------------------------------------------------------===//
 // Objective-C Expressions and Statements
 
@@ -2010,6 +2029,10 @@ Stmt *ASTReader::ReadStmtFromStream(Module &F) {
     case EXPR_ASTYPE:
       S = new (Context) AsTypeExpr(Empty);
       break;
+
+    case EXPR_ATOMIC:
+      S = new (Context) AtomicExpr(Empty);
+      break;
     }
     
     // We hit a STMT_STOP, so we're done with this expression.
index fb5617551e9574452a25e2bbb9c9f18a6aa6c11b..7e2d45c5e7f995239ac4bb5552e331b051317502 100644 (file)
@@ -736,6 +736,21 @@ void ASTStmtWriter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
   Code = serialization::EXPR_GENERIC_SELECTION;
 }
 
+void ASTStmtWriter::VisitAtomicExpr(AtomicExpr *E) {
+  VisitExpr(E);
+  Record.push_back(E->getOp());
+  Writer.AddStmt(E->getPtr());
+  Writer.AddStmt(E->getOrder());
+  if (E->getOp() != AtomicExpr::Load)
+    Writer.AddStmt(E->getVal1());
+  if (E->isCmpXChg()) {
+    Writer.AddStmt(E->getOrderFail());
+    Writer.AddStmt(E->getVal2());
+  }
+  Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+  Writer.AddSourceLocation(E->getRParenLoc(), Record);
+}
+
 //===----------------------------------------------------------------------===//
 // Objective-C Expressions and Statements.
 //===----------------------------------------------------------------------===//
index 11be71a96851b60253548374ab5ebaa15764d4de..5489c10bba82f82e9d163d29cd673df66a0f1541 100644 (file)
@@ -564,6 +564,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
     case Stmt::CUDAKernelCallExprClass:
     case Stmt::OpaqueValueExprClass:
     case Stmt::AsTypeExprClass:
+    case Stmt::AtomicExprClass:
         // Fall through.
 
     // Cases we intentionally don't evaluate, since they don't need
diff --git a/test/CodeGen/atomic-ops.c b/test/CodeGen/atomic-ops.c
new file mode 100644 (file)
index 0000000..cb3a868
--- /dev/null
@@ -0,0 +1,79 @@
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=i686-apple-darwin9 | FileCheck %s
+
+// Basic IRGen tests for __atomic_*
+
+// FIXME: Need to implement __atomic_is_lock_free
+
+typedef enum memory_order {
+  memory_order_relaxed, memory_order_consume, memory_order_acquire,
+  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+} memory_order;
+
+int fi1(_Atomic(int) *i) {
+  // CHECK: @fi1
+  // CHECK: load atomic i32* {{.*}} seq_cst
+  return __atomic_load(i, memory_order_seq_cst);
+}
+
+void fi2(_Atomic(int) *i) {
+  // CHECK: @fi2
+  // CHECK: store atomic i32 {{.*}} seq_cst
+  __atomic_store(i, 1, memory_order_seq_cst);
+}
+
+void fi3(_Atomic(int) *i) {
+  // CHECK: @fi3
+  // CHECK: atomicrmw and
+  __atomic_fetch_and(i, 1, memory_order_seq_cst);
+}
+
+void fi4(_Atomic(int) *i) {
+  // CHECK: @fi4
+  // CHECK: cmpxchg i32*
+  int cmp = 0;
+  __atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
+}
+
+float ff1(_Atomic(float) *d) {
+  // CHECK: @ff1
+  // CHECK: load atomic i32* {{.*}} monotonic
+  return __atomic_load(d, memory_order_relaxed);
+}
+
+void ff2(_Atomic(float) *d) {
+  // CHECK: @ff2
+  // CHECK: store atomic i32 {{.*}} release
+  __atomic_store(d, 1, memory_order_release);
+}
+
+float ff3(_Atomic(float) *d) {
+  return __atomic_exchange(d, 2, memory_order_seq_cst);
+}
+
+int* fp1(_Atomic(int*) *p) {
+  // CHECK: @fp1
+  // CHECK: load atomic i32* {{.*}} seq_cst
+  return __atomic_load(p, memory_order_seq_cst);
+}
+
+int* fp2(_Atomic(int*) *p) {
+  // CHECK: @fp2
+  // CHECK: store i32 4
+  // CHECK: atomicrmw add {{.*}} monotonic
+  return __atomic_fetch_add(p, 1, memory_order_relaxed);
+}
+
+// FIXME: Alignment specification shouldn't be necessary
+typedef _Complex float ComplexAligned __attribute((aligned(8)));
+_Complex float fc(_Atomic(ComplexAligned) *c) {
+  // CHECK: @fc
+  // CHECK: atomicrmw xchg i64*
+  return __atomic_exchange(c, 2, memory_order_seq_cst);
+}
+
+typedef struct X { int x; } X;
+X fs(_Atomic(X) *c) {
+  // CHECK: @fs
+  // CHECK: atomicrmw xchg i32*
+  return __atomic_exchange(c, (X){2}, memory_order_seq_cst);
+}
diff --git a/test/Sema/atomic-ops.c b/test/Sema/atomic-ops.c
new file mode 100644 (file)
index 0000000..51b46bd
--- /dev/null
@@ -0,0 +1,37 @@
+// RUN: %clang_cc1 %s -verify -fsyntax-only
+
+// Basic parsing/Sema tests for __atomic_*
+
+// FIXME: Need to implement __atomic_is_lock_free
+
+typedef enum memory_order {
+  memory_order_relaxed, memory_order_consume, memory_order_acquire,
+  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
+} memory_order;
+
+void f(_Atomic(int) *i, _Atomic(int*) *p, _Atomic(float) *d) {
+  __atomic_load(0); // expected-error {{too few arguments to function}}
+  __atomic_load(0,0,0); // expected-error {{too many arguments to function}}
+  __atomic_store(0,0,0); // expected-error {{first argument to atomic operation}}
+  __atomic_store((int*)0,0,0); // expected-error {{first argument to atomic operation}}
+
+  __atomic_load(i, memory_order_seq_cst);
+  __atomic_load(p, memory_order_seq_cst);
+  __atomic_load(d, memory_order_seq_cst);
+
+  __atomic_store(i, 1, memory_order_seq_cst);
+  __atomic_store(p, 1, memory_order_seq_cst); // expected-warning {{incompatible integer to pointer conversion}}
+  (int)__atomic_store(d, 1, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
+
+  __atomic_fetch_add(i, 1, memory_order_seq_cst);
+  __atomic_fetch_add(p, 1, memory_order_seq_cst);
+  __atomic_fetch_add(d, 1, memory_order_seq_cst); // expected-error {{must be a pointer to atomic integer or pointer}}
+
+  __atomic_fetch_and(i, 1, memory_order_seq_cst);
+  __atomic_fetch_and(p, 1, memory_order_seq_cst); // expected-error {{must be a pointer to atomic integer}}
+  __atomic_fetch_and(d, 1, memory_order_seq_cst); // expected-error {{must be a pointer to atomic integer}}
+
+  __atomic_compare_exchange_strong(i, 0, 1, memory_order_seq_cst, memory_order_seq_cst);
+  __atomic_compare_exchange_strong(p, 0, (int*)1, memory_order_seq_cst, memory_order_seq_cst);
+  __atomic_compare_exchange_strong(d, (int*)0, 1, memory_order_seq_cst, memory_order_seq_cst); // expected-warning {{incompatible pointer types}}
+}
index 16aea6f71338649d8297a11cdcdae2897066de6d..45f3fa8c2b4ff0a129c096c25c0d68219bb54623 100644 (file)
@@ -201,6 +201,7 @@ CXCursor cxcursor::MakeCXCursor(Stmt *S, Decl *Parent, CXTranslationUnit TU,
   
   case Stmt::ArrayTypeTraitExprClass:
   case Stmt::AsTypeExprClass:
+  case Stmt::AtomicExprClass:
   case Stmt::BinaryConditionalOperatorClass:
   case Stmt::BinaryTypeTraitExprClass:
   case Stmt::CXXBindTemporaryExprClass: