BUILTIN(__builtin_alloca, "v*z", "n")
+// Builtin atomic operations (GCC-style __sync_* builtins).
+BUILTIN(__sync_fetch_and_add, "ii*i", "n")
+BUILTIN(__sync_fetch_and_sub, "ii*i", "n")
+BUILTIN(__sync_fetch_and_min, "ii*i", "n")
+BUILTIN(__sync_fetch_and_max, "ii*i", "n")
+BUILTIN(__sync_fetch_and_umin, "UiUi*Ui", "n")
+BUILTIN(__sync_fetch_and_umax, "UiUi*Ui", "n")
+BUILTIN(__sync_fetch_and_and, "ii*i", "n")
+BUILTIN(__sync_fetch_and_or, "ii*i", "n")
+BUILTIN(__sync_fetch_and_xor, "ii*i", "n")
+BUILTIN(__sync_lock_test_and_set, "ii*i", "n")
+BUILTIN(__sync_val_compare_and_swap, "ii*ii", "n")
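+// (For reference: in the type strings the first entry is the return type, so
+// "ii*i" declares int(int *, int), "UiUi*Ui" declares
+// unsigned(unsigned *, unsigned), and "ii*ii" declares int(int *, int, int);
+// the "n" attribute marks the builtin as nothrow.)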
+
#undef BUILTIN
using namespace CodeGen;
using namespace llvm;
+/// EmitBinaryAtomic - Utility to emit a call to a binary atomic intrinsic,
+/// given its Intrinsic::ID and the builtin call expression. The intrinsic
+/// is overloaded on its operand type, so it is resolved using the converted
+/// result type of the expression.
+static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
+                               Intrinsic::ID Id, const CallExpr *E) {
+  const llvm::Type *ResType = CGF.ConvertType(E->getType());
+  Value *AtomF = CGF.CGM.getIntrinsic(Id, &ResType, 1);
+  return RValue::get(CGF.Builder.CreateCall2(AtomF,
+                                             CGF.EmitScalarExpr(E->getArg(0)),
+                                             CGF.EmitScalarExpr(E->getArg(1))));
+}
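+// For an int operand the intrinsic resolves to its i32 overload, so e.g.
+// __sync_fetch_and_add(&val, 1) should lower to roughly
+//   %old = call i32 @llvm.atomic.las.i32(i32* %val, i32 1)
+// (illustrative only; see the CodeGen test below for what is actually checked).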
+
RValue CodeGenFunction::EmitBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
switch (BuiltinID) {
  case Builtin::BI__builtin_alloca: {
    return RValue::get(Builder.CreateAlloca(llvm::Type::Int8Ty,
                                            EmitScalarExpr(E->getArg(0)),
                                            "tmp"));
  }
+  case Builtin::BI__sync_fetch_and_add:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_las, E);
+  case Builtin::BI__sync_fetch_and_sub:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_lss, E);
+  case Builtin::BI__sync_fetch_and_min:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
+  case Builtin::BI__sync_fetch_and_max:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
+  case Builtin::BI__sync_fetch_and_umin:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
+  case Builtin::BI__sync_fetch_and_umax:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
+  case Builtin::BI__sync_fetch_and_and:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+  case Builtin::BI__sync_fetch_and_or:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+  case Builtin::BI__sync_fetch_and_xor:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+  case Builtin::BI__sync_val_compare_and_swap: {
+    // Value compare-and-swap takes three operands: the pointer, the value to
+    // compare against, and the new value to store on a match.
+    Value *Args[3];
+    Args[0] = EmitScalarExpr(E->getArg(0));
+    Args[1] = EmitScalarExpr(E->getArg(1));
+    Args[2] = EmitScalarExpr(E->getArg(2));
+    const llvm::Type *ResType = ConvertType(E->getType());
+    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_lcs, &ResType, 1);
+    return RValue::get(Builder.CreateCall(AtomF, Args, Args + 3));
+  }
+  case Builtin::BI__sync_lock_test_and_set:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
  }
return RValue::get(0);
}
--- /dev/null
+// RUN: clang %s -emit-llvm -o - > %t1
+// RUN: grep @llvm.atomic.las.i32 %t1
+// RUN: grep @llvm.atomic.lss.i32 %t1
+// RUN: grep @llvm.atomic.load.min.i32 %t1
+// RUN: grep @llvm.atomic.load.max.i32 %t1
+// RUN: grep @llvm.atomic.load.umin.i32 %t1
+// RUN: grep @llvm.atomic.load.umax.i32 %t1
+// RUN: grep @llvm.atomic.swap.i32 %t1
+// RUN: grep @llvm.atomic.lcs.i32 %t1
+// RUN: grep @llvm.atomic.load.and.i32 %t1
+// RUN: grep @llvm.atomic.load.or.i32 %t1
+// RUN: grep @llvm.atomic.load.xor.i32 %t1
+
+
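+// Each call below should turn into a call to the llvm.atomic.* intrinsic
+// named in the corresponding RUN line above; the operand values are arbitrary.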
+int atomic(void)
+{
+  // Nonsensical test that just exercises each __sync builtin once.
+  int old;
+  int val = 1;
+  unsigned int uval = 1;
+  int cmp = 0;
+
+  old = __sync_fetch_and_add(&val, 1);
+  old = __sync_fetch_and_sub(&val, 2);
+  old = __sync_fetch_and_min(&val, 3);
+  old = __sync_fetch_and_max(&val, 4);
+  old = __sync_fetch_and_umin(&uval, 5u);
+  old = __sync_fetch_and_umax(&uval, 6u);
+  old = __sync_lock_test_and_set(&val, 7);
+  old = __sync_val_compare_and_swap(&val, 4, 1976);
+  old = __sync_fetch_and_and(&val, 0x9);
+  old = __sync_fetch_and_or(&val, 0xa);
+  old = __sync_fetch_and_xor(&val, 0xb);
+  return old;
+}