ATOMIC_BUILTIN(__atomic_fetch_and, "v.", "t")
ATOMIC_BUILTIN(__atomic_fetch_or, "v.", "t")
ATOMIC_BUILTIN(__atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_nand, "v.", "t")
ATOMIC_BUILTIN(__atomic_add_fetch, "v.", "t")
ATOMIC_BUILTIN(__atomic_sub_fetch, "v.", "t")
ATOMIC_BUILTIN(__atomic_and_fetch, "v.", "t")
ATOMIC_BUILTIN(__atomic_or_fetch, "v.", "t")
ATOMIC_BUILTIN(__atomic_xor_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_nand_fetch, "v.", "t")
BUILTIN(__atomic_test_and_set, "bvD*i", "n")
BUILTIN(__atomic_clear, "vvD*i", "n")
BUILTIN(__atomic_thread_fence, "vi", "n")
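
For reference, both new builtins follow the documented GCC __atomic semantics: they atomically store ~(*ptr & val); __atomic_fetch_nand returns the value the object held before the operation, while __atomic_nand_fetch returns the value that was stored. A minimal non-atomic sketch of that contract (the helper names below are hypothetical, for illustration only):

int fetch_nand_sketch(int *p, int v) {
  int old = *p;
  *p = ~(old & v);    // nand stores the complement of (old & v)
  return old;         // __atomic_fetch_nand returns the old value
}

int nand_fetch_sketch(int *p, int v) {
  int old = *p;
  *p = ~(old & v);
  return ~(old & v);  // __atomic_nand_fetch returns the new value
}
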
case AO__atomic_fetch_and:
case AO__atomic_fetch_or:
case AO__atomic_fetch_xor:
+ case AO__atomic_fetch_nand:
case AO__atomic_add_fetch:
case AO__atomic_sub_fetch:
case AO__atomic_and_fetch:
case AO__atomic_or_fetch:
case AO__atomic_xor_fetch:
+ case AO__atomic_nand_fetch:
return 3;
case AO__atomic_exchange:
case AtomicExpr::AO__atomic_fetch_xor:
Op = llvm::AtomicRMWInst::Xor;
break;
+
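+ // LLVM's atomicrmw nand returns the old value, but __atomic_nand_fetch
+ // must return the new one; record an And post-op here and invert the
+ // result after the RMW below.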
+ case AtomicExpr::AO__atomic_nand_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_nand:
+ Op = llvm::AtomicRMWInst::Nand;
+ break;
}
llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
llvm::Value *Result = RMWI;
if (PostOp)
Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
-
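+ // Complete the NAND for __atomic_nand_fetch: the And post-op produced
+ // (old & val), so invert it to get the value that was actually stored.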
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ Result = CGF.Builder.CreateNot(Result);
llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
StoreDest->setAlignment(Align);
}
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
Val1 = EmitValToTemp(*this, E->getVal1());
break;
}
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
Form = Arithmetic;
break;
return __atomic_add_fetch(i, 1, memory_order_seq_cst);
}
+int fi3c(int *i) {
+ // CHECK: @fi3c
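+  // __atomic_fetch_nand returns the old value directly, so no post-op
+  // 'and' should follow the atomicrmw.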
+ // CHECK: atomicrmw nand
+ // CHECK-NOT: and
+ return __atomic_fetch_nand(i, 1, memory_order_seq_cst);
+}
+
+int fi3d(int *i) {
+ // CHECK: @fi3d
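+  // The new value is recomputed as ~(old & 1): an 'and' on the atomicrmw
+  // result followed by an 'xor' with -1 (CreateNot).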
+ // CHECK: atomicrmw nand
+ // CHECK: and
+ // CHECK: xor
+ return __atomic_nand_fetch(i, 1, memory_order_seq_cst);
+}
+
_Bool fi4(_Atomic(int) *i) {
// CHECK: @fi4
// CHECK: cmpxchg i32*