return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
<< 0 << TheCall->getCallee()->getSourceRange();
+
+ // Get the decl for the concrete builtin; from it we can tell which
+ // concrete integer type we should convert to.
+ unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
+ const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID);
+ IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName);
+ FunctionDecl *NewBuiltinDecl =
+ cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID,
+ TUScope, false, DRE->getLocStart()));
+ const FunctionProtoType *BuiltinFT =
+ NewBuiltinDecl->getType()->getAsFunctionProtoType();
+ ValType = BuiltinFT->getArgType(0)->getAsPointerType()->getPointeeType();
+
+ // If the first argument needs conversion (e.g. void** -> int*), do it now.
+ if (BuiltinFT->getArgType(0) != FirstArg->getType()) {
+ ImpCastExprToType(FirstArg, BuiltinFT->getArgType(0), false);
+ TheCall->setArg(0, FirstArg);
+ }
+
// Next, walk the valid ones promoting to the right type.
for (unsigned i = 0; i != NumFixed; ++i) {
Expr *Arg = TheCall->getArg(i+1);
TheCall->setArg(i+1, Arg);
}
- // Okay, if we get here, everything is good. Get the decl for the concrete
- // builtin.
- unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
- const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID);
- IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName);
- FunctionDecl *NewBuiltinDecl =
- cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID,
- TUScope, false, DRE->getLocStart()));
// Switch the DeclRefExpr to refer to the new decl.
DRE->setDecl(NewBuiltinDecl);
DRE->setType(NewBuiltinDecl->getType());
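To see what this Sema change enables at the source level, here is a minimal C sketch (the function name is hypothetical, and it assumes a target where pointers are 32 bits wide, so the call maps onto the i32 concrete builtin exercised by the test below):

/* Hypothetical example: a generic __sync builtin applied to pointer
 * operands.  Sema now resolves the call to the pointer-sized concrete
 * builtin and implicitly converts the void** first argument to that
 * builtin's parameter type, so the call lowers to the
 * llvm.atomic.cmp.swap intrinsic checked by the test's RUN lines. */
void publish(void **slot, void *expected, void *desired) {
  __sync_val_compare_and_swap(slot, expected, desired);
}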
// RUN: grep @llvm.atomic.load.umin.i32 %t1 &&
// RUN: grep @llvm.atomic.load.umax.i32 %t1 &&
// RUN: grep @llvm.atomic.swap.i32 %t1 &&
-// RUN: grep @llvm.atomic.cmp.swap.i32 %t1 | count 3 &&
+// RUN: grep @llvm.atomic.cmp.swap.i32 %t1 | count 4 &&
// RUN: grep @llvm.atomic.load.and.i32 %t1 | count 2 &&
// RUN: grep @llvm.atomic.load.or.i8 %t1 &&
// RUN: grep @llvm.atomic.load.xor.i8 %t1
old = __sync_or_and_fetch(&valc, 4);
old = __sync_xor_and_fetch(&valc, 5);
+
+ __sync_val_compare_and_swap((void **)0, (void *)0, (void *)0);
+
return old;
}