getContext().getSizeType());
}
// Atomic address is the first or second parameter
- Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
- getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
std::string LibCallName;
QualType RetTy;
Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy);
- Args.add(RValue::get(Order),
- getContext().IntTy);
+ Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
// void __atomic_exchange(size_t size, void *mem, void *val, void *return,
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
-void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
- bool isInit) {
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
// If this is an aggregate r-value, it should agree in type except
// maybe for address-space qualification.
assert(!rvalue.isAggregate() ||
-//===----- CGCXXABI.cpp - Interface to C++ ABIs -----------------*- C++ -*-===//
+//===----- CGCXXABI.cpp - Interface to C++ ABIs ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
-//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
+//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// The LLVM Compiler Infrastructure
//
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
- if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
+ if (const CXXMethodDecl *MD =
+ dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
if (MD->isVirtual() && Arg == CXXABIThisDecl)
- V = CGM.getCXXABI().adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
+ V = CGM.getCXXABI().
+ adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
}
// Because of merging of function types from multiple decls it is
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment,
- QualType Ty,
- llvm::MDNode *TBAAInfo,
+ QualType Ty, llvm::MDNode *TBAAInfo,
bool isInit, QualType TBAABaseType,
uint64_t TBAAOffset) {
// Our source is a vec3, do a shuffle vector to make it a vec4.
SmallVector<llvm::Constant*, 4> Mask;
- Mask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext),
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
0));
- Mask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext),
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
1));
- Mask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext),
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
2));
Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));
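(For context on the hunk above: the mask selects the three source lanes and leaves the fourth lane undefined, so a vec3 value can be widened to a vec4 with a single shufflevector. A minimal standalone sketch of the same technique with llvm::IRBuilder follows; the helper name widenVec3ToVec4 and its signature are illustrative assumptions, not part of this patch.)

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"

// Hypothetical helper, not Clang code: widen a <3 x T> value to <4 x T>,
// keeping lanes 0..2 and leaving lane 3 undefined (padding), mirroring the
// mask built in the hunk above.
static llvm::Value *widenVec3ToVec4(llvm::IRBuilder<> &Builder,
                                    llvm::LLVMContext &Ctx,
                                    llvm::Value *Vec3) {
  llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
  llvm::SmallVector<llvm::Constant *, 4> Mask;
  Mask.push_back(llvm::ConstantInt::get(Int32Ty, 0)); // keep lane 0
  Mask.push_back(llvm::ConstantInt::get(Int32Ty, 1)); // keep lane 1
  Mask.push_back(llvm::ConstantInt::get(Int32Ty, 2)); // keep lane 2
  Mask.push_back(llvm::UndefValue::get(Int32Ty));     // lane 3 is padding
  // The mask never selects from the second operand, so undef is sufficient.
  return Builder.CreateShuffleVector(Vec3,
                                     llvm::UndefValue::get(Vec3->getType()),
                                     llvm::ConstantVector::get(Mask),
                                     "vec3.widened");
}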
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+ bool isInit) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
- return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee,
- ReturnValue, Args);
+ return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
+ Callee, ReturnValue, Args);
}
RValue
assert(!getContext().getAsConstantArrayType(E->getType())
&& "EmitSynthesizedCXXCopyCtor - Copied-in Array");
- EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
- E->arg_begin(), E->arg_end());
+ EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E->arg_begin(), E->arg_end());
}
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
FunctionType::ExtInfo(),
RequiredArgs::All),
copyCppAtomicObjectFn, ReturnValueSlot(), args);
-
-
}
// If the RHS was emitted retained, expand this.
if (hasImmediateRetain) {
- llvm::Value *oldValue =
- EmitLoadOfScalar(lvalue);
+ llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
EmitStoreOfScalar(value, lvalue);
EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
} else {
imp = EnforceType(Builder, imp, MSI.MessengerType);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
- 0, &call);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, 0, &call);
call->setMetadata(msgSendMDKind, node);