void computeInfo(CGFunctionInfo &FI) const override {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
}
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
}
llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
FI.setEffectiveCallingConvention(llvm::CallingConv::X86_CDeclMethod);
bool UsedInAlloca = false;
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
- it->info = classifyArgumentType(it->type, State);
- UsedInAlloca |= (it->info.getKind() == ABIArgInfo::InAlloca);
+ for (auto &I : FI.arguments()) {
+ I.info = classifyArgumentType(I.type, State);
+ UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
}
// If we needed to use inalloca for any argument, do a second pass and rewrite
QualType RetTy = FI.getReturnType();
FI.getReturnInfo() = classify(RetTy, true);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classify(it->type, false);
+ for (auto &I : FI.arguments())
+ I.info = classify(I.type, false);
}
llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// when lowering the parameters in the caller and args in the callee.
void computeInfo(CGFunctionInfo &FI) const override {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
+ for (auto &I : FI.arguments()) {
// We rely on the default argument classification for the most part.
// One exception: An aggregate containing a single floating-point
// or vector item must be passed in a register if one is available.
- const Type *T = isSingleElementStruct(it->type, getContext());
+ const Type *T = isSingleElementStruct(I.type, getContext());
if (T) {
const BuiltinType *BT = T->getAs<BuiltinType>();
if (T->isVectorType() || (BT && BT->isFloatingPoint())) {
QualType QT(T, 0);
- it->info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
+ I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
continue;
}
}
- it->info = classifyArgumentType(it->type);
+ I.info = classifyArgumentType(I.type);
}
}
resetAllocatedRegs();
FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
+ for (auto &I : FI.arguments()) {
unsigned PreAllocationVFPs = AllocatedVFPs;
unsigned PreAllocationGPRs = AllocatedGPRs;
bool IsHA = false;
bool IsCPRC = false;
// 6.1.2.3 There is one VFP co-processor register class using registers
// s0-s15 (d0-d7) for passing arguments.
- it->info = classifyArgumentType(it->type, IsHA, FI.isVariadic(), IsCPRC);
+ I.info = classifyArgumentType(I.type, IsHA, FI.isVariadic(), IsCPRC);
assert((IsCPRC || !IsHA) && "Homogeneous aggregates must be CPRCs");
// If we do not have enough VFP registers for the HA, any VFP registers
// that are unallocated are marked as unavailable. To achieve this, we add
if (IsHA && AllocatedVFPs > NumVFPs && PreAllocationVFPs < NumVFPs) {
llvm::Type *PaddingTy = llvm::ArrayType::get(
llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocationVFPs);
- it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
+ I.info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
}
// If we have allocated some arguments onto the stack (due to running
if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs && StackUsed) {
llvm::Type *PaddingTy = llvm::ArrayType::get(
llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
- it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
+ I.info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
}
}
FreeIntRegs, FreeVFPRegs);
FreeIntRegs = FreeVFPRegs = 8;
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it) {
- it->info = classifyGenericType(it->type, FreeIntRegs, FreeVFPRegs);
+ for (auto &I : FI.arguments()) {
+ I.info = classifyGenericType(I.type, FreeIntRegs, FreeVFPRegs);
}
}
void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
// Always honor user-specified calling convention.
if (FI.getCallingConvention() != llvm::CallingConv::C)
void computeInfo(CGFunctionInfo &FI) const override {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
}
llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
// Check if a pointer to an aggregate is passed as a hidden argument.
uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type, Offset);
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type, Offset);
}
llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyArgumentType(it->type);
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
}
ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
- for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
- it != ie; ++it)
- it->info = classifyType(it->type, 16 * 8);
+ for (auto &I : FI.arguments())
+ I.info = classifyType(I.type, 16 * 8);
}
namespace {
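Every hunk above applies the same mechanical change: drop the hand-written `CGFunctionInfo::arg_iterator` loop and iterate `FI.arguments()` with a range-based for, mutating each element's `info` in place. A minimal standalone sketch of why that is behavior-preserving is below; the `FunctionInfo`, `ArgInfo`, and `classifyArgumentType` names are illustrative stand-ins rather than the Clang types, and it assumes (as the diff implies) that `CGFunctionInfo::arguments()` returns a mutable range over the same entries the old iterator pair traversed. Note that binding with `auto &` (not `auto`) is what lets the assignment update the stored record instead of a copy.

// Illustrative sketch only: simplified stand-ins, not the Clang classes.
#include <iostream>
#include <string>
#include <vector>

struct ABIArgInfo { std::string Kind; };            // stand-in for ABIArgInfo
struct ArgInfo    { std::string type; ABIArgInfo info; };

class FunctionInfo {                                // stand-in for CGFunctionInfo
  std::vector<ArgInfo> Args;
public:
  explicit FunctionInfo(std::vector<ArgInfo> A) : Args(std::move(A)) {}
  // Old-style iterator pair...
  ArgInfo *arg_begin() { return Args.data(); }
  ArgInfo *arg_end()   { return Args.data() + Args.size(); }
  // ...and the range the refactor relies on: same elements, mutable access.
  std::vector<ArgInfo> &arguments() { return Args; }
};

static ABIArgInfo classifyArgumentType(const std::string &Ty) {
  return {Ty == "float" ? "DirectInReg" : "Direct"};
}

int main() {
  FunctionInfo FI({{"int", {}}, {"float", {}}});

  // Before: explicit iterator loop.
  //   for (ArgInfo *it = FI.arg_begin(), *ie = FI.arg_end(); it != ie; ++it)
  //     it->info = classifyArgumentType(it->type);

  // After: range-based for; I is a reference, so I.info writes through to the
  // stored ArgInfo exactly as it->info did.
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  for (const auto &I : FI.arguments())
    std::cout << I.type << " -> " << I.info.Kind << "\n";
}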