// Compute ABI information.
getABIInfo().computeInfo(*FI);
+
+ // Loop over all of the computed argument and return value info. If any of
+ // them are direct or extend without a specified coerce type, specify the
+ // default now.
+ ABIArgInfo &RetInfo = FI->getReturnInfo();
+ if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
+ RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));
+
+ for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
+ I != E; ++I)
+ if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
+ I->info.setCoerceToType(ConvertTypeRecursive(I->type));
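+
+ // For example, for a plain 'int f(int)' both the return and the argument
+ // come back Direct with no coerce type, so this pass fills in the natural
+ // IR type (i32 on common targets) via ConvertTypeRecursive.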
// If this is a top-level call and ConvertTypeRecursive hit unresolved pointer
// types, resolve them now. These pointers may point to this function, which
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- ResultType = ConvertType(RetTy, IsRecursive);
+ ResultType = RetAI.getCoerceToType();
break;
case ABIArgInfo::Indirect: {
case ABIArgInfo::Ignore:
ResultType = llvm::Type::getVoidTy(getLLVMContext());
break;
-
- case ABIArgInfo::Coerce:
- ResultType = RetAI.getCoerceToType();
- break;
}
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::Coerce: {
+ case ABIArgInfo::Indirect: {
+ // Indirect arguments are always on the stack, which is addr space #0.
+ const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
+ ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
// If the coerce-to type is a first class aggregate, flatten it. Either
// way is semantically identical, but fast-isel and the optimizer
// generally like scalar values better than FCAs.
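+ // For example, a coerce-to type of {i64, i64} would be pushed here as two
+ // separate i64 arguments rather than as a single first-class aggregate.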
ArgTys.push_back(ArgTy);
}
break;
- }
-
- case ABIArgInfo::Indirect: {
- // indirect arguments are always on the stack, which is addr space #0.
- const llvm::Type *LTy = ConvertTypeForMem(it->type, IsRecursive);
- ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
- break;
- }
-
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- ArgTys.push_back(ConvertType(it->type, IsRecursive));
- break;
case ABIArgInfo::Expand:
GetExpandedTypes(it->type, ArgTys, IsRecursive);
RetAttrs |= llvm::Attribute::SExt;
else if (RetTy->hasUnsignedIntegerRepresentation())
RetAttrs |= llvm::Attribute::ZExt;
- // FALLTHROUGH
+ break;
case ABIArgInfo::Direct:
+ case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Indirect:
llvm::Attribute::ReadNone);
break;
- case ABIArgInfo::Ignore:
- case ABIArgInfo::Coerce:
- break;
-
case ABIArgInfo::Expand:
assert(0 && "Invalid ABI kind for return argument");
}
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
// sense to do it here because parameters are so messed up.
-
switch (AI.getKind()) {
- case ABIArgInfo::Coerce:
- if (const llvm::StructType *STy =
- dyn_cast<llvm::StructType>(AI.getCoerceToType()))
- Index += STy->getNumElements();
- else
- ++Index;
- continue; // Skip index increment.
-
- case ABIArgInfo::Indirect:
- if (AI.getIndirectByVal())
- Attributes |= llvm::Attribute::ByVal;
-
- Attributes |=
- llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
- // byval disables readnone and readonly.
- FuncAttrs &= ~(llvm::Attribute::ReadOnly |
- llvm::Attribute::ReadNone);
- break;
-
case ABIArgInfo::Extend:
if (ParamType->isSignedIntegerType())
Attributes |= llvm::Attribute::SExt;
else if (ParamType->isUnsignedIntegerType())
Attributes |= llvm::Attribute::ZExt;
- // FALLS THROUGH
+ // FALL THROUGH
case ABIArgInfo::Direct:
if (RegParm > 0 &&
(ParamType->isIntegerType() || ParamType->isPointerType())) {
RegParm -=
- (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+ (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
if (RegParm >= 0)
Attributes |= llvm::Attribute::InReg;
}
// FIXME: handle sseregparm someday...
+
+ if (const llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(AI.getCoerceToType()))
+ Index += STy->getNumElements()-1; // 1 will be added below.
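+ // For example, a {i32, i32} coerce type flattens into two IR arguments, so
+ // the attribute index has to step over both elements.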
+ break;
+
+ case ABIArgInfo::Indirect:
+ if (AI.getIndirectByVal())
+ Attributes |= llvm::Attribute::ByVal;
+
+ Attributes |=
+ llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
+ // byval disables readnone and readonly.
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
break;
case ABIArgInfo::Ignore:
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
- llvm::Value *V = AI;
- if (hasAggregateLLVMType(Ty)) {
- // Create a temporary alloca to hold the argument; the rest of
- // codegen expects to access aggregates & complex values by
- // reference.
- V = CreateMemTemp(Ty);
- Builder.CreateStore(AI, V);
- } else {
+ // If we have the trivial case, handle it with no muss and fuss.
+ if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
+ ArgI.getCoerceToType() == ConvertType(Ty)) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ llvm::Value *V = AI;
+
if (Arg->getType().isRestrictQualified())
AI->addAttr(llvm::Attribute::NoAlias);
// "void a(x) short x; {..."
V = EmitScalarConversion(V, Ty, Arg->getType());
}
+ EmitParmDecl(*Arg, V);
+ break;
}
- EmitParmDecl(*Arg, V);
- break;
- }
- case ABIArgInfo::Expand: {
- // If this structure was expanded into multiple arguments then
- // we need to create a temporary and reconstruct it from the
- // arguments.
- llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
- // FIXME: What are the right qualifiers here?
- llvm::Function::arg_iterator End =
- ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
- EmitParmDecl(*Arg, Temp);
-
- // Name the arguments used in expansion and increment AI.
- unsigned Index = 0;
- for (; AI != End; ++AI, ++Index)
- AI->setName(Arg->getName() + "." + llvm::Twine(Index));
- continue;
- }
-
- case ABIArgInfo::Ignore:
- // Initialize the local variable appropriately.
- if (hasAggregateLLVMType(Ty)) {
- EmitParmDecl(*Arg, CreateMemTemp(Ty));
- } else {
- EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
- }
-
- // Skip increment, no matching LLVM parameter.
- continue;
-
- case ABIArgInfo::Coerce: {
llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");
// The alignment we need to use is the max of the requested alignment for
EmitParmDecl(*Arg, V);
continue; // Skip ++AI increment, already done.
}
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
+ // FIXME: What are the right qualifiers here?
+ llvm::Function::arg_iterator End =
+ ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
+ EmitParmDecl(*Arg, Temp);
+
+ // Name the arguments used in expansion and increment AI.
+ unsigned Index = 0;
+ for (; AI != End; ++AI, ++Index)
+ AI->setName(Arg->getName() + "." + llvm::Twine(Index));
+ continue;
+ }
+
+ case ABIArgInfo::Ignore:
+ // Initialize the local variable appropriately.
+ if (hasAggregateLLVMType(Ty))
+ EmitParmDecl(*Arg, CreateMemTemp(Ty));
+ else
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
+
+ // Skip increment, no matching LLVM parameter.
+ continue;
}
++AI;
break;
case ABIArgInfo::Extend:
- case ABIArgInfo::Direct: {
- // The internal return value temp always will have pointer-to-return-type
- // type, just do a load.
-
- // If the instruction right before the insertion point is a store to the
- // return value, we can elide the load, zap the store, and usually zap the
- // alloca.
- llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
- llvm::StoreInst *SI = 0;
- if (InsertBB->empty() ||
- !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
- SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
- RV = Builder.CreateLoad(ReturnValue);
- } else {
- // Get the stored value and nuke the now-dead store.
- RetDbgLoc = SI->getDebugLoc();
- RV = SI->getValueOperand();
- SI->eraseFromParent();
-
- // If that was the only use of the return value, nuke it as well now.
- if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
- cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
- ReturnValue = 0;
+ case ABIArgInfo::Direct:
+
+ if (RetAI.getCoerceToType() == ConvertType(RetTy)) {
+ // The internal return value temp will always have pointer-to-return-type
+ // type; just do a load.
+
+ // If the instruction right before the insertion point is a store to the
+ // return value, we can elide the load, zap the store, and usually zap the
+ // alloca.
+ llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
+ llvm::StoreInst *SI = 0;
+ if (InsertBB->empty() ||
+ !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
+ SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
+ RV = Builder.CreateLoad(ReturnValue);
+ } else {
+ // Get the stored value and nuke the now-dead store.
+ RetDbgLoc = SI->getDebugLoc();
+ RV = SI->getValueOperand();
+ SI->eraseFromParent();
+
+ // If that was the only use of the return value, nuke it as well now.
+ if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
+ cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
+ ReturnValue = 0;
+ }
}
+ } else {
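+ // The return slot's type differs from the coerce-to type, so rebuild the
+ // ABI return value with a coerced load from the return slot.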
+ RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
}
break;
- }
- case ABIArgInfo::Ignore:
- break;
- case ABIArgInfo::Coerce:
- RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ case ABIArgInfo::Ignore:
break;
case ABIArgInfo::Expand:
}
break;
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- if (RV.isScalar()) {
- Args.push_back(RV.getScalarVal());
- } else if (RV.isComplex()) {
- llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
- Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
- Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
- Args.push_back(Tmp);
- } else {
- Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
- }
- break;
-
case ABIArgInfo::Ignore:
break;
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
+ ArgInfo.getCoerceToType() == ConvertType(info_it->type)) {
+ if (RV.isScalar())
+ Args.push_back(RV.getScalarVal());
+ else
+ Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+ break;
+ }
- case ABIArgInfo::Coerce: {
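+
+ // Non-trivial case: convert the value through memory, spilling RV to a
+ // temporary and reloading it as the coerce-to type.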
// FIXME: Avoid the conversion through memory if possible.
llvm::Value *SrcPtr;
if (RV.isScalar()) {
return RValue::getAggregate(Args[0]);
return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct:
- if (RetTy->isAnyComplexType()) {
- llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
- llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
- return RValue::getComplex(std::make_pair(Real, Imag));
- }
- if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
- llvm::Value *DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
-
- if (!DestPtr) {
- DestPtr = CreateMemTemp(RetTy, "agg.tmp");
- DestIsVolatile = false;
- }
- Builder.CreateStore(CI, DestPtr, DestIsVolatile);
- return RValue::getAggregate(DestPtr);
- }
- return RValue::get(CI);
-
case ABIArgInfo::Ignore:
// If we are ignoring an argument that had a result, make sure to
// construct the appropriate return value for our caller.
return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ if (RetAI.getCoerceToType() == ConvertType(RetTy)) {
+ if (RetTy->isAnyComplexType()) {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
- case ABIArgInfo::Coerce: {
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ Builder.CreateStore(CI, DestPtr, DestIsVolatile);
+ return RValue::getAggregate(DestPtr);
+ }
+ return RValue::get(CI);
+ }
+
llvm::Value *DestPtr = ReturnValue.getValue();
bool DestIsVolatile = ReturnValue.isVolatile();
OS << "(ABIArgInfo Kind=";
switch (TheKind) {
case Direct:
- OS << "Direct";
+ OS << "Direct Type=";
+ if (const llvm::Type *Ty = getCoerceToType())
+ Ty->print(OS);
+ else
+ OS << "null";
break;
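+ // With a defaulted coerce type this prints, e.g.,
+ // "(ABIArgInfo Kind=Direct Type=i32)"; before defaulting it prints
+ // "Type=null".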
case Extend:
OS << "Extend";
case Ignore:
OS << "Ignore";
break;
- case Coerce:
- OS << "Coerce Type=";
- getCoerceToType()->print(OS);
- break;
case Indirect:
OS << "Indirect Align=" << getIndirectAlign()
<< " Byal=" << getIndirectByVal();
// registers and we need to make sure to pick a type the LLVM
// backend will like.
if (Size == 128)
- return ABIArgInfo::getCoerce(llvm::VectorType::get(
+ return ABIArgInfo::getDirect(llvm::VectorType::get(
llvm::Type::getInt64Ty(getVMContext()), 2));
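+ // For example, a 128-bit <4 x float> return is coerced to <2 x i64>, which
+ // the backend presumably hands back in an SSE register.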
// Always return in register if it fits in a general purpose
// register, or if it is 64 bits and has a single element.
if ((Size == 8 || Size == 16 || Size == 32) ||
(Size == 64 && VT->getNumElements() == 1))
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(getVMContext(),
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
Size));
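+ // e.g. a 32-bit <2 x i16> comes back as a plain i32 in a GPR.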
return ABIArgInfo::getIndirect(0);
// bit-fields can adjust that to be larger than the single
// element type.
uint64_t Size = getContext().getTypeSize(RetTy);
- return ABIArgInfo::getCoerce(
+ return ABIArgInfo::getDirect(
llvm::IntegerType::get(getVMContext(), (unsigned)Size));
}
assert(getContext().getTypeSize(RetTy) ==
getContext().getTypeSize(SeltTy) &&
"Unexpect single element structure size!");
- return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getFloatTy(getVMContext()));
}
if (BT->getKind() == BuiltinType::Double) {
assert(getContext().getTypeSize(RetTy) ==
getContext().getTypeSize(SeltTy) &&
"Unexpect single element structure size!");
- return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getDoubleTy(getVMContext()));
}
} else if (SeltTy->isPointerType()) {
// FIXME: It would be really nice if this could come out as the proper
// pointer type.
const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(getVMContext());
- return ABIArgInfo::getCoerce(PtrTy);
+ return ABIArgInfo::getDirect(PtrTy);
} else if (SeltTy->isVectorType()) {
// 64- and 128-bit vectors are never returned in a
// register when inside a structure.
// in a register.
if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext())) {
uint64_t Size = getContext().getTypeSize(RetTy);
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(getVMContext(),Size));
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
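+ // For example, a 4-byte struct { short a, b; } is returned directly as an
+ // i32 here instead of through a hidden sret pointer.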
}
return ABIArgInfo::getIndirect(0);
CoerceTo = llvm::Type::getFloatTy(CoerceTo->getContext());
}
- return ABIArgInfo::getCoerce(CoerceTo);
+ return ABIArgInfo::getDirect(CoerceTo);
}
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
STy = dyn_cast<llvm::StructType>(IRType);
}
-
-
// If the preferred type is a 16-byte vector, prefer to pass it.
if (const llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
const llvm::Type *EltTy = VT->getElementType();
"reg_save_area");
if (neededInt && neededSSE) {
// FIXME: Cleanup.
- assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
+ assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
const llvm::Type* STy = llvm::StructType::get(getVMContext(), LLVMFields,
true);
- return ABIArgInfo::getCoerce(STy);
+ return ABIArgInfo::getDirect(STy);
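+ // For example (assuming ElemTy is i32 and SizeRegs is 3), a 12-byte struct
+ // would be passed as { [3 x i32] }, filling exactly three core registers.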
}
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
// FIXME: Consider using 2 x vector types if the back end handles them
// correctly.
if (RetTy->isAnyComplexType())
- return ABIArgInfo::getCoerce(llvm::IntegerType::get(getVMContext(),
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
getContext().getTypeSize(RetTy)));
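+ // e.g. a '_Complex short' (32 bits total) is returned as a single i32.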
// Integer like structures are returned in r0.
// Return in the smallest viable integer type.
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 8)
- return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
if (Size <= 16)
- return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(getVMContext()));
- return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
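+ // e.g. a 3-byte integer-like struct widens to i32 and travels in r0.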
}
// Otherwise return in memory.
if (Size <= 32) {
// Return in the smallest viable integer type.
if (Size <= 8)
- return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
if (Size <= 16)
- return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(getVMContext()));
- return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
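+ // Same widening as above: aggregates of 32 bits or fewer are returned as
+ // the smallest of i8/i16/i32 that holds them.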
}
return ABIArgInfo::getIndirect(0);