class ABIArgInfo {
public:
enum Kind {
- Direct, /// Pass the argument directly using the normal converted LLVM
- /// type, or by coercing to another specified type
- /// (stored in 'CoerceToType').
-
- Extend, /// Valid only for integer argument types. Same as 'direct'
- /// but also emit a zero/sign extension attribute.
-
- Indirect, /// Pass the argument indirectly via a hidden pointer
- /// with the specified alignment (0 indicates default
- /// alignment).
-
- Ignore, /// Ignore the argument (treat as void). Useful for
- /// void and empty structs.
-
- Expand, /// Only valid for aggregate argument types. The
- /// structure should be expanded into consecutive
- /// arguments for its constituent fields. Currently
- /// expand is only allowed on structures whose fields
- /// are all scalar types or are themselves expandable
- /// types.
+ /// Direct - Pass the argument directly using the normal converted LLVM
+ /// type, or by coercing to another specified type (stored in
+ /// 'CoerceToType'). If an offset is specified (in UIntData), then the
+ /// argument passed is offset by some number of bytes in the memory
+ /// representation.
+ Direct,
+
+ /// Extend - Valid only for integer argument types. Same as 'direct'
+ /// but also emit a zero/sign extension attribute.
+ Extend,
+
+ /// Indirect - Pass the argument indirectly via a hidden pointer
+ /// with the specified alignment (0 indicates default alignment).
+ Indirect,
+
+ /// Ignore - Ignore the argument (treat as void). Useful for void and
+ /// empty structs.
+ Ignore,
+
+ /// Expand - Only valid for aggregate argument types. The structure should
+ /// be expanded into consecutive arguments for its constituent fields.
+ /// Currently expand is only allowed on structures whose fields
+ /// are all scalar types or are themselves expandable types.
+ Expand,
KindFirst=Direct, KindLast=Expand
};
public:
ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
- static ABIArgInfo getDirect(const llvm::Type *T = 0) {
- return ABIArgInfo(Direct, T);
+ static ABIArgInfo getDirect(const llvm::Type *T = 0, unsigned Offset = 0) {
+ return ABIArgInfo(Direct, T, Offset);
}
static ABIArgInfo getExtend(const llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T);
+ return ABIArgInfo(Extend, T, 0);
}
static ABIArgInfo getIgnore() {
return ABIArgInfo(Ignore);
}
// Direct/Extend accessors
+ unsigned getDirectOffset() const {
+ assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ return UIntData;
+ }
const llvm::Type *getCoerceToType() const {
assert(canHaveCoerceToType() && "Invalid kind!");
return TypeData;
assert(canHaveCoerceToType() && "Invalid kind!");
TypeData = T;
}
+
// Indirect accessors
unsigned getIndirectAlign() const {
assert(TheKind == Indirect && "Invalid kind!");
case ABIArgInfo::Direct: {
// If we have the trivial case, handle it with no muss and fuss.
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
- ArgI.getCoerceToType() == ConvertType(Ty)) {
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
assert(AI != Fn->arg_end() && "Argument mismatch!");
llvm::Value *V = AI;
Alloca->setAlignment(AlignmentToUse);
llvm::Value *V = Alloca;
+ llvm::Value *Ptr = V; // Pointer to store into.
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgI.getDirectOffset()) {
+ Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
+ Ptr = Builder.CreateBitCast(Ptr,
+ llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
+ }
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
if (const llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
- llvm::Value *Ptr = V;
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
// Simple case, just do a coerced store of the argument into the alloca.
assert(AI != Fn->arg_end() && "Argument mismatch!");
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI++, V, /*DestIsVolatile=*/false, *this);
+ CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
}
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
-
- if (RetAI.getCoerceToType() == ConvertType(RetTy)) {
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
// The internal return value temp always will have pointer-to-return-type
// type, just do a load.
}
}
} else {
- RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+ llvm::Value *V = ReturnValue;
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
+ V = Builder.CreateConstGEP1_32(V, Offs);
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
}
break;
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
- ArgInfo.getCoerceToType() == ConvertType(info_it->type)) {
+ ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
+ ArgInfo.getDirectOffset() == 0) {
if (RV.isScalar())
Args.push_back(RV.getScalarVal());
else
} else
SrcPtr = RV.getAggregateAddr();
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgInfo.getDirectOffset()) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
+ SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
+
+ }
+
// If the coerce-to type is a first class aggregate, we flatten it and
// pass the elements. Either way is semantically identical, but fast-isel
// and the optimizer generally likes scalar values better than FCAs.
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- if (RetAI.getCoerceToType() == ConvertType(RetTy)) {
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
if (RetTy->isAnyComplexType()) {
llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
DestIsVolatile = false;
}
- CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+
if (RetTy->isAnyComplexType())
return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
if (CodeGenFunction::hasAggregateLLVMType(RetTy))
if (Lo == Memory || Hi == Memory)
break;
}
-
- // If this record has no fields, no bases, no vtable, but isn't empty,
- // classify as INTEGER.
- if (CXXRD->isEmpty() && Size)
- Current = Integer;
}
// Classify the fields one at a time, merging the results.
// Check some invariants.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
const llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
- return ABIArgInfo::getIgnore();
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
case SSEUp:
case X87Up:
case Integer: {
const llvm::Type *HiType =
GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL);
break;
}
case SSE: {
const llvm::Type *HiType =
GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType,NULL);
break;
}
if (Lo != X87) {
const llvm::Type *HiType =
GetSSETypeAtOffset(CGT.ConvertTypeRecursive(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL);
}
break;
// Check some invariants.
// FIXME: Enforce these by construction.
assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
- assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
neededInt = 0;
const llvm::Type *ResType = 0;
switch (Lo) {
case NoClass:
- return ABIArgInfo::getIgnore();
-
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
// AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
// on the stack.
case Memory:
// Pick an 8-byte type based on the preferred type.
const llvm::Type *HiType =
GetINTEGERTypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL);
break;
}
case SSE: {
const llvm::Type *HiType =
GetSSETypeAtOffset(CGT.ConvertTypeRecursive(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HiType at offset 8 in memory.
+ return ABIArgInfo::getDirect(HiType, 8);
+
ResType = llvm::StructType::get(getVMContext(), ResType, HiType, NULL);
++neededSSE;
break;
// CHECK: define weak_odr void @_ZN5Casts7static_ILj4EEEvPN9enable_ifIXleT_cvjLi4EEvE4typeE
template void static_<4>(void*);
- // CHECK: define weak_odr i8 @_ZN5Casts1fILi6EEENS_1TIXT_EEEv
+ // CHECK: define weak_odr void @_ZN5Casts1fILi6EEENS_1TIXT_EEEv
template T<6> f<6>();
}
void test3() {
T t1, t2;
- // RUN: grep "call i8 @_ZN1TplERKS_" %t
+ // RUN: grep "call void @_ZN1TplERKS_" %t
T result = t1 + t2;
}
void f2(f2_s1 a0) { }
// PR5831
-// CHECK: define void @_Z2f34s3_1(i8 %x.coerce0, i64 %x.coerce1)
+// CHECK: define void @_Z2f34s3_1(i64 %x.coerce)
struct s3_0 {};
struct s3_1 { struct s3_0 a; long b; };
void f3(struct s3_1 x) {}
}
}
-
-
namespace PR7742 { // Also rdar://8250764
struct s2 {
float a[2];
c2 foo(c2 *P) {
}
-}
\ No newline at end of file
+}
+
+namespace PR5179 {
+ struct B {};
+
+ struct B1 : B {
+ int* pa;
+ };
+
+ struct B2 : B {
+ B1 b1;
+ };
+
+ // CHECK: define i8* @_ZN6PR51793barENS_2B2E(i32* %b2.coerce)
+ const void *bar(B2 b2) {
+ return b2.b1.pa;
+ }
+}