1 //===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This is the code that handles AST -> LLVM type lowering.
12 //===----------------------------------------------------------------------===//
14 #include "CodeGenTypes.h"
15 #include "clang/Basic/TargetInfo.h"
16 #include "clang/AST/AST.h"
17 #include "llvm/DerivedTypes.h"
18 #include "llvm/Module.h"
19 #include "llvm/Target/TargetData.h"
21 using namespace clang;
22 using namespace CodeGen;
25 /// RecordOrganizer - This helper class, used by CGRecordLayout, lays out
26 /// structs and unions. It manages transient information used during layout.
27 /// FIXME : Handle field alignments. Handle packed structs.
// NOTE(review): several lines of this declaration (constructor tail, accessor
// bodies, access specifiers, remaining members, closing brace) are elided from
// this listing — the embedded line numbers are discontinuous.
28 class RecordOrganizer {
// Constructor: binds this organizer to the CodeGenTypes instance whose
// ConvertType/TargetData services are used during layout.
30 explicit RecordOrganizer(CodeGenTypes &Types) :
31 CGT(Types), STy(NULL), llvmFieldNo(0), Cursor(0),
34 /// addField - Add new field.
35 void addField(const FieldDecl *FD);
37 /// addLLVMField - Add llvm struct field that corresponds to llvm type Ty.
38 /// Increment field count.
39 void addLLVMField(const llvm::Type *Ty, bool isPaddingField = false);
41 /// addPaddingFields - Current cursor is not suitable place to add next
42 /// field. Add required padding fields.
43 void addPaddingFields(unsigned WaterMark);
45 /// layoutStructFields - Do the actual work and lay out all fields. Create
46 /// corresponding llvm struct type. This should be invoked only after
47 /// all fields are added.
48 void layoutStructFields(const ASTRecordLayout &RL);
50 /// layoutUnionFields - Do the actual work and lay out all fields. Create
51 /// corresponding llvm struct type. This should be invoked only after
52 /// all fields are added.
53 void layoutUnionFields();
55 /// getLLVMType - Return associated llvm struct type. This may be NULL
56 /// if fields are not laid out.
57 llvm::Type *getLLVMType() const {
61 /// placeBitField - Find a place for FD, which is a bit-field.
62 void placeBitField(const FieldDecl *FD);
// getPaddingFields - Accessor for the set of field indices that are padding
// (body elided in this listing).
64 llvm::SmallSet<unsigned, 8> &getPaddingFields() {
// Transient layout state:
// FieldDecls - AST fields queued by addField, consumed by layout*Fields.
74 llvm::SmallVector<const FieldDecl *, 8> FieldDecls;
// LLVMFields - the llvm element types accumulated for the struct being built.
75 std::vector<const llvm::Type*> LLVMFields;
// Offsets - bit offset of each emitted llvm field (see addLLVMField).
76 llvm::SmallVector<uint64_t, 8> Offsets;
// PaddingFields - indices of llvm fields inserted purely as padding.
77 llvm::SmallSet<unsigned, 8> PaddingFields;
// CodeGenTypes constructor - record the ASTContext, target info (taken from
// the context), module, and target data layout used by the conversion methods.
81 CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
82 const llvm::TargetData &TD)
83 : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD) {
// ~CodeGenTypes - Tear down the record-layout cache. The loop body is elided
// in this listing; presumably it deletes each owned CGRecordLayout before the
// map is cleared — TODO confirm against the full source.
86 CodeGenTypes::~CodeGenTypes() {
87 for(llvm::DenseMap<const TagDecl *, CGRecordLayout *>::iterator
88 I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
91 CGRecordLayouts.clear();
94 /// ConvertType - Convert the specified type to its LLVM form.
// Results are memoized in TypeHolderMap (keyed by the canonical Type*), so
// repeated conversions of the same QualType hit the cache.
95 const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
96 // See if type is already cached.
97 llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
98 I = TypeHolderMap.find(T.getTypePtr());
99 // If type is found in map and this is not a definition for an opaque
100 // place holder type then use it. Otherwise convert type T.
101 if (I != TypeHolderMap.end())
102 return I->second.get();
// Cache miss: do the real conversion and remember the result in a
// PATypeHolder so it survives abstract-type refinement.
104 const llvm::Type *ResultType = ConvertNewType(T);
105 TypeHolderMap.insert(std::make_pair(T.getTypePtr(),
106 llvm::PATypeHolder(ResultType)));
// NOTE(review): the final `return ResultType;` line is elided in this listing.
110 /// ConvertTypeForMem - Convert type T into a llvm::Type. Maintain and use
111 /// type cache through TypeHolderMap. This differs from ConvertType in that
112 /// it is used to convert to the memory representation for a type. For
113 /// example, the scalar representation for _Bool is i1, but the memory
114 /// representation is usually i8 or i32, depending on the target.
115 const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
116 const llvm::Type *R = ConvertType(T);
118 // If this is a non-bool type, don't map it.
// (The `return R;` under this guard is elided in this listing.)
119 if (R != llvm::Type::Int1Ty)
122 // Otherwise, return an integer of the target-specified size.
123 unsigned BoolWidth = (unsigned)Context.getTypeSize(T, SourceLocation());
124 return llvm::IntegerType::get(BoolWidth);
128 /// UpdateCompletedType - When we find the full definition for a TagDecl,
129 /// replace the 'opaque' type we previously made for it if applicable.
130 void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
131 llvm::DenseMap<const TagDecl*, llvm::PATypeHolder>::iterator TDTI =
132 TagDeclTypes.find(TD);
// If we never made a type for this decl there is nothing to refine.
133 if (TDTI == TagDeclTypes.end()) return;
135 // Remember the opaque LLVM type for this tagdecl.
136 llvm::PATypeHolder OpaqueHolder = TDTI->second;
137 assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
138 "Updating compilation of an already non-opaque type?");
140 // Remove it from TagDeclTypes so that it will be regenerated.
141 TagDeclTypes.erase(TDTI);
// Re-convert the tag type now that the full definition is available.
143 QualType NewTy = Context.getTagDeclType(const_cast<TagDecl*>(TD));
144 const llvm::Type *NT = ConvertNewType(NewTy);
146 // If getting the type didn't itself refine it, refine it to its actual type
// (refineAbstractTypeTo rewrites all users of the opaque placeholder).
148 if (llvm::OpaqueType *OT = dyn_cast<llvm::OpaqueType>(OpaqueHolder.get()))
149 OT->refineAbstractTypeTo(NT);
// ConvertNewType - Uncached conversion of a clang QualType to an LLVM type.
// Dispatches on the canonical type's class. NOTE(review): a number of lines
// (closing braces, several `case` labels — e.g. the ones introducing the
// ASQualType and Tagged cases — and the else of the result-type branch) are
// elided from this listing; the embedded line numbers are discontinuous.
154 const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
155 const clang::Type &Ty = *T.getCanonicalType();
157 switch (Ty.getTypeClass()) {
158 case Type::TypeName: // typedef isn't canonical.
159 case Type::TypeOfExp: // typeof isn't canonical.
160 case Type::TypeOfTyp: // typeof isn't canonical.
161 assert(0 && "Non-canonical type, shouldn't happen");
162 case Type::Builtin: {
163 switch (cast<BuiltinType>(Ty).getKind()) {
164 case BuiltinType::Void:
165 // LLVM void type can only be used as the result of a function call. Just
166 // map to the same as char.
167 return llvm::IntegerType::get(8);
169 case BuiltinType::Bool:
170 // Note that we always return bool as i1 for use as a scalar type.
171 return llvm::Type::Int1Ty;
173 case BuiltinType::Char_S:
174 case BuiltinType::Char_U:
175 case BuiltinType::SChar:
176 case BuiltinType::UChar:
177 case BuiltinType::Short:
178 case BuiltinType::UShort:
179 case BuiltinType::Int:
180 case BuiltinType::UInt:
181 case BuiltinType::Long:
182 case BuiltinType::ULong:
183 case BuiltinType::LongLong:
184 case BuiltinType::ULongLong:
// All integer builtins map to an iN of the target-specified width.
185 return llvm::IntegerType::get(
186 static_cast<unsigned>(Context.getTypeSize(T, SourceLocation())));
188 case BuiltinType::Float: return llvm::Type::FloatTy;
189 case BuiltinType::Double: return llvm::Type::DoubleTy;
190 case BuiltinType::LongDouble:
191 // FIXME: mapping long double onto double.
192 return llvm::Type::DoubleTy;
// _Complex T lowers to the struct { T, T } (real, imaginary).
196 case Type::Complex: {
197 std::vector<const llvm::Type*> Elts;
198 Elts.push_back(ConvertType(cast<ComplexType>(Ty).getElementType()));
199 Elts.push_back(Elts[0]);
200 return llvm::StructType::get(Elts);
202 case Type::Pointer: {
203 const PointerType &P = cast<PointerType>(Ty);
204 QualType ETy = P.getPointeeType();
// Preserve the pointee's address space on the LLVM pointer type.
205 return llvm::PointerType::get(ConvertType(ETy), ETy.getAddressSpace());
207 case Type::Reference: {
208 const ReferenceType &R = cast<ReferenceType>(Ty);
// C++ references lower to plain (address-space-0) pointers.
209 return llvm::PointerType::getUnqual(ConvertType(R.getReferenceeType()));
212 case Type::VariableArray: {
213 const VariableArrayType &A = cast<VariableArrayType>(Ty);
214 assert(A.getSizeModifier() == ArrayType::Normal &&
215 A.getIndexTypeQualifier() == 0 &&
216 "FIXME: We only handle trivial array types so far!");
217 if (A.getSizeExpr() == 0) {
218 // int X[] -> [0 x int]
219 return llvm::ArrayType::get(ConvertType(A.getElementType()), 0);
221 assert(0 && "FIXME: VLAs not implemented yet!");
224 case Type::ConstantArray: {
225 const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
226 const llvm::Type *EltTy = ConvertType(A.getElementType());
227 return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
// Vector and OCUVector types share the same lowering to llvm vectors.
229 case Type::OCUVector:
231 const VectorType &VT = cast<VectorType>(Ty);
232 return llvm::VectorType::get(ConvertType(VT.getElementType()),
233 VT.getNumElements());
235 case Type::FunctionNoProto:
236 case Type::FunctionProto: {
237 const FunctionType &FP = cast<FunctionType>(Ty);
238 const llvm::Type *ResultType;
240 if (FP.getResultType()->isVoidType())
241 ResultType = llvm::Type::VoidTy; // Result of function uses llvm void.
// (The `else` introducing this assignment is elided in this listing.)
243 ResultType = ConvertType(FP.getResultType());
245 // FIXME: Convert argument types.
247 std::vector<const llvm::Type*> ArgTys;
249 // Struct return passes the struct byref.
250 if (!ResultType->isFirstClassType() && ResultType != llvm::Type::VoidTy) {
// The aggregate result becomes a hidden first pointer argument ("sret"
// style) and the function's LLVM result becomes void.
251 const llvm::Type *RType = llvm::PointerType::get(ResultType,
252 FP.getResultType().getAddressSpace());
253 QualType RTy = Context.getPointerType(FP.getResultType());
254 TypeHolderMap.insert(std::make_pair(RTy.getTypePtr(),
255 llvm::PATypeHolder(RType)));
257 ArgTys.push_back(RType);
258 ResultType = llvm::Type::VoidTy;
// Prototyped functions contribute their declared argument types; the
// declaration of `isVarArg` (and the no-proto default) is elided here.
261 if (const FunctionTypeProto *FTP = dyn_cast<FunctionTypeProto>(&FP)) {
262 DecodeArgumentTypes(*FTP, ArgTys);
263 isVarArg = FTP->isVariadic();
268 return llvm::FunctionType::get(ResultType, ArgTys, isVarArg);
// Address-space-qualified types delegate to their base type.
272 return ConvertType(cast<ASQualType>(Ty).getBaseType());
274 case Type::ObjCInterface:
275 assert(0 && "FIXME: add missing functionality here");
278 case Type::ObjCQualifiedInterface:
279 assert(0 && "FIXME: add missing functionality here");
282 case Type::ObjCQualifiedId:
283 assert(0 && "FIXME: add missing functionality here");
// Tagged types (struct/union/enum) are handled by ConvertTagDeclType.
287 return ConvertTagDeclType(T, cast<TagType>(Ty).getDecl());
// Fallback: an opaque placeholder for anything not handled above.
291 return llvm::OpaqueType::get();
// DecodeArgumentTypes - Append the LLVM types for FTP's arguments to ArgTys.
// First-class (scalar) types are passed directly; aggregates are passed by
// pointer, and the pointer type is cached in TypeHolderMap. The `else` line
// between the two branches is elided in this listing.
294 void CodeGenTypes::DecodeArgumentTypes(const FunctionTypeProto &FTP,
295 std::vector<const llvm::Type*> &ArgTys) {
296 for (unsigned i = 0, e = FTP.getNumArgs(); i != e; ++i) {
297 const llvm::Type *Ty = ConvertType(FTP.getArgType(i));
298 if (Ty->isFirstClassType())
299 ArgTys.push_back(Ty);
// Aggregate argument: pass a pointer in the argument's address space.
301 QualType ATy = FTP.getArgType(i);
302 QualType PTy = Context.getPointerType(ATy);
303 unsigned AS = ATy.getAddressSpace();
304 const llvm::Type *PtrTy = llvm::PointerType::get(Ty, AS);
305 TypeHolderMap.insert(std::make_pair(PTy.getTypePtr(),
306 llvm::PATypeHolder(PtrTy)));
308 ArgTys.push_back(PtrTy);
313 /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
// enum. NOTE(review): a number of lines (the early return, closing braces,
// several else/brace lines) are elided in this listing.
315 const llvm::Type *CodeGenTypes::ConvertTagDeclType(QualType T,
317 llvm::DenseMap<const TagDecl*, llvm::PATypeHolder>::iterator TDTI =
318 TagDeclTypes.find(TD);
320 // If corresponding llvm type is not an opaque struct type
// then it is already converted; the early return under this condition is
// elided in this listing.
322 if (TDTI != TagDeclTypes.end() && // Don't have a type?
323 // Have a type, but it was opaque before and now we have a definition.
324 (!isa<llvm::OpaqueType>(TDTI->second.get()) || !TD->isDefinition()))
327 llvm::Type *ResultType = 0;
// Forward declaration only: use an opaque placeholder until the definition
// arrives (see UpdateCompletedType).
329 if (!TD->isDefinition()) {
330 ResultType = llvm::OpaqueType::get();
331 TagDeclTypes.insert(std::make_pair(TD, ResultType));
332 } else if (TD->getKind() == Decl::Enum) {
333 // Don't bother storing enums in TagDeclTypes.
334 return ConvertType(cast<EnumDecl>(TD)->getIntegerType());
335 } else if (TD->getKind() == Decl::Struct) {
336 const RecordDecl *RD = cast<const RecordDecl>(TD);
338 // If this is nested record and this RecordDecl is already under
339 // process then return associated OpaqueType for now.
340 if (TDTI == TagDeclTypes.end()) {
341 // Create new OpaqueType now for later use in case this is a recursive
342 // type. This will later be refined to the actual type.
343 ResultType = llvm::OpaqueType::get();
344 TagDeclTypes.insert(std::make_pair(TD, ResultType));
345 TypeHolderMap.insert(std::make_pair(T.getTypePtr(), ResultType));
// Lay out all members via RecordOrganizer to get the llvm struct type.
349 RecordOrganizer RO(*this);
350 for (unsigned i = 0, e = RD->getNumMembers(); i != e; ++i)
351 RO.addField(RD->getMember(i));
352 const ASTRecordLayout &RL = Context.getASTRecordLayout(RD,
354 RO.layoutStructFields(RL);
356 // Get llvm::StructType.
357 CGRecordLayout *RLI = new CGRecordLayout(RO.getLLVMType(),
358 RO.getPaddingFields());
359 ResultType = RLI->getLLVMType();
360 TagDeclTypes.insert(std::make_pair(TD, ResultType));
361 CGRecordLayouts[TD] = RLI;
363 // Refining away Opaque could cause ResultType to become invalidated.
364 // Keep it in a happy little type holder to handle this.
365 llvm::PATypeHolder Holder(ResultType);
367 // Refine the OpaqueType associated with this RecordDecl.
368 cast<llvm::OpaqueType>(TagDeclTypes.find(TD)->second.get())
369 ->refineAbstractTypeTo(ResultType);
371 ResultType = Holder.get();
372 } else if (TD->getKind() == Decl::Union) {
373 const RecordDecl *RD = cast<const RecordDecl>(TD);
374 // Just use the largest element of the union, breaking ties with the
375 // highest aligned member.
377 if (RD->getNumMembers() != 0) {
378 RecordOrganizer RO(*this);
379 for (unsigned i = 0, e = RD->getNumMembers(); i != e; ++i)
380 RO.addField(RD->getMember(i));
381 RO.layoutUnionFields();
383 // Get llvm::StructType.
384 CGRecordLayout *RLI = new CGRecordLayout(RO.getLLVMType(),
385 RO.getPaddingFields());
386 ResultType = RLI->getLLVMType();
387 TagDeclTypes.insert(std::make_pair(TD, ResultType));
388 CGRecordLayouts[TD] = RLI;
// Empty union (the else introducing this branch is elided): lower to an
// empty struct type.
390 std::vector<const llvm::Type*> Fields;
391 ResultType = llvm::StructType::get(Fields);
392 TagDeclTypes.insert(std::make_pair(TD, ResultType));
// Unhandled tag kinds (e.g. class) fall through to this assertion.
395 assert(0 && "FIXME: Implement tag decl kind!");
// Give the module a readable name for the type, e.g. "struct.foo".
398 std::string TypeName(TD->getKindName());
401 // Name the codegen type after the typedef name
402 // if there is no tag type name available
403 if (TD->getIdentifier() == 0) {
404 if (T->getTypeClass() == Type::TypeName) {
405 const TypedefType *TdT = cast<TypedefType>(T);
406 TypeName += TdT->getDecl()->getName();
410 TypeName += TD->getName();
413 TheModule.addTypeName(TypeName, ResultType);
417 /// getLLVMFieldNo - Return llvm::StructType element number
418 /// that corresponds to the field FD.
// Asserts if the field was never registered via addFieldInfo; the final
// `return I->second;` line is elided in this listing.
419 unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
420 llvm::DenseMap<const FieldDecl *, unsigned>::iterator
421 I = FieldInfo.find(FD);
422 assert (I != FieldInfo.end() && "Unable to find field info");
426 /// addFieldInfo - Assign field number to field FD.
// (The one-line body storing into the FieldInfo map is elided in this
// listing.)
427 void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) {
431 /// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD.
// Asserts if the field was never registered via addBitFieldInfo; the final
// `return I->second;` line is elided in this listing.
432 CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) {
433 llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator
434 I = BitFields.find(FD);
435 assert (I != BitFields.end() && "Unable to find bitfield info");
439 /// addBitFieldInfo - Assign a start bit and a size to field FD.
// Begin is the bit offset within the containing llvm field; Size is the
// declared bit-field width.
440 void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned Begin,
442 BitFields.insert(std::make_pair(FD, BitFieldInfo(Begin, Size)));
445 /// getCGRecordLayout - Return record layout info for the given llvm::Type.
// Asserts if TD was never laid out via ConvertTagDeclType; the final
// `return I->second;` line is elided in this listing.
446 const CGRecordLayout *
447 CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
448 llvm::DenseMap<const TagDecl*, CGRecordLayout *>::iterator I
449 = CGRecordLayouts.find(TD);
450 assert (I != CGRecordLayouts.end()
451 && "Unable to find record layout information for type");
455 /// addField - Add new field.
// Fields may only be queued before layout*Fields() has built STy.
456 void RecordOrganizer::addField(const FieldDecl *FD) {
457 assert (!STy && "Record fields are already laid out");
458 FieldDecls.push_back(FD);
461 /// layoutStructFields - Do the actual work and lay out all fields. Create
462 /// corresponding llvm struct type. This should be invoked only after
463 /// all fields are added.
464 /// FIXME : At the moment assume
465 /// - one to one mapping between AST FieldDecls and
466 /// llvm::StructType elements.
467 /// - Ignore bit fields
468 /// - Ignore field alignments
469 /// - Ignore packed structs
// NOTE(review): several interior lines (the declaration of `llvmSize`, the
// bit-field `continue` path, the addLLVMField call for each field) are elided
// in this listing.
470 void RecordOrganizer::layoutStructFields(const ASTRecordLayout &RL) {
471 // FIXME : Use SmallVector
478 for (llvm::SmallVector<const FieldDecl *, 8>::iterator I = FieldDecls.begin(),
479 E = FieldDecls.end(); I != E; ++I) {
480 const FieldDecl *FD = *I;
// Bit-fields take the placeBitField path (call elided in this listing).
482 if (FD->isBitField())
485 const llvm::Type *Ty = CGT.ConvertType(FD->getType());
// Record which llvm element this AST field landed in.
487 CGT.addFieldInfo(FD, llvmFieldNo - 1);
// Pad the tail of the struct out to the record's alignment.
492 unsigned StructAlign = RL.getAlignment();
493 if (llvmSize % StructAlign) {
494 unsigned StructPadding = StructAlign - (llvmSize % StructAlign);
495 addPaddingFields(llvmSize + StructPadding);
498 STy = llvm::StructType::get(LLVMFields);
501 /// addPaddingFields - Current cursor is not suitable place to add next field.
502 /// Add required padding fields.
// WaterMark is the target bit offset; emit i8 padding fields (rounding the
// gap up to whole bytes) until the cursor reaches it.
503 void RecordOrganizer::addPaddingFields(unsigned WaterMark) {
504 unsigned RequiredBits = WaterMark - llvmSize;
505 unsigned RequiredBytes = (RequiredBits + 7) / 8;
506 for (unsigned i = 0; i != RequiredBytes; ++i)
507 addLLVMField(llvm::Type::Int8Ty, true);
510 /// addLLVMField - Add llvm struct field that corresponds to llvm type Ty.
511 /// Increment field count.
// NOTE(review): some interior lines (the llvmSize/llvmFieldNo updates and the
// isPaddingField guard around the PaddingFields insert) are elided in this
// listing.
512 void RecordOrganizer::addLLVMField(const llvm::Type *Ty, bool isPaddingField) {
// Align the current cursor to Ty's ABI alignment first.
514 unsigned AlignmentInBits = CGT.getTargetData().getABITypeAlignment(Ty) * 8;
515 if (llvmSize % AlignmentInBits) {
516 // At the moment, insert padding fields even if target specific llvm
517 // type alignment enforces implicit padding fields for FD. Later on,
518 // optimize llvm fields by removing implicit padding fields and
519 // combining consecutive padding fields.
520 unsigned Padding = AlignmentInBits - (llvmSize % AlignmentInBits);
521 addPaddingFields(llvmSize + Padding);
524 unsigned TySize = CGT.getTargetData().getABITypeSizeInBits(Ty);
// Remember this field's bit offset for later bit-field placement.
525 Offsets.push_back(llvmSize);
528 PaddingFields.insert(llvmFieldNo);
529 LLVMFields.push_back(Ty);
533 /// layoutUnionFields - Do the actual work and lay out all fields. Create
534 /// corresponding llvm struct type. This should be invoked only after
535 /// all fields are added.
// A union lowers to a single-element struct holding its "primary" member:
// the largest member, ties broken by higher alignment. Every AST field maps
// to llvm element 0. NOTE(review): the line updating PrimaryEltNo inside the
// winning branch is elided in this listing.
536 void RecordOrganizer::layoutUnionFields() {
538 unsigned PrimaryEltNo = 0;
// getTypeInfo returns (size in bits, alignment) for the member type.
539 std::pair<uint64_t, unsigned> PrimaryElt =
540 CGT.getContext().getTypeInfo(FieldDecls[0]->getType(), SourceLocation());
541 CGT.addFieldInfo(FieldDecls[0], 0);
543 unsigned Size = FieldDecls.size();
544 for(unsigned i = 1; i != Size; ++i) {
545 const FieldDecl *FD = FieldDecls[i];
546 assert (!FD->isBitField() && "Bit fields are not yet supported");
547 std::pair<uint64_t, unsigned> EltInfo =
548 CGT.getContext().getTypeInfo(FD->getType(), SourceLocation());
550 // Use largest element, breaking ties with the highest aligned member.
551 if (EltInfo.first > PrimaryElt.first ||
552 (EltInfo.first == PrimaryElt.first &&
553 EltInfo.second > PrimaryElt.second)) {
554 PrimaryElt = EltInfo;
558 // In union, each field gets first slot.
559 CGT.addFieldInfo(FD, 0);
// Build the single-element struct from the winning member's llvm type.
562 std::vector<const llvm::Type*> Fields;
563 const llvm::Type *Ty = CGT.ConvertType(FieldDecls[PrimaryEltNo]->getType());
564 Fields.push_back(Ty);
565 STy = llvm::StructType::get(Fields);
568 /// placeBitField - Find a place for FD, which is a bit-field.
569 /// This function searches for the last aligned field. If the bit-field fits in
570 /// it, it is reused. Otherwise, the bit-field is placed in a new field.
// NOTE(review): several lines are elided in this listing — among them the
// declaration/assignment of `isBitField`, the guard selecting the
// "first field" special case, the else introducing the new-field branch, and
// loop-exit breaks. Read alongside the full source before modifying.
571 void RecordOrganizer::placeBitField(const FieldDecl *FD) {
573 assert (FD->isBitField() && "FD is not a bit-field");
// The bit-field width must be an integer constant expression.
574 Expr *BitWidth = FD->getBitWidth();
575 llvm::APSInt FieldSize(32);
577 BitWidth->isIntegerConstantExpr(FieldSize, CGT.getContext());
578 assert (isBitField && "Invalid BitField size expression");
579 uint64_t BitFieldSize = FieldSize.getZExtValue();
581 bool FoundPrevField = false;
582 unsigned TotalOffsets = Offsets.size();
583 const llvm::Type *Ty = CGT.ConvertType(FD->getType());
584 uint64_t TySize = CGT.getTargetData().getABITypeSizeInBits(Ty);
587 // Special case: the first field.
588 CGT.addFieldInfo(FD, llvmFieldNo);
589 CGT.addBitFieldInfo(FD, 0, BitFieldSize);
590 addPaddingFields(BitFieldSize);
591 Cursor = BitFieldSize;
595 // Search for the last aligned field.
596 for (unsigned i = TotalOffsets; i != 0; --i) {
597 uint64_t O = Offsets[i - 1];
// Only a field whose offset is a multiple of the bit-field's declared
// type size can host (or anchor) this bit-field.
598 if (O % TySize == 0) {
599 FoundPrevField = true;
600 if (TySize - (Cursor - O) >= BitFieldSize) {
601 // The bitfield fits in the last aligned field.
602 // This is : struct { char a; int CurrentField:10;};
603 // where 'CurrentField' shares first field with 'a'.
604 addPaddingFields(Cursor + BitFieldSize);
605 CGT.addFieldInfo(FD, i - 1);
606 CGT.addBitFieldInfo(FD, Cursor - O, BitFieldSize);
607 Cursor += BitFieldSize;
609 // Place the bitfield in a new LLVM field.
610 // This is : struct { char a; short CurrentField:10;};
611 // where 'CurrentField' needs a new llvm field.
612 addPaddingFields(O + TySize);
613 CGT.addFieldInfo(FD, llvmFieldNo);
614 CGT.addBitFieldInfo(FD, 0, BitFieldSize);
615 addPaddingFields(O + TySize + BitFieldSize);
616 Cursor = O + TySize + BitFieldSize;
622 assert(FoundPrevField &&
623 "Unable to find a place for bitfield in struct layout");