//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//
14 #include "CodeGenTypes.h"
17 #include "CGRecordLayout.h"
18 #include "clang/AST/ASTContext.h"
19 #include "clang/AST/DeclObjC.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/Expr.h"
22 #include "clang/AST/RecordLayout.h"
23 #include "llvm/DerivedTypes.h"
24 #include "llvm/Module.h"
25 #include "llvm/Target/TargetData.h"
26 using namespace clang;
27 using namespace CodeGen;
29 CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
30 const llvm::TargetData &TD, const ABIInfo &Info,
32 : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
33 TheABIInfo(Info), TheCXXABI(CXXABI) {
36 CodeGenTypes::~CodeGenTypes() {
37 for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
38 I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
42 for (llvm::FoldingSet<CGFunctionInfo>::iterator
43 I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
47 /// HandleLateResolvedPointers - For top-level ConvertType calls, this handles
48 /// pointers that are referenced but have not been converted yet. This is used
49 /// to handle cyclic structures properly.
50 void CodeGenTypes::HandleLateResolvedPointers() {
51 assert(!PointersToResolve.empty() && "No pointers to resolve!");
53 // Any pointers that were converted deferred evaluation of their pointee type,
54 // creating an opaque type instead. This is in order to avoid problems with
55 // circular types. Loop through all these defered pointees, if any, and
57 while (!PointersToResolve.empty()) {
58 std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val();
60 // We can handle bare pointers here because we know that the only pointers
61 // to the Opaque type are P.second and from other types. Refining the
62 // opqaue type away will invalidate P.second, but we don't mind :).
63 const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
64 P.second->refineAbstractTypeTo(NT);
68 void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, const llvm::Type *Ty,
69 llvm::StringRef suffix) {
70 llvm::SmallString<256> TypeName;
71 llvm::raw_svector_ostream OS(TypeName);
72 OS << RD->getKindName() << '.';
74 // Name the codegen type after the typedef name
75 // if there is no tag type name available
76 if (RD->getIdentifier()) {
77 // FIXME: We should not have to check for a null decl context here.
78 // Right now we do it because the implicit Obj-C decls don't have one.
79 if (RD->getDeclContext())
80 OS << RD->getQualifiedNameAsString();
83 } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
84 // FIXME: We should not have to check for a null decl context here.
85 // Right now we do it because the implicit Obj-C decls don't have one.
86 if (TDD->getDeclContext())
87 OS << TDD->getQualifiedNameAsString();
96 TheModule.addTypeName(OS.str(), Ty);
99 /// ConvertType - Convert the specified type to its LLVM form.
100 const llvm::Type *CodeGenTypes::ConvertType(QualType T, bool IsRecursive) {
101 const llvm::Type *Result = ConvertTypeRecursive(T);
103 // If this is a top-level call to ConvertType and sub-conversions caused
104 // pointers to get lazily built as opaque types, resolve the pointers, which
105 // might cause Result to be merged away.
106 if (!IsRecursive && !PointersToResolve.empty()) {
107 llvm::PATypeHolder ResultHandle = Result;
108 HandleLateResolvedPointers();
109 Result = ResultHandle;
114 const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
115 T = Context.getCanonicalType(T);
117 // See if type is already cached.
118 llvm::DenseMap<const Type *, llvm::PATypeHolder>::iterator
119 I = TypeCache.find(T.getTypePtr());
120 // If type is found in map and this is not a definition for a opaque
121 // place holder type then use it. Otherwise, convert type T.
122 if (I != TypeCache.end())
123 return I->second.get();
125 const llvm::Type *ResultType = ConvertNewType(T);
126 TypeCache.insert(std::make_pair(T.getTypePtr(),
127 llvm::PATypeHolder(ResultType)));
131 /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
132 /// ConvertType in that it is used to convert to the memory representation for
133 /// a type. For example, the scalar representation for _Bool is i1, but the
134 /// memory representation is usually i8 or i32, depending on the target.
135 const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool IsRecursive){
136 const llvm::Type *R = ConvertType(T, IsRecursive);
138 // If this is a non-bool type, don't map it.
139 if (!R->isIntegerTy(1))
142 // Otherwise, return an integer of the target-specified size.
143 return llvm::IntegerType::get(getLLVMContext(),
144 (unsigned)Context.getTypeSize(T));
148 // Code to verify a given function type is complete, i.e. the return type
149 // and all of the argument types are complete.
150 const TagType *CodeGenTypes::VerifyFuncTypeComplete(const Type* T) {
151 const FunctionType *FT = cast<FunctionType>(T);
152 if (const TagType* TT = FT->getResultType()->getAs<TagType>())
153 if (!TT->getDecl()->isDefinition())
155 if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
156 for (unsigned i = 0; i < FPT->getNumArgs(); i++)
157 if (const TagType* TT = FPT->getArgType(i)->getAs<TagType>())
158 if (!TT->getDecl()->isDefinition())
163 /// UpdateCompletedType - When we find the full definition for a TagDecl,
164 /// replace the 'opaque' type we previously made for it if applicable.
165 void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
166 const Type *Key = Context.getTagDeclType(TD).getTypePtr();
167 llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
168 TagDeclTypes.find(Key);
169 if (TDTI == TagDeclTypes.end()) return;
171 // Remember the opaque LLVM type for this tagdecl.
172 llvm::PATypeHolder OpaqueHolder = TDTI->second;
173 assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
174 "Updating compilation of an already non-opaque type?");
176 // Remove it from TagDeclTypes so that it will be regenerated.
177 TagDeclTypes.erase(TDTI);
179 // Generate the new type.
180 const llvm::Type *NT = ConvertTagDeclType(TD);
182 // Refine the old opaque type to its new definition.
183 cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);
185 // Since we just completed a tag type, check to see if any function types
186 // were completed along with the tag type.
187 // FIXME: This is very inefficient; if we track which function types depend
188 // on which tag types, though, it should be reasonably efficient.
189 llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
190 for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
191 if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
192 // This function type still depends on an incomplete tag type; make sure
193 // that tag type has an associated opaque type.
194 ConvertTagDeclType(TT->getDecl());
196 // This function no longer depends on an incomplete tag type; create the
197 // function type, and refine the opaque type to the new function type.
198 llvm::PATypeHolder OpaqueHolder = i->second;
199 const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
200 cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
201 FunctionTypes.erase(i);
206 static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
207 const llvm::fltSemantics &format) {
208 if (&format == &llvm::APFloat::IEEEsingle)
209 return llvm::Type::getFloatTy(VMContext);
210 if (&format == &llvm::APFloat::IEEEdouble)
211 return llvm::Type::getDoubleTy(VMContext);
212 if (&format == &llvm::APFloat::IEEEquad)
213 return llvm::Type::getFP128Ty(VMContext);
214 if (&format == &llvm::APFloat::PPCDoubleDouble)
215 return llvm::Type::getPPC_FP128Ty(VMContext);
216 if (&format == &llvm::APFloat::x87DoubleExtended)
217 return llvm::Type::getX86_FP80Ty(VMContext);
218 assert(0 && "Unknown float format!");
222 const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
223 const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr();
225 switch (Ty.getTypeClass()) {
226 #define TYPE(Class, Base)
227 #define ABSTRACT_TYPE(Class, Base)
228 #define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
229 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
230 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
231 #include "clang/AST/TypeNodes.def"
232 llvm_unreachable("Non-canonical or dependent types aren't possible.");
235 case Type::Builtin: {
236 switch (cast<BuiltinType>(Ty).getKind()) {
237 case BuiltinType::Void:
238 case BuiltinType::ObjCId:
239 case BuiltinType::ObjCClass:
240 case BuiltinType::ObjCSel:
241 // LLVM void type can only be used as the result of a function call. Just
242 // map to the same as char.
243 return llvm::Type::getInt8Ty(getLLVMContext());
245 case BuiltinType::Bool:
246 // Note that we always return bool as i1 for use as a scalar type.
247 return llvm::Type::getInt1Ty(getLLVMContext());
249 case BuiltinType::Char_S:
250 case BuiltinType::Char_U:
251 case BuiltinType::SChar:
252 case BuiltinType::UChar:
253 case BuiltinType::Short:
254 case BuiltinType::UShort:
255 case BuiltinType::Int:
256 case BuiltinType::UInt:
257 case BuiltinType::Long:
258 case BuiltinType::ULong:
259 case BuiltinType::LongLong:
260 case BuiltinType::ULongLong:
261 case BuiltinType::WChar_S:
262 case BuiltinType::WChar_U:
263 case BuiltinType::Char16:
264 case BuiltinType::Char32:
265 return llvm::IntegerType::get(getLLVMContext(),
266 static_cast<unsigned>(Context.getTypeSize(T)));
268 case BuiltinType::Float:
269 case BuiltinType::Double:
270 case BuiltinType::LongDouble:
271 return getTypeForFormat(getLLVMContext(),
272 Context.getFloatTypeSemantics(T));
274 case BuiltinType::NullPtr: {
275 // Model std::nullptr_t as i8*
276 const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
277 return llvm::PointerType::getUnqual(Ty);
280 case BuiltinType::UInt128:
281 case BuiltinType::Int128:
282 return llvm::IntegerType::get(getLLVMContext(), 128);
284 case BuiltinType::Overload:
285 case BuiltinType::Dependent:
286 case BuiltinType::BoundMember:
287 case BuiltinType::UnknownAny:
288 llvm_unreachable("Unexpected placeholder builtin type!");
291 llvm_unreachable("Unknown builtin type!");
294 case Type::Complex: {
295 const llvm::Type *EltTy =
296 ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
297 return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL);
299 case Type::LValueReference:
300 case Type::RValueReference: {
301 const ReferenceType &RTy = cast<ReferenceType>(Ty);
302 QualType ETy = RTy.getPointeeType();
303 llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
304 PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
305 unsigned AS = Context.getTargetAddressSpace(ETy);
306 return llvm::PointerType::get(PointeeType, AS);
308 case Type::Pointer: {
309 const PointerType &PTy = cast<PointerType>(Ty);
310 QualType ETy = PTy.getPointeeType();
311 llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
312 PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
313 unsigned AS = Context.getTargetAddressSpace(ETy);
314 return llvm::PointerType::get(PointeeType, AS);
317 case Type::VariableArray: {
318 const VariableArrayType &A = cast<VariableArrayType>(Ty);
319 assert(A.getIndexTypeCVRQualifiers() == 0 &&
320 "FIXME: We only handle trivial array types so far!");
321 // VLAs resolve to the innermost element type; this matches
322 // the return of alloca, and there isn't any obviously better choice.
323 return ConvertTypeForMemRecursive(A.getElementType());
325 case Type::IncompleteArray: {
326 const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
327 assert(A.getIndexTypeCVRQualifiers() == 0 &&
328 "FIXME: We only handle trivial array types so far!");
329 // int X[] -> [0 x int]
330 return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
333 case Type::ConstantArray: {
334 const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
335 const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
336 return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
338 case Type::ExtVector:
340 const VectorType &VT = cast<VectorType>(Ty);
341 return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
342 VT.getNumElements());
344 case Type::FunctionNoProto:
345 case Type::FunctionProto: {
346 // First, check whether we can build the full function type. If the
347 // function type depends on an incomplete type (e.g. a struct or enum), we
348 // cannot lower the function type. Instead, turn it into an Opaque pointer
349 // and have UpdateCompletedType revisit the function type when/if the opaque
350 // argument type is defined.
351 if (const TagType *TT = VerifyFuncTypeComplete(&Ty)) {
352 // This function's type depends on an incomplete tag type; make sure
353 // we have an opaque type corresponding to the tag type.
354 ConvertTagDeclType(TT->getDecl());
355 // Create an opaque type for this function type, save it, and return it.
356 llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
357 FunctionTypes.insert(std::make_pair(&Ty, ResultType));
361 // The function type can be built; call the appropriate routines to
363 const CGFunctionInfo *FI;
365 if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty)) {
366 FI = &getFunctionInfo(
367 CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)),
369 isVariadic = FPT->isVariadic();
371 const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
372 FI = &getFunctionInfo(
373 CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)),
378 return GetFunctionType(*FI, isVariadic, true);
381 case Type::ObjCObject:
382 return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType());
384 case Type::ObjCInterface: {
385 // Objective-C interfaces are always opaque (outside of the
386 // runtime, which can do whatever it likes); we never refine
388 const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
390 T = llvm::OpaqueType::get(getLLVMContext());
394 case Type::ObjCObjectPointer: {
395 // Protocol qualifications do not influence the LLVM type, we just return a
396 // pointer to the underlying interface type. We don't need to worry about
397 // recursive conversion.
398 const llvm::Type *T =
399 ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
400 return llvm::PointerType::getUnqual(T);
405 const TagDecl *TD = cast<TagType>(Ty).getDecl();
406 const llvm::Type *Res = ConvertTagDeclType(TD);
408 if (const RecordDecl *RD = dyn_cast<RecordDecl>(TD))
409 addRecordTypeName(RD, Res, llvm::StringRef());
413 case Type::BlockPointer: {
414 const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
415 llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
416 PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
417 unsigned AS = Context.getTargetAddressSpace(FTy);
418 return llvm::PointerType::get(PointeeType, AS);
421 case Type::MemberPointer: {
422 return getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(&Ty));
427 return llvm::OpaqueType::get(getLLVMContext());
430 /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
432 const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
433 // TagDecl's are not necessarily unique, instead use the (clang)
434 // type connected to the decl.
436 Context.getTagDeclType(TD).getTypePtr();
437 llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
438 TagDeclTypes.find(Key);
440 // If we've already compiled this tag type, use the previous definition.
441 if (TDTI != TagDeclTypes.end())
444 const EnumDecl *ED = dyn_cast<EnumDecl>(TD);
446 // If this is still a forward declaration, just define an opaque
447 // type to use for this tagged decl.
448 // C++0x: If this is a enumeration type with fixed underlying type,
449 // consider it complete.
450 if (!TD->isDefinition() && !(ED && ED->isFixed())) {
451 llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
452 TagDeclTypes.insert(std::make_pair(Key, ResultType));
456 // Okay, this is a definition of a type. Compile the implementation now.
458 if (ED) // Don't bother storing enums in TagDeclTypes.
459 return ConvertTypeRecursive(ED->getIntegerType());
461 // This decl could well be recursive. In this case, insert an opaque
462 // definition of this type, which the recursive uses will get. We will then
463 // refine this opaque version later.
465 // Create new OpaqueType now for later use in case this is a recursive
466 // type. This will later be refined to the actual type.
467 llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext());
468 TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
470 const RecordDecl *RD = cast<const RecordDecl>(TD);
472 // Force conversion of non-virtual base classes recursively.
473 if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
474 for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
475 e = RD->bases_end(); i != e; ++i) {
476 if (!i->isVirtual()) {
477 const CXXRecordDecl *Base =
478 cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
479 ConvertTagDeclType(Base);
485 CGRecordLayout *Layout = ComputeRecordLayout(RD);
487 CGRecordLayouts[Key] = Layout;
488 const llvm::Type *ResultType = Layout->getLLVMType();
490 // Refine our Opaque type to ResultType. This can invalidate ResultType, so
491 // make sure to read the result out of the holder.
492 cast<llvm::OpaqueType>(ResultHolder.get())
493 ->refineAbstractTypeTo(ResultType);
495 return ResultHolder.get();
498 /// getCGRecordLayout - Return record layout info for the given record decl.
499 const CGRecordLayout &
500 CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
501 const Type *Key = Context.getTagDeclType(RD).getTypePtr();
503 const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
505 // Compute the type information.
506 ConvertTagDeclType(RD);
509 Layout = CGRecordLayouts.lookup(Key);
512 assert(Layout && "Unable to find record layout information for type");
516 void CodeGenTypes::addBaseSubobjectTypeName(const CXXRecordDecl *RD,
517 const CGRecordLayout &layout) {
518 llvm::StringRef suffix;
519 if (layout.getBaseSubobjectLLVMType() != layout.getLLVMType())
522 addRecordTypeName(RD, layout.getBaseSubobjectLLVMType(), suffix);
525 bool CodeGenTypes::isZeroInitializable(QualType T) {
526 // No need to check for member pointers when not compiling C++.
527 if (!Context.getLangOptions().CPlusPlus)
530 T = Context.getBaseElementType(T);
532 // Records are non-zero-initializable if they contain any
533 // non-zero-initializable subobjects.
534 if (const RecordType *RT = T->getAs<RecordType>()) {
535 const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
536 return isZeroInitializable(RD);
539 // We have to ask the ABI about member pointers.
540 if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
541 return getCXXABI().isZeroInitializable(MPT);
543 // Everything else is okay.
547 bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
548 return getCGRecordLayout(RD).isZeroInitializable();