1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliancy.
13 //===----------------------------------------------------------------------===//
15 #include "TargetInfo.h"
17 #include "CodeGenFunction.h"
18 #include "clang/AST/RecordLayout.h"
19 #include "llvm/Type.h"
20 #include "llvm/Target/TargetData.h"
21 #include "llvm/ADT/StringExtras.h"
22 #include "llvm/ADT/Triple.h"
23 #include "llvm/Support/raw_ostream.h"
24 using namespace clang;
25 using namespace CodeGen;
// AssignToArrayRange - Store the same Value into Array[FirstIndex..LastIndex]
// inclusive. Used below to fill ranges of the DWARF EH register-size table.
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    // GEP to slot I and store the (shared) size value there.
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
// Out-of-line (trivial) destructor definition for ABIInfo.
ABIInfo::~ABIInfo() {}
/// dump - Print this ABIArgInfo to llvm::errs() for debugging.
void ABIArgInfo::dump() const {
  llvm::raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  // Coerce kind: print the LLVM type the argument is coerced to.
  getCoerceToType()->print(OS);
  // Indirect kind: print the alignment and whether byval is used.
  // NOTE(review): "Byal" looks like a typo for "ByVal" in this debug output;
  // left unchanged here since it is runtime-emitted text.
  OS << "Indirect Align=" << getIndirectAlign()
     << " Byal=" << getIndirectByVal();
69 TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
// Forward declaration; isEmptyField and isEmptyRecord are mutually recursive.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
  // Unnamed (padding) bit-fields are always considered empty.
  if (FD->isUnnamedBitfield())

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
    FT = AT->getElementType();

  const RecordType *RT = FT->getAs<RecordType>();

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  if (isa<CXXRecordDecl>(RT->getDecl()))

  // Otherwise delegate to the record-level emptiness check.
  return isEmptyRecord(Context, FT, AllowArrays);
/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  const RecordDecl *RD = RT->getDecl();
  // A flexible array member makes the record non-empty by definition.
  if (RD->hasFlexibleArrayMember())

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i)
      // Bases are checked with AllowArrays=true.
      if (!isEmptyRecord(Context, i->getType(), true))

  // Then every field must itself be empty.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
    if (!isEmptyField(Context, *i, AllowArrays))
/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
/// a non-trivial destructor or a non-trivial copy constructor.
static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
  // Only C++ classes can have non-trivial special member functions.
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
/// a record type with either a non-trivial destructor or a non-trivial copy
static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
  // Non-record types trivially fail the predicate.
  const RecordType *RT = T->getAs<RecordType>();
  return hasNonTrivialDestructorOrCopyConstructor(RT);
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())

  // Tracks the single non-empty element found so far (null = none yet).
  const Type *Found = 0;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
           e = CXXRD->bases_end(); i != e; ++i) {
      // Ignore empty records.
      if (isEmptyRecord(Context, i->getType(), true))

      // If we already found an element then this isn't a single-element struct.

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(i->getType(), Context);

  // Check for single element.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
    const FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))

    // If we already found an element then this isn't a single-element

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
      FT = AT->getElementType();

    // Scalars are candidate elements; aggregates recurse.
    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
      Found = FT.getTypePtr();
      Found = isSingleElementStruct(FT, Context);
// is32Or64BitBasicType - Return true if Ty is a scalar (builtin, pointer,
// complex, enum, or block-pointer) type whose size is exactly 32 or 64 bits.
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Reject anything that is not one of the "basic" type kinds.
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
      !Ty->isBlockPointerType())

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
//
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))

  // Every field must be a 32- or 64-bit basic type and not a bit-field.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
    const FieldDecl *FD = *i;

    if (!is32Or64BitBasicType(FD->getType(), Context))

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
  // Decide how the return value of a function is lowered.
  ABIArgInfo classifyReturnType(QualType RetTy,
                                llvm::LLVMContext &VMContext) const;

  // Decide how a single argument is lowered.
  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type and then every argument of the signature.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
      it->info = classifyArgumentType(it->type, Context, VMContext);

  // Emit the IR for a va_arg of type Ty under this ABI.
  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
// TargetCodeGenInfo wrapper that owns a DefaultABIInfo instance
// (TargetCodeGenInfo's destructor deletes the ABIInfo; see above).
class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
  DefaultTargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
// EmitVAArg - Emit IR to fetch the next va_arg value of type Ty from the
// va_list at VAListAddr under the default (target-agnostic) ABI.
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
// classifyArgumentType - Default lowering for a single argument:
// aggregates go indirect; scalars are passed directly, with promotable
// integers extended.
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
                                                llvm::LLVMContext &VMContext) const {
  // Aggregates are always passed indirectly (alignment 0 = ABI default).
  if (CodeGenFunction::hasAggregateLLVMType(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are extended; everything else is direct.
  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  // True when targeting Darwin's special vector-return conventions.
  bool IsDarwinVectorABI;
  // True when small structs may be returned in registers.
  bool IsSmallStructInRegABI;

  // Return true if Size (in bits) is a valid integer register size.
  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);

  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, ASTContext &Context,
                               bool ByVal = true) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType RetTy,
                                  llvm::LLVMContext &VMContext) const;

  // Classify the return type and every argument of the signature.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
      it->info = classifyArgumentType(it->type, Context, VMContext);

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;

  // d = Darwin vector ABI, p = small-struct-in-register ABI.
  X86_32ABIInfo(ASTContext &Context, bool d, bool p)
    : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
      IsSmallStructInRegABI(p) {}
// TargetCodeGenInfo for X86-32; owns an X86_32ABIInfo and adds target
// attribute and DWARF EH hooks.
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
  X86_32TargetCodeGenInfo(ASTContext &Context, bool d, bool p)
    :TargetCodeGenInfo(new X86_32ABIInfo(Context, d, p)) {}

  // Apply target-specific attributes (e.g. force_align_arg_pointer) to GV.
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.isTargetDarwin()) return 5;

  // Populate the DWARF EH register-size table for x86-32.
  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const;
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    if (Size == 64 || Size == 128)

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
         e = RT->getDecl()->field_end(); i != e; ++i) {
    const FieldDecl *FD = *i;

    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
// classifyReturnType - Compute how a function result of type RetTy is
// returned under the X86-32 ABI: void is ignored; vectors follow Darwin's
// special rules; aggregates are returned indirectly unless small enough to
// coerce into a register; scalars are direct (promotable ints extended).
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             llvm::LLVMContext &VMContext) const {
  if (RetTy->isVoidType()) {
    return ABIArgInfo::getIgnore();
  } else if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = Context.getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      return ABIArgInfo::getCoerce(llvm::VectorType::get(
                                     llvm::Type::getInt64Ty(VMContext), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));

      return ABIArgInfo::getIndirect(0);

    return ABIArgInfo::getDirect();
  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return ABIArgInfo::getIndirect(0);

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return ABIArgInfo::getIndirect(0);

    // Classify "single element" structs as their element type.
    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
      if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
        if (BT->isIntegerType()) {
          // We need to use the size of the structure, padding
          // bit-fields can adjust that to be larger than the single
          uint64_t Size = Context.getTypeSize(RetTy);
          return ABIArgInfo::getCoerce(
            llvm::IntegerType::get(VMContext, (unsigned) Size));
        } else if (BT->getKind() == BuiltinType::Float) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
        } else if (BT->getKind() == BuiltinType::Double) {
          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
                 "Unexpect single element structure size!");
          return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
      } else if (SeltTy->isPointerType()) {
        // FIXME: It would be really nice if this could come out as the proper
        const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
        return ABIArgInfo::getCoerce(PtrTy);
      } else if (SeltTy->isVectorType()) {
        // 64- and 128-bit vectors are never returned in a
        // register when inside a structure.
        uint64_t Size = Context.getTypeSize(RetTy);
        if (Size == 64 || Size == 128)
          return ABIArgInfo::getIndirect(0);

        return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);

    // Small structures which are register sized are generally returned
    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
      uint64_t Size = Context.getTypeSize(RetTy);
      return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));

    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
// getIndirectResult - Build the indirect (in-memory) ABIArgInfo for Ty,
// passing an explicit alignment only when it exceeds the minimum ABI
// alignment (the back-end is trusted to honor the minimum for byval).
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty,
  // Non-byval indirect: no explicit alignment needed.
  return ABIArgInfo::getIndirect(0, false);

  // Compute the byval alignment. We trust the back-end to honor the
  // minimum ABI alignment for byval, to make cleaner IR.
  const unsigned MinABIAlign = 4;
  unsigned Align = Context.getTypeAlign(Ty) / 8;
  if (Align > MinABIAlign)
    return ABIArgInfo::getIndirect(Align);
  return ABIArgInfo::getIndirect(0);
// classifyArgumentType - Compute how an argument of type Ty is passed
// under the X86-32 ABI: aggregates go indirect (or are expanded when the
// stack layout matches); scalars are direct with promotable ints extended.
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               llvm::LLVMContext &VMContext) const {
  // FIXME: Set alignment on indirect arguments.
  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Structures with either a non-trivial destructor or a non-trivial
      // copy constructor are always indirect.
      if (hasNonTrivialDestructorOrCopyConstructor(RT))
        return getIndirectResult(Ty, Context, /*ByVal=*/false);

      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, Context);

    // Ignore empty structs.
    if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
      return ABIArgInfo::getIgnore();

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    if (Context.getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, Context))
      return ABIArgInfo::getExpand();

    return getIndirectResult(Ty, Context);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
// EmitVAArg - Emit IR to fetch the next va_arg of type Ty: load the current
// pointer from the va_list, cast it to Ty*, then advance the va_list by the
// type's size rounded up to 4-byte alignment.
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  // Load the current argument pointer out of the va_list.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
  // Round the type size up to the 4-byte stack slot granularity.
  llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
  // Store the advanced pointer back into the va_list.
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
// SetTargetAttributes - For functions carrying the x86
// force_align_arg_pointer attribute, mark the emitted LLVM function with
// a 16-byte stack alignment requirement.
void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                                  CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
// initDwarfEHRegSizeTable - Fill the EH register-size table at Address
// with the byte sizes of the x86-32 DWARF registers (Darwin and generic
// layouts differ for the x87 stack registers).
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
  CodeGen::CodeGenFunction &CGF,
  llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;
  llvm::LLVMContext &Context = CGF.getLLVMContext();

  // Sizes are stored as i8 values.
  const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.isTargetDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

    // 9 is %eflags, which doesn't get a size on Darwin for some
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  // Target data layout, used for size/alignment queries.
  const llvm::TargetData &TD;

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;

  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
  /// to coerce to, chose the best way to pass Ty in the same place
  /// that \arg CoerceTo would be passed, but while keeping the
  /// emitted code as simple as possible.
  ///
  /// FIXME: Note, this should be cleaned up to just take an enumeration of all
  /// the ways we might want to pass things, instead of constructing an LLVM
  /// type. This makes this code more explicit, and it makes it clearer that we
  /// are also doing this for correctness in the case of passing scalar types.
  ABIArgInfo getCoerceResult(QualType Ty,
                             const llvm::Type *CoerceTo) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy,
                                llvm::LLVMContext &VMContext) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  llvm::LLVMContext &VMContext,
                                  const llvm::Type *PrefType) const;

  X86_64ABIInfo(ASTContext &Ctx, const llvm::TargetData &td)
    : Context(Ctx), TD(td) {}

  // Classify the return type and every argument of the signature.
  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
                           llvm::LLVMContext &VMContext,
                           const llvm::Type *const *PrefTypes,
                           unsigned NumPrefTypes) const;

  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                 CodeGenFunction &CGF) const;
// TargetCodeGenInfo for X86-64; owns an X86_64ABIInfo and provides the
// DWARF EH register-size table (all 16 integer registers are 8 bytes).
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  X86_64TargetCodeGenInfo(ASTContext &Ctx, const llvm::TargetData &TD)
    : TargetCodeGenInfo(new X86_64ABIInfo(Ctx, TD)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const {
    CodeGen::CGBuilderTy &Builder = CGF.Builder;
    llvm::LLVMContext &Context = CGF.getLLVMContext();

    // Sizes are stored as i8 values; every GPR is 8 bytes wide.
    const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
    llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(Builder, Address, Eight8, 0, 16);
// merge - Combine two eightbyte classifications per the AMD64 ABI rules
// quoted below; used while recursively classifying aggregate fields.
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  //
  // (d) If one of the classes is INTEGER, the result is the
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  // Rules (a) and (b): equal classes, or Field is NoClass.
  if (Accum == Field || Field == NoClass)
  // Rule (b): Accum is NoClass, take the field's class.
  if (Accum == NoClass)
  // Rule (d): INTEGER dominates.
  if (Accum == Integer || Field == Integer)
  // Rule (e): any x87-family class forces MEMORY.
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
// classify - Implement the AMD64 ABI classification algorithm: determine
// the (Lo, Hi) eightbyte register classes for Ty at bit offset OffsetBase
// within its containing object.
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  // Scalars land entirely in one eightbyte, chosen by the offset.
  Class &Current = OffsetBase < 64 ? Lo : Hi;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
    } else if (k == BuiltinType::LongDouble) {
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);

  if (Ty->hasPointerRepresentation()) {

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType())

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = Context.getTypeSize(VT);
    // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
    // float> as integer.

    // If this type crosses an eightbyte boundary, it should be
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
    if (EB_Real != EB_Imag)
  } else if (Size == 64) {
    // gcc passes <1 x double> in memory. :(
    if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))

    // gcc passes <1 x long long> as INTEGER.
    if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))

    // If this type crosses an eightbyte boundary, it should be
    if (OffsetBase && OffsetBase != 64)
  } else if (Size == 128) {

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = Context.getCanonicalType(CT->getElementType());

    uint64_t Size = Context.getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      else if (Size <= 128)
    } else if (ET == Context.FloatTy)
    else if (ET == Context.DoubleTy)
    else if (ET == Context.LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();
    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      // Once MEMORY is reached no further merging can change the result.
      if (Lo == Memory || Hi == Memory)

    // Do post merger cleanup (see below). Only case we worry about is Memory.
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = Context.getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than two eightbytes, ..., it has class MEMORY.

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    if (hasNonTrivialDestructorOrCopyConstructor(RT))

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())

    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
             e = CXXRD->bases_end(); i != e; ++i) {
        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
        classify(i->getType(), Offset, FieldLo, FieldHi);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)

    // If this record has no fields but isn't empty, classify as INTEGER.
    if (RD->field_empty() && Size)

    // Classify the fields one at a time, merging the results.
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
      // fields, it has class MEMORY.
      //
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      //
      // Ignore padding bit-fields.
      if (i->isUnnamedBitfield())

      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();

      // Compute the first and last eightbyte the bit-field touches.
      uint64_t EB_Lo = Offset / 64;
      uint64_t EB_Hi = (Offset + Size - 1) / 64;
      FieldLo = FieldHi = NoClass;

      assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
      FieldHi = EB_Hi ? Integer : NoClass;
      classify(i->getType(), Offset, FieldLo, FieldHi);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)

    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
    //
    // (a) If one of the classes is MEMORY, the whole argument is
    // passed in memory.
    //
    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
    //
    // The first of these conditions is guaranteed by how we implement
    // the merge (just bail).
    //
    // The second condition occurs in the case of unions; for example
    // union { _Complex double; unsigned; }.
    if (Hi == SSEUp && Lo != SSE)
1113 ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
1114 const llvm::Type *CoerceTo) const {
1115 if (CoerceTo->isIntegerTy(64) || isa<llvm::PointerType>(CoerceTo)) {
1116 // Integer and pointer types will end up in a general purpose
1119 // Treat an enum type as its underlying type.
1120 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1121 Ty = EnumTy->getDecl()->getIntegerType();
1123 if (Ty->isIntegralOrEnumerationType() || Ty->hasPointerRepresentation())
1124 return (Ty->isPromotableIntegerType() ?
1125 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1127 // If this is a 8/16/32-bit structure that is passed as an int64, then it
1128 // will be passed in the low 8/16/32-bits of a 64-bit GPR, which is the same
1129 // as how an i8/i16/i32 is passed. Coerce to a i8/i16/i32 instead of a i64.
1130 switch (Context.getTypeSizeInChars(Ty).getQuantity()) {
1132 case 1: CoerceTo = llvm::Type::getInt8Ty(CoerceTo->getContext()); break;
1133 case 2: CoerceTo = llvm::Type::getInt16Ty(CoerceTo->getContext()); break;
1134 case 4: CoerceTo = llvm::Type::getInt32Ty(CoerceTo->getContext()); break;
1137 } else if (CoerceTo->isDoubleTy()) {
1138 assert(Ty.isCanonical() && "should always have a canonical type here");
1139 assert(!Ty.hasQualifiers() && "should never have a qualified type here");
1141 // Float and double end up in a single SSE reg.
1142 if (Ty == Context.FloatTy || Ty == Context.DoubleTy)
1143 return ABIArgInfo::getDirect();
1145 // If this is a 32-bit structure that is passed as a double, then it will be
1146 // passed in the low 32-bits of the XMM register, which is the same as how a
1147 // float is passed. Coerce to a float instead of a double.
1148 if (Context.getTypeSizeInChars(Ty).getQuantity() == 4)
1149 CoerceTo = llvm::Type::getFloatTy(CoerceTo->getContext());
1152 return ABIArgInfo::getCoerce(CoerceTo);
1155 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
1156 // If this is a scalar LLVM value then assume LLVM will pass it in the right
1158 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1159 // Treat an enum type as its underlying type.
1160 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1161 Ty = EnumTy->getDecl()->getIntegerType();
1163 return (Ty->isPromotableIntegerType() ?
1164 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1167 return ABIArgInfo::getIndirect(0);
1170 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty) const {
1171 // If this is a scalar LLVM value then assume LLVM will pass it in the right
1173 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1174 // Treat an enum type as its underlying type.
1175 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1176 Ty = EnumTy->getDecl()->getIntegerType();
1178 return (Ty->isPromotableIntegerType() ?
1179 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1182 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
1183 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
1185 // Compute the byval alignment. We trust the back-end to honor the
1186 // minimum ABI alignment for byval, to make cleaner IR.
1187 const unsigned MinABIAlign = 8;
1188 unsigned Align = Context.getTypeAlign(Ty) / 8;
1189 if (Align > MinABIAlign)
1190 return ABIArgInfo::getIndirect(Align);
1191 return ABIArgInfo::getIndirect(0);
1194 ABIArgInfo X86_64ABIInfo::
1195 classifyReturnType(QualType RetTy, llvm::LLVMContext &VMContext) const {
1196 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
1197 // classification algorithm.
1198 X86_64ABIInfo::Class Lo, Hi;
1199 classify(RetTy, 0, Lo, Hi);
1201 // Check some invariants.
1202 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1203 assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
1204 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1206 const llvm::Type *ResType = 0;
1209 return ABIArgInfo::getIgnore();
1213 assert(0 && "Invalid classification for lo word.");
1215 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
1218 return getIndirectReturnResult(RetTy);
1220 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
1221 // available register of the sequence %rax, %rdx is used.
1223 ResType = llvm::Type::getInt64Ty(VMContext); break;
1225 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
1226 // available SSE register of the sequence %xmm0, %xmm1 is used.
1228 ResType = llvm::Type::getDoubleTy(VMContext); break;
1230 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
1231 // returned on the X87 stack in %st0 as 80-bit x87 number.
1233 ResType = llvm::Type::getX86_FP80Ty(VMContext); break;
1235 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
1236 // part of the value is returned in %st0 and the imaginary part in
1239 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
1240 ResType = llvm::StructType::get(VMContext,
1241 llvm::Type::getX86_FP80Ty(VMContext),
1242 llvm::Type::getX86_FP80Ty(VMContext),
1248 // Memory was handled previously and X87 should
1249 // never occur as a hi class.
1252 assert(0 && "Invalid classification for hi word.");
1254 case ComplexX87: // Previously handled.
1255 case NoClass: break;
1258 ResType = llvm::StructType::get(VMContext, ResType,
1259 llvm::Type::getInt64Ty(VMContext), NULL);
1262 ResType = llvm::StructType::get(VMContext, ResType,
1263 llvm::Type::getDoubleTy(VMContext), NULL);
1266 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
1267 // is passed in the upper half of the last used SSE register.
1269 // SSEUP should always be preceeded by SSE, just widen.
1271 assert(Lo == SSE && "Unexpected SSEUp classification.");
1272 ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
1275 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
1276 // returned together with the previous X87 value in %st0.
1278 // If X87Up is preceeded by X87, we don't need to do
1279 // anything. However, in some cases with unions it may not be
1280 // preceeded by X87. In such situations we follow gcc and pass the
1281 // extra bits in an SSE reg.
1283 ResType = llvm::StructType::get(VMContext, ResType,
1284 llvm::Type::getDoubleTy(VMContext), NULL);
1288 return getCoerceResult(RetTy, ResType);
1291 static const llvm::Type *Get8ByteTypeAtOffset(const llvm::Type *PrefType,
1293 const llvm::TargetData &TD) {
1294 if (PrefType == 0) return 0;
1296 // Pointers are always 8-bytes at offset 0.
1297 if (Offset == 0 && isa<llvm::PointerType>(PrefType))
1300 // TODO: 1/2/4/8 byte integers are also interesting, but we have to know that
1301 // the "hole" is not used in the containing struct (just undef padding).
1302 const llvm::StructType *STy = dyn_cast<llvm::StructType>(PrefType);
1303 if (STy == 0) return 0;
1305 // If this is a struct, recurse into the field at the specified offset.
1306 const llvm::StructLayout *SL = TD.getStructLayout(STy);
1307 if (Offset >= SL->getSizeInBytes()) return 0;
1309 unsigned FieldIdx = SL->getElementContainingOffset(Offset);
1310 Offset -= SL->getElementOffset(FieldIdx);
1312 return Get8ByteTypeAtOffset(STy->getElementType(FieldIdx), Offset, TD);
1315 ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty,
1316 llvm::LLVMContext &VMContext,
1317 unsigned &neededInt,
1318 unsigned &neededSSE,
1319 const llvm::Type *PrefType)const{
1320 X86_64ABIInfo::Class Lo, Hi;
1321 classify(Ty, 0, Lo, Hi);
1323 // Check some invariants.
1324 // FIXME: Enforce these by construction.
1325 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
1326 assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
1327 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
1331 const llvm::Type *ResType = 0;
1334 return ABIArgInfo::getIgnore();
1336 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
1340 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
1341 // COMPLEX_X87, it is passed in memory.
1344 return getIndirectResult(Ty);
1348 assert(0 && "Invalid classification for lo word.");
1350 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
1351 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
1354 // It is always safe to classify this as an i64 argument.
1355 ResType = llvm::Type::getInt64Ty(VMContext);
1358 // If we can choose a better 8-byte type based on the preferred type, and if
1359 // that type is still passed in a GPR, use it.
1360 if (const llvm::Type *PrefTypeLo = Get8ByteTypeAtOffset(PrefType, 0, TD))
1361 if (isa<llvm::IntegerType>(PrefTypeLo) ||
1362 isa<llvm::PointerType>(PrefTypeLo))
1363 ResType = PrefTypeLo;
1366 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
1367 // available SSE register is used, the registers are taken in the
1368 // order from %xmm0 to %xmm7.
1371 ResType = llvm::Type::getDoubleTy(VMContext);
1376 // Memory was handled previously, ComplexX87 and X87 should
1377 // never occur as hi classes, and X87Up must be preceed by X87,
1378 // which is passed in memory.
1382 assert(0 && "Invalid classification for hi word.");
1385 case NoClass: break;
1388 // It is always safe to classify this as an i64 argument.
1389 const llvm::Type *HiType = llvm::Type::getInt64Ty(VMContext);
1392 // If we can choose a better 8-byte type based on the preferred type, and if
1393 // that type is still passed in a GPR, use it.
1394 if (const llvm::Type *PrefTypeHi = Get8ByteTypeAtOffset(PrefType, 8, TD))
1395 if (isa<llvm::IntegerType>(PrefTypeHi) ||
1396 isa<llvm::PointerType>(PrefTypeHi))
1397 HiType = PrefTypeHi;
1399 ResType = llvm::StructType::get(VMContext, ResType, HiType, NULL);
1403 // X87Up generally doesn't occur here (long double is passed in
1404 // memory), except in situations involving unions.
1407 ResType = llvm::StructType::get(VMContext, ResType,
1408 llvm::Type::getDoubleTy(VMContext), NULL);
1412 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
1413 // eightbyte is passed in the upper half of the last used SSE
1416 assert(Lo == SSE && "Unexpected SSEUp classification.");
1417 ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
1421 return getCoerceResult(Ty, ResType);
1424 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1425 llvm::LLVMContext &VMContext,
1426 const llvm::Type *const *PrefTypes,
1427 unsigned NumPrefTypes) const {
1428 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), VMContext);
1430 // Keep track of the number of assigned registers.
1431 unsigned freeIntRegs = 6, freeSSERegs = 8;
1433 // If the return value is indirect, then the hidden argument is consuming one
1434 // integer register.
1435 if (FI.getReturnInfo().isIndirect())
1438 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
1439 // get assigned (in left-to-right order) for passing as follows...
1440 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1442 // If the client specified a preferred IR type to use, pass it down to
1443 // classifyArgumentType.
1444 const llvm::Type *PrefType = 0;
1446 PrefType = *PrefTypes++;
1450 unsigned neededInt, neededSSE;
1451 it->info = classifyArgumentType(it->type, VMContext,
1452 neededInt, neededSSE, PrefType);
1454 // AMD64-ABI 3.2.3p3: If there are no registers available for any
1455 // eightbyte of an argument, the whole argument is passed on the
1456 // stack. If registers have already been assigned for some
1457 // eightbytes of such an argument, the assignments get reverted.
1458 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
1459 freeIntRegs -= neededInt;
1460 freeSSERegs -= neededSSE;
1462 it->info = getIndirectResult(it->type);
1467 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
1469 CodeGenFunction &CGF) {
1470 llvm::Value *overflow_arg_area_p =
1471 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
1472 llvm::Value *overflow_arg_area =
1473 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
1475 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
1476 // byte boundary if alignment needed by type exceeds 8 byte boundary.
1477 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
1479 // Note that we follow the ABI & gcc here, even though the type
1480 // could in theory have an alignment greater than 16. This case
1481 // shouldn't ever matter in practice.
1483 // overflow_arg_area = (overflow_arg_area + 15) & ~15;
1484 llvm::Value *Offset =
1485 llvm::ConstantInt::get(CGF.Int32Ty, 15);
1486 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
1487 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
1489 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
1491 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
1492 overflow_arg_area->getType(),
1493 "overflow_arg_area.align");
1496 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
1497 const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
1499 CGF.Builder.CreateBitCast(overflow_arg_area,
1500 llvm::PointerType::getUnqual(LTy));
1502 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
1503 // l->overflow_arg_area + sizeof(type).
1504 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
1505 // an 8 byte boundary.
1507 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
1508 llvm::Value *Offset =
1509 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
1510 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
1511 "overflow_arg_area.next");
1512 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
1514 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
1518 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1519 CodeGenFunction &CGF) const {
1520 llvm::LLVMContext &VMContext = CGF.getLLVMContext();
1522 // Assume that va_list type is correct; should be pointer to LLVM type:
1526 // i8* overflow_arg_area;
1527 // i8* reg_save_area;
1529 unsigned neededInt, neededSSE;
1531 Ty = CGF.getContext().getCanonicalType(Ty);
1532 ABIArgInfo AI = classifyArgumentType(Ty, VMContext, neededInt, neededSSE, 0);
1534 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
1535 // in the registers. If not go to step 7.
1536 if (!neededInt && !neededSSE)
1537 return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
1539 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
1540 // general purpose registers needed to pass type and num_fp to hold
1541 // the number of floating point registers needed.
1543 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
1544 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
1545 // l->fp_offset > 304 - num_fp * 16 go to step 7.
1547 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
1548 // register save space).
1550 llvm::Value *InRegs = 0;
1551 llvm::Value *gp_offset_p = 0, *gp_offset = 0;
1552 llvm::Value *fp_offset_p = 0, *fp_offset = 0;
1554 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
1555 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
1556 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
1557 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
1561 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
1562 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
1563 llvm::Value *FitsInFP =
1564 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
1565 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
1566 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
1569 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
1570 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
1571 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
1572 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
1574 // Emit code to load the value if it was passed in registers.
1576 CGF.EmitBlock(InRegBlock);
1578 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
1579 // an offset of l->gp_offset and/or l->fp_offset. This may require
1580 // copying to a temporary location in case the parameter is passed
1581 // in different register classes or requires an alignment greater
1582 // than 8 for general purpose registers and 16 for XMM registers.
1584 // FIXME: This really results in shameful code when we end up needing to
1585 // collect arguments from different places; often what should result in a
1586 // simple assembling of a structure from scattered addresses has many more
1587 // loads than necessary. Can we clean this up?
1588 const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
1589 llvm::Value *RegAddr =
1590 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
1592 if (neededInt && neededSSE) {
1594 assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
1595 const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
1596 llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
1597 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
1598 const llvm::Type *TyLo = ST->getElementType(0);
1599 const llvm::Type *TyHi = ST->getElementType(1);
1600 assert((TyLo->isFloatingPointTy() ^ TyHi->isFloatingPointTy()) &&
1601 "Unexpected ABI info for mixed regs");
1602 const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
1603 const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
1604 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
1605 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
1606 llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
1607 llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
1609 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
1610 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
1611 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
1612 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
1614 RegAddr = CGF.Builder.CreateBitCast(Tmp,
1615 llvm::PointerType::getUnqual(LTy));
1616 } else if (neededInt) {
1617 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
1618 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
1619 llvm::PointerType::getUnqual(LTy));
1620 } else if (neededSSE == 1) {
1621 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
1622 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
1623 llvm::PointerType::getUnqual(LTy));
1625 assert(neededSSE == 2 && "Invalid number of needed registers!");
1626 // SSE registers are spaced 16 bytes apart in the register save
1627 // area, we need to collect the two eightbytes together.
1628 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
1629 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
1630 const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
1631 const llvm::Type *DblPtrTy =
1632 llvm::PointerType::getUnqual(DoubleTy);
1633 const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
1635 llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
1636 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
1638 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
1639 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
1641 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
1642 RegAddr = CGF.Builder.CreateBitCast(Tmp,
1643 llvm::PointerType::getUnqual(LTy));
1646 // AMD64-ABI 3.5.7p5: Step 5. Set:
1647 // l->gp_offset = l->gp_offset + num_gp * 8
1648 // l->fp_offset = l->fp_offset + num_fp * 16.
1650 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
1651 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
1655 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
1656 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
1659 CGF.EmitBranch(ContBlock);
1661 // Emit code to load the value if it was passed in memory.
1663 CGF.EmitBlock(InMemBlock);
1664 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
1666 // Return the appropriate result.
1668 CGF.EmitBlock(ContBlock);
1669 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
1671 ResAddr->reserveOperandSpace(2);
1672 ResAddr->addIncoming(RegAddr, InRegBlock);
1673 ResAddr->addIncoming(MemAddr, InMemBlock);
1679 //===----------------------------------------------------------------------===//
1680 // PIC16 ABI Implementation
1681 //===----------------------------------------------------------------------===//
1685 class PIC16ABIInfo : public ABIInfo {
1686 ABIArgInfo classifyReturnType(QualType RetTy,
1687 ASTContext &Context,
1688 llvm::LLVMContext &VMContext) const;
1690 ABIArgInfo classifyArgumentType(QualType RetTy,
1691 ASTContext &Context,
1692 llvm::LLVMContext &VMContext) const;
1694 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1695 llvm::LLVMContext &VMContext,
1696 const llvm::Type *const *PrefTypes,
1697 unsigned NumPrefTypes) const {
1698 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
1700 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1702 it->info = classifyArgumentType(it->type, Context, VMContext);
1705 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1706 CodeGenFunction &CGF) const;
1709 class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
1711 PIC16TargetCodeGenInfo():TargetCodeGenInfo(new PIC16ABIInfo()) {}
1716 ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
1717 ASTContext &Context,
1718 llvm::LLVMContext &VMContext) const {
1719 if (RetTy->isVoidType()) {
1720 return ABIArgInfo::getIgnore();
1722 return ABIArgInfo::getDirect();
1726 ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
1727 ASTContext &Context,
1728 llvm::LLVMContext &VMContext) const {
1729 return ABIArgInfo::getDirect();
1732 llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1733 CodeGenFunction &CGF) const {
1734 const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
1735 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
1737 CGBuilderTy &Builder = CGF.Builder;
1738 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
1740 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
1742 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
1743 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
1745 uint64_t Offset = CGF.getContext().getTypeSize(Ty) / 8;
1747 llvm::Value *NextAddr =
1748 Builder.CreateGEP(Addr, llvm::ConstantInt::get(
1749 llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
1751 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
1760 class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
1762 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
1763 // This is recovered from gcc output.
1764 return 1; // r1 is the dedicated stack pointer
1767 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1768 llvm::Value *Address) const;
1774 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1775 llvm::Value *Address) const {
1776 // This is calculated from the LLVM and GCC tables and verified
1777 // against gcc output. AFAIK all ABIs use the same encoding.
1779 CodeGen::CGBuilderTy &Builder = CGF.Builder;
1780 llvm::LLVMContext &Context = CGF.getLLVMContext();
1782 const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
1783 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
1784 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
1785 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
1787 // 0-31: r0-31, the 4-byte general-purpose registers
1788 AssignToArrayRange(Builder, Address, Four8, 0, 31);
1790 // 32-63: fp0-31, the 8-byte floating-point registers
1791 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
1793 // 64-76 are various 4-byte special-purpose registers:
1800 AssignToArrayRange(Builder, Address, Four8, 64, 76);
1802 // 77-108: v0-31, the 16-byte vector registers
1803 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
1810 AssignToArrayRange(Builder, Address, Four8, 109, 113);
1816 //===----------------------------------------------------------------------===//
1817 // ARM ABI Implementation
1818 //===----------------------------------------------------------------------===//
1822 class ARMABIInfo : public ABIInfo {
1834 ARMABIInfo(ABIKind _Kind) : Kind(_Kind) {}
1837 ABIKind getABIKind() const { return Kind; }
1839 ABIArgInfo classifyReturnType(QualType RetTy,
1840 ASTContext &Context,
1841 llvm::LLVMContext &VMCOntext) const;
1843 ABIArgInfo classifyArgumentType(QualType RetTy,
1844 ASTContext &Context,
1845 llvm::LLVMContext &VMContext) const;
1847 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1848 llvm::LLVMContext &VMContext,
1849 const llvm::Type *const *PrefTypes,
1850 unsigned NumPrefTypes) const;
1852 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
1853 CodeGenFunction &CGF) const;
1856 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
1858 ARMTargetCodeGenInfo(ARMABIInfo::ABIKind K)
1859 :TargetCodeGenInfo(new ARMABIInfo(K)) {}
1861 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
1868 void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
1869 llvm::LLVMContext &VMContext,
1870 const llvm::Type *const *PrefTypes,
1871 unsigned NumPrefTypes) const {
1872 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
1874 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
1876 it->info = classifyArgumentType(it->type, Context, VMContext);
1879 const llvm::Triple &Triple(Context.Target.getTriple());
1880 llvm::CallingConv::ID DefaultCC;
1881 if (Triple.getEnvironmentName() == "gnueabi" ||
1882 Triple.getEnvironmentName() == "eabi")
1883 DefaultCC = llvm::CallingConv::ARM_AAPCS;
1885 DefaultCC = llvm::CallingConv::ARM_APCS;
1887 switch (getABIKind()) {
1889 if (DefaultCC != llvm::CallingConv::ARM_APCS)
1890 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
1894 if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
1895 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
1899 FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
1904 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
1905 ASTContext &Context,
1906 llvm::LLVMContext &VMContext) const {
1907 if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
1908 // Treat an enum type as its underlying type.
1909 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1910 Ty = EnumTy->getDecl()->getIntegerType();
1912 return (Ty->isPromotableIntegerType() ?
1913 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1916 // Ignore empty records.
1917 if (isEmptyRecord(Context, Ty, true))
1918 return ABIArgInfo::getIgnore();
1920 // Structures with either a non-trivial destructor or a non-trivial
1921 // copy constructor are always indirect.
1922 if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
1923 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
1925 // FIXME: This is kind of nasty... but there isn't much choice because the ARM
1926 // backend doesn't support byval.
1927 // FIXME: This doesn't handle alignment > 64 bits.
1928 const llvm::Type* ElemTy;
1930 if (Context.getTypeAlign(Ty) > 32) {
1931 ElemTy = llvm::Type::getInt64Ty(VMContext);
1932 SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
1934 ElemTy = llvm::Type::getInt32Ty(VMContext);
1935 SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
1937 std::vector<const llvm::Type*> LLVMFields;
1938 LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
1939 const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
1940 return ABIArgInfo::getCoerce(STy);
1943 static bool isIntegerLikeType(QualType Ty,
1944 ASTContext &Context,
1945 llvm::LLVMContext &VMContext) {
1946 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
1947 // is called integer-like if its size is less than or equal to one word, and
1948 // the offset of each of its addressable sub-fields is zero.
1950 uint64_t Size = Context.getTypeSize(Ty);
1952 // Check that the type fits in a word.
1956 // FIXME: Handle vector types!
1957 if (Ty->isVectorType())
1960 // Float types are never treated as "integer like".
1961 if (Ty->isRealFloatingType())
1964 // If this is a builtin or pointer type then it is ok.
1965 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
1968 // Small complex integer types are "integer like".
1969 if (const ComplexType *CT = Ty->getAs<ComplexType>())
1970 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
1972 // Single element and zero sized arrays should be allowed, by the definition
1973 // above, but they are not.
1975 // Otherwise, it must be a record type.
1976 const RecordType *RT = Ty->getAs<RecordType>();
1977 if (!RT) return false;
1979 // Ignore records with flexible arrays.
1980 const RecordDecl *RD = RT->getDecl();
1981 if (RD->hasFlexibleArrayMember())
1984 // Check that all sub-fields are at offset 0, and are themselves "integer
1986 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
1988 bool HadField = false;
1990 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1991 i != e; ++i, ++idx) {
1992 const FieldDecl *FD = *i;
1994 // Bit-fields are not addressable, we only need to verify they are "integer
1995 // like". We still have to disallow a subsequent non-bitfield, for example:
1996 // struct { int : 0; int x }
1997 // is non-integer like according to gcc.
1998 if (FD->isBitField()) {
2002 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
2008 // Check if this field is at offset 0.
2009 if (Layout.getFieldOffset(idx) != 0)
2012 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
2015 // Only allow at most one field in a structure. This doesn't match the
2016 // wording above, but follows gcc in situations with a field following an
2018 if (!RD->isUnion()) {
// classifyReturnType - Decide how a value of type RetTy is returned from an
// ARM function, following either the APCS or AAPCS variant selected by
// getABIKind(). Scalars are returned directly (with integer promotion where
// required); small aggregates may be coerced into an integer register;
// everything else is returned indirectly through a sret pointer.
2029 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
2030 ASTContext &Context,
2031 llvm::LLVMContext &VMContext) const {
// void needs no return slot at all.
2032 if (RetTy->isVoidType())
2033 return ABIArgInfo::getIgnore();
// Non-aggregate (scalar) returns: pass directly, extending small promotable
// integer types to a full register as the ABI requires.
2035 if (!CodeGenFunction::hasAggregateLLVMType(RetTy)) {
2036 // Treat an enum type as its underlying type.
2037 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2038 RetTy = EnumTy->getDecl()->getIntegerType();
2040 return (RetTy->isPromotableIntegerType() ?
2041 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2044 // Structures with either a non-trivial destructor or a non-trivial
2045 // copy constructor are always indirect.
2046 if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
2047 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2049 // Are we following APCS?
2050 if (getABIKind() == APCS) {
// Empty records (not allowing arrays here) carry no data; ignore them.
2051 if (isEmptyRecord(Context, RetTy, false))
2052 return ABIArgInfo::getIgnore();
2054 // Complex types are all returned as packed integers.
2056 // FIXME: Consider using 2 x vector types if the back end handles them
2058 if (RetTy->isAnyComplexType())
2059 return ABIArgInfo::getCoerce(llvm::IntegerType::get(
2060 VMContext, Context.getTypeSize(RetTy)))
2062 // Integer like structures are returned in r0.
2063 if (isIntegerLikeType(RetTy, Context, VMContext)) {
2064 // Return in the smallest viable integer type.
2065 uint64_t Size = Context.getTypeSize(RetTy);
2067 return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
2069 return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
2070 return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
2073 // Otherwise return in memory.
2074 return ABIArgInfo::getIndirect(0);
2077 // Otherwise this is an AAPCS variant.
// AAPCS allows empty records (including arrays of them) to be ignored.
2079 if (isEmptyRecord(Context, RetTy, true))
2080 return ABIArgInfo::getIgnore();
2082 // Aggregates <= 4 bytes are returned in r0; other aggregates
2083 // are returned indirectly.
2084 uint64_t Size = Context.getTypeSize(RetTy);
2086 // Return in the smallest viable integer type.
2088 return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
2090 return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
2091 return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
// Too big for a register: return in memory via sret.
2094 return ABIArgInfo::getIndirect(0);
// EmitVAArg - Emit IR that reads the next variadic argument of type Ty from
// the ARM va_list. On ARM the va_list is a simple pointer: load the current
// address, use it (bitcast to Ty*), then advance it by Ty's size rounded up
// to a 4-byte slot and store the new cursor back.
2097 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2098 CodeGenFunction &CGF) const {
2099 // FIXME: Need to handle alignment
2100 const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
2101 const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
2103 CGBuilderTy &Builder = CGF.Builder;
// Reinterpret the va_list slot as an i8** so we can load/store the cursor.
2104 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
2106 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
// Cast the raw cursor to a pointer to the requested argument type.
2108 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
2109 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
// Each vararg occupies a multiple of 4 bytes (size in bits / 8, rounded up).
2112 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
2113 llvm::Value *NextAddr =
2114 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
// Commit the advanced cursor so the next va_arg sees it.
2116 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
// classifyReturnType - Conservative fallback return classification used by
// targets without a dedicated ABIInfo: void is ignored, all aggregates are
// returned indirectly (sret), and scalars are returned directly with small
// integer types extended.
2121 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
2122 ASTContext &Context,
2123 llvm::LLVMContext &VMContext) const {
2124 if (RetTy->isVoidType()) {
2125 return ABIArgInfo::getIgnore();
2126 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
2127 return ABIArgInfo::getIndirect(0);
2129 // Treat an enum type as its underlying type.
2130 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2131 RetTy = EnumTy->getDecl()->getIntegerType();
// Promotable integers are sign/zero-extended to register width.
2133 return (RetTy->isPromotableIntegerType() ?
2134 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2138 //===----------------------------------------------------------------------===//
2139 // SystemZ ABI Implementation
2140 //===----------------------------------------------------------------------===//
// SystemZABIInfo - ABI classification for SystemZ (s390x): computes per-call
// argument/return info by classifying each type independently.
2144 class SystemZABIInfo : public ABIInfo {
// True if Ty is an 8/16/32-bit quantity the SystemZ ABI requires extended.
2145 bool isPromotableIntegerType(QualType Ty) const;
2147 ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
2148 llvm::LLVMContext &VMContext) const;
2150 ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
2151 llvm::LLVMContext &VMContext) const;
// Classify the return type and every argument of the function signature.
2153 virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
2154 llvm::LLVMContext &VMContext,
2155 const llvm::Type *const *PrefTypes,
2156 unsigned NumPrefTypes) const {
2157 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
2158 Context, VMContext);
2159 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2161 it->info = classifyArgumentType(it->type, Context, VMContext);
2164 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2165 CodeGenFunction &CGF) const;
// SystemZTargetCodeGenInfo - Target hooks for SystemZ; owns a SystemZABIInfo
// (TargetCodeGenInfo's destructor deletes it).
2168 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
2170 SystemZTargetCodeGenInfo():TargetCodeGenInfo(new SystemZABIInfo()) {}
// isPromotableIntegerType - Return true for builtin integer types of 32 bits
// or fewer (bool, all char/short/int variants), which the SystemZ ABI
// requires to be extended to a full 64-bit register.
2175 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
2176 // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
2177 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2178 switch (BT->getKind()) {
2179 case BuiltinType::Bool:
2180 case BuiltinType::Char_S:
2181 case BuiltinType::Char_U:
2182 case BuiltinType::SChar:
2183 case BuiltinType::UChar:
2184 case BuiltinType::Short:
2185 case BuiltinType::UShort:
2186 case BuiltinType::Int:
2187 case BuiltinType::UInt:
// EmitVAArg - Emit IR to fetch the next variadic argument of type Ty from the
// SystemZ va_list. (NOTE(review): body not implemented/visible here — confirm
// behavior against the SystemZ va_list layout before relying on it.)
2195 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2196 CodeGenFunction &CGF) const {
// classifyReturnType - SystemZ return classification: void is ignored,
// aggregates are returned indirectly, and scalars are direct with small
// integers extended per isPromotableIntegerType.
2202 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
2203 ASTContext &Context,
2204 llvm::LLVMContext &VMContext) const {
2205 if (RetTy->isVoidType()) {
2206 return ABIArgInfo::getIgnore();
2207 } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
2208 return ABIArgInfo::getIndirect(0);
2210 return (isPromotableIntegerType(RetTy) ?
2211 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
// classifyArgumentType - SystemZ argument classification: aggregates are
// passed indirectly; scalars are direct, extending small integer types.
// Mirrors classifyReturnType minus the void case.
2215 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
2216 ASTContext &Context,
2217 llvm::LLVMContext &VMContext) const {
2218 if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
2219 return ABIArgInfo::getIndirect(0);
2221 return (isPromotableIntegerType(Ty) ?
2222 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2226 //===----------------------------------------------------------------------===//
2227 // MSP430 ABI Implementation
2228 //===----------------------------------------------------------------------===//
// MSP430TargetCodeGenInfo - Uses the default ABI but overrides
// SetTargetAttributes to implement the MSP430 'interrupt' attribute.
2232 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
2234 MSP430TargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
2235 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2236 CodeGen::CodeGenModule &M) const;
// SetTargetAttributes - Apply MSP430-specific IR attributes when emitting a
// declaration. Currently only handles functions marked with the MSP430
// 'interrupt(N)' attribute: set the ISR calling convention, forbid inlining,
// and emit a specially-named global alias that places the handler in the
// interrupt vector table.
2241 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
2242 llvm::GlobalValue *GV,
2243 CodeGen::CodeGenModule &M) const {
2244 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
2245 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
2246 // Handle 'interrupt' attribute:
2247 llvm::Function *F = cast<llvm::Function>(GV);
2249 // Step 1: Set ISR calling convention.
2250 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
2252 // Step 2: Add attributes goodness.
2253 F->addFnAttr(llvm::Attribute::NoInline);
2255 // Step 3: Emit ISR vector alias.
// Vector slots live at 0xffe0 upward; the alias name encodes the hex
// address so the linker/backend can wire up the vector entry.
2256 unsigned Num = attr->getNumber() + 0xffe0;
2257 new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
2259 llvm::LowercaseString(llvm::utohexstr(Num)),
2260 GV, &M.getModule());
2265 //===----------------------------------------------------------------------===//
2266 // MIPS ABI Implementation. This works for both little-endian and
2267 // big-endian variants.
2268 //===----------------------------------------------------------------------===//
// MIPSTargetCodeGenInfo - Uses the default ABI; overrides the DWARF EH hooks
// so exception handling knows the stack-pointer register and register sizes.
// Works for both little-endian and big-endian MIPS.
2271 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
2273 MIPSTargetCodeGenInfo(): TargetCodeGenInfo(new DefaultABIInfo()) {}
2275 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
2279 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2280 llvm::Value *Address) const;
// initDwarfEHRegSizeTable - Fill in the DWARF EH register-size table for
// MIPS: every register slot we care about is 4 bytes wide.
2285 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2286 llvm::Value *Address) const {
2287 // This information comes from gcc's implementation, which seems to
2288 // be as canonical as it gets.
2290 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2291 llvm::LLVMContext &Context = CGF.getLLVMContext();
2293 // Everything on MIPS is 4 bytes. Double-precision FP registers
2294 // are aliased to pairs of single-precision FP registers.
2295 const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
2296 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2298 // 0-31 are the general purpose registers, $0 - $31.
2299 // 32-63 are the floating-point registers, $f0 - $f31.
2300 // 64 and 65 are the multiply/divide registers, $hi and $lo.
2301 // 66 is the (notional, I think) register for signal-handler return.
2302 AssignToArrayRange(Builder, Address, Four8, 0, 65);
2304 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
2305 // They are one bit wide and ignored here.
2307 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
2308 // (coprocessor 1 is the FP unit)
2309 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
2310 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
2311 // 176-181 are the DSP accumulator registers.
2312 AssignToArrayRange(Builder, Address, Four8, 80, 181);
2318 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
2319 if (TheTargetCodeGenInfo)
2320 return *TheTargetCodeGenInfo;
2322 // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
2325 const llvm::Triple &Triple = getContext().Target.getTriple();
2326 switch (Triple.getArch()) {
2328 return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo());
2330 case llvm::Triple::mips:
2331 case llvm::Triple::mipsel:
2332 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo());
2334 case llvm::Triple::arm:
2335 case llvm::Triple::thumb:
2336 // FIXME: We want to know the float calling convention as well.
2337 if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
2338 return *(TheTargetCodeGenInfo =
2339 new ARMTargetCodeGenInfo(ARMABIInfo::APCS));
2341 return *(TheTargetCodeGenInfo =
2342 new ARMTargetCodeGenInfo(ARMABIInfo::AAPCS));
2344 case llvm::Triple::pic16:
2345 return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo());
2347 case llvm::Triple::ppc:
2348 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo());
2350 case llvm::Triple::systemz:
2351 return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo());
2353 case llvm::Triple::msp430:
2354 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo());
2356 case llvm::Triple::x86:
2357 switch (Triple.getOS()) {
2358 case llvm::Triple::Darwin:
2359 return *(TheTargetCodeGenInfo =
2360 new X86_32TargetCodeGenInfo(Context, true, true));
2361 case llvm::Triple::Cygwin:
2362 case llvm::Triple::MinGW32:
2363 case llvm::Triple::MinGW64:
2364 case llvm::Triple::AuroraUX:
2365 case llvm::Triple::DragonFly:
2366 case llvm::Triple::FreeBSD:
2367 case llvm::Triple::OpenBSD:
2368 case llvm::Triple::Minix:
2369 return *(TheTargetCodeGenInfo =
2370 new X86_32TargetCodeGenInfo(Context, false, true));
2373 return *(TheTargetCodeGenInfo =
2374 new X86_32TargetCodeGenInfo(Context, false, false));
2377 case llvm::Triple::x86_64:
2378 return *(TheTargetCodeGenInfo =
2379 new X86_64TargetCodeGenInfo(Context, TheTargetData));