//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>    // std::sort

using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

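// For example, given a (hypothetical, illustration-only) declaration such as:
//
//   typedef union {
//     int *ip;
//     long *lp;
//   } __attribute__((transparent_union)) ptr_union;
//
// an argument of type 'ptr_union' is classified exactly as if it had the type
// of its first field, 'int *'.
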
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:  OS << "Extend";  break;
  case Ignore:  OS << "Ignore";  break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:  OS << "Expand";  break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   HiPE
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

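// Illustrative examples (not from the original source):
//   struct A { struct { double d; } inner; };  // single-element: 'double'
//   struct C { double d[1]; };                 // single-element: 'double'
//   struct B { double d; int i; };             // not single-element
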
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}

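// Illustrative examples (not from the original source): 'struct { int a, b; }'
// can be expanded into two i32 arguments with the same stack layout, while
// 'struct { char c; }' (field smaller than 32 bits) and
// 'struct { int a : 3; }' (bit-field) cannot.
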
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return nullptr;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

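// Illustrative examples (not from the original source): under this default
// ABI a 'short' argument is widened via getExtend() (it is a promotable
// integer), an 'int' is passed directly, and any struct or union is passed
// and returned indirectly through a pointer.
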
//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return nullptr;
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

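// Illustrative example (not from the original source): for inline assembly
// such as 'asm("..." : "=y"(v))' where 'v' lowers to a 64-bit vector, the
// operand type is rewritten to the LLVM x86_mmx type so that the "y" (MMX
// register) constraint can be satisfied.
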
/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

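// Illustrative example (not from the original source): a homogeneous vector
// aggregate such as 'struct { __m128 v[4]; }' has four members and still
// qualifies, while one with five or more vector members does not.
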
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
};

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     match $0 = $1, $1 = $2
/// The result will be:
///     match $0 = $1, $1 = $3
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
  }
  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

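// Illustrative examples (not from the original source): with the
// small-struct-in-register ABI, 'struct { float f; }' is returned directly
// via the single-element-struct rule above (in an FP register on non-Win32
// targets), 'struct { short a, b; }' is returned as an i32, and
// 'struct { char c[5]; }' is not register sized and is returned indirectly.
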
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

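// Illustrative example (not from the original source): on Darwin, a byval
// struct containing a 128-bit vector field (e.g. __m128) has a 16-byte type
// alignment and gets stack alignment 16, while on other 32-bit x86 targets
// the function above always falls back to the 4-byte minimum.
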
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

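// Illustrative example (not from the original source): under fastcall, with
// its two free registers (ECX, EDX), an 'int' argument consumes one register
// and is passed inreg; an 'int64_t' occupies two 32-bit registers but is then
// rejected by the 'Size > 32' check above, so it goes on the stack.
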
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    if (RT) {
      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment. For x86_32, each argument is 4
  // byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;
    assert(NumBytes);
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
  case ABIArgInfo::Expand:
    if (Info.getInReg())
      return false;
    return true;
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  unsigned StackOffset = 0;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}

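// Illustrative example (not from the original source): for a win32 function
// 'void f(NonTrivial a, int b)', every stack argument becomes a field of one
// packed struct (here { NonTrivial, i32 }) that the caller allocates in its
// outgoing argument area via the inalloca attribute.
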
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

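// Illustrative summary (not from the original source): '-freg-struct-return'
// (SRCK_InRegs) forces small-struct returns in registers,
// '-fpcc-struct-return' (SRCK_OnStack) forces them onto the stack, and with
// neither flag the default falls out of the target OS checks above.
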
void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                                llvm::AttributeSet::FunctionIndex,
                                                B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);

  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
                      bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x0a << 8) |  //           .+0x0c
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix. This
  // matches the behavior of MSVC.
  std::string ArgStr = Lib;
  if (!Lib.endswith_lower(".lib"))
    ArgStr += ".lib";
  return ArgStr;
}

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             bool d, bool p, bool w, unsigned RegParms)
    : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)), HasAVX(HasAVX) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

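// Illustrative example (not from the original source): for
// 'union { __m128 v; long l; }' the merge step yields Lo = Integer (SSE
// merged with INTEGER) while Hi stays SSEUp; since that SSEUp is no longer
// preceded by SSE, clause (d) above converts Hi to SSE.
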
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

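// Illustrative example (not from the original source): for
// 'struct { float f; int i; }' both fields share the first eightbyte, and
// merging SSE (float) with INTEGER (int) yields INTEGER per rule (d), so the
// struct is passed in a single general purpose register.
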
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getTarget().getTriple().isOSNaCl())) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}, so classify both
        // Lo and Hi now.
        Lo = Hi = Integer;
      } else {
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
        // straddles an eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj) {
          Lo = Hi = Integer;
        } else {
          Current = Integer;
        }
      }
    } else {
      Current = Integer;
    }
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there're no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      //
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
      // registers if they are "named", i.e. not part of the "..." of a
      // variadic function.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

1865 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1866 QualType ET = getContext().getCanonicalType(CT->getElementType());
1868 uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getTarget().getTriple().isOSNaCl()))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }
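  // For example: _Complex float fits one eightbyte and classifies SSE;
  // _Complex double is {SSE, SSE} with real and imaginary parts in separate
  // eightbytes; _Complex long double becomes ComplexX87 (returned in
  // %st0/%st1).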
1893 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1894 // Arrays are treated like structures.
1896 uint64_t Size = getContext().getTypeSize(Ty);
1898 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;
1903 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1904 // fields, it has class MEMORY.
1906 // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;
1910 // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
1913 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1914 uint64_t ArraySize = AT->getSize().getZExtValue();
1916 // The only case a 256-bit wide vector could be used is when the array
1917 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1918 // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 && EltSize != 256)
      return;
1922 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1923 Class FieldLo, FieldHi;
1924 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
1925 Lo = merge(Lo, FieldLo);
1926 Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }
1931 postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
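  // For example, a struct whose only field is 'float f[4]' (128 bits) merges
  // two floats into each eightbyte, classifying as {SSE, SSE}.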
1936 if (const RecordType *RT = Ty->getAs<RecordType>()) {
1937 uint64_t Size = getContext().getTypeSize(Ty);
1939 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;
1944 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;
1950 const RecordDecl *RD = RT->getDecl();
1952 // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;
1956 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // Reset Lo class, this will be recomputed.
    Current = NoClass;
1961 // If this is a C++ record, classify the bases first.
1962 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1963 for (const auto &I : CXXRD->bases()) {
1964 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
1965 "Unexpected base class!");
1966 const CXXRecordDecl *Base =
1967 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
1969 // Classify this field.
1971 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1972 // single eightbyte, each is classified separately. Each eightbyte gets
1973 // initialized to class NO_CLASS.
      Class FieldLo, FieldHi;
      uint64_t Offset =
        OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
1977 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
1978 Lo = merge(Lo, FieldLo);
1979 Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
1987 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1988 i != e; ++i, ++idx) {
1989 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1990 bool BitField = i->isBitField();
1992 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1993 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
1995 // The only case a 256-bit wide vector could be used is when the struct
1996 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1997 // to work for sizes wider than 128, early check and fallback to memory.
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
2003 // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }
2009 // Classify this field.
2011 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2012 // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;
2017 // Bit-fields require special handling, they do not force the
2018 // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;
2025 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2026 uint64_t Size = i->getBitWidthValue(getContext());
2028 uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2041 Lo = merge(Lo, FieldLo);
2042 Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}
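// A worked example of the record rules: 'struct { long l; double d; }'
// classifies the first eightbyte INTEGER and the second SSE, so it travels in
// one GPR plus one XMM register, while a misaligned (e.g. packed) field would
// force the whole struct to MEMORY.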
2051 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
2054 if (!isAggregateTypeForABI(Ty)) {
2055 // Treat an enum type as its underlying type.
2056 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2057 Ty = EnumTy->getDecl()->getIntegerType();
2059 return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}
2066 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2067 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2068 uint64_t Size = getContext().getTypeSize(VecTy);
2069 unsigned LargestVector = HasAVX ? 256 : 128;
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}
2077 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2078 unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
2082 // This assumption is optimistic, as there could be free registers available
2083 // when we need to pass this argument in memory, and LLVM could try to pass
2084 // the argument in the free register. This does not seem to happen currently,
2085 // but this code would be much safer if we could mark the argument with
2086 // 'onstack'. See PR12193.
2087 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2088 // Treat an enum type as its underlying type.
2089 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2090 Ty = EnumTy->getDecl()->getIntegerType();
2092 return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
2096 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2097 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2099 // Compute the byval alignment. We specify the alignment of the byval in all
2100 // cases so that the mid-level optimizer knows the alignment of the byval.
2101 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2103 // Attempt to avoid passing indirect results using byval when possible. This
2104 // is important for good codegen.
2106 // We do this by coercing the value into a scalar type which the backend can
2107 // handle naturally (i.e., without using byval).
2109 // For simplicity, we currently only do this when we have exhausted all of the
2110 // free integer registers. Doing this when there are free integer registers
2111 // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering
  // the arguments to the function (so that any subsequent inreg values came
  // first), or only doing this optimization when there were no following
  // arguments that might be inreg.
  //
2117 // We currently expect it to be rare (particularly in well written code) for
2118 // arguments to be passed on the stack when there are still free integer
2119 // registers available (this would typically imply large structs being passed
2120 // by value), so this seems like a fair tradeoff for now.
2122 // We can revisit this if the backend grows support for 'onstack' parameter
2123 // attributes. See PR12193.
2124 if (freeIntRegs == 0) {
2125 uint64_t Size = getContext().getTypeSize(Ty);
2127 // If this type fits in an eightbyte, coerce it into the matching integral
2128 // type, which will end up on the stack (with alignment 8).
2129 if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}
2137 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2138 /// register. Pick an LLVM IR type that will be passed as a vector register.
2139 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2140 // Wrapper structs/arrays that only contain vectors are passed just like
2141 // vectors; strip them off if present.
2142 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2143 Ty = QualType(InnerTy, 0);
2145 llvm::Type *IRType = CGT.ConvertType(Ty);
2147 // If the preferred type is a 16-byte vector, prefer to pass it.
2148 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
2149 llvm::Type *EltTy = VT->getElementType();
2150 unsigned BitWidth = VT->getBitWidth();
2151 if ((BitWidth >= 128 && BitWidth <= 256) &&
2152 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
2153 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
2154 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return VT;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}
2162 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2163 /// is known to either be off the end of the specified type or being in
2164 /// alignment padding. The user type specified is known to be at most 128 bits
2165 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2166 /// classification that put one of the two halves in the INTEGER class.
2168 /// It is conservatively correct to return false.
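///
/// For example, in 'struct { double d; int i; }' (128 bits) the bit range
/// [96, 128) is tail padding and holds no user data, and any range starting
/// at or beyond the end of a type trivially contains none.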
2169 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2170 unsigned EndBit, ASTContext &Context) {
2171 // If the bytes being queried are off the end of the type, there is no user
2172 // data hiding here. This handles analysis of builtins, vectors and other
2173 // types that don't contain interesting padding.
2174 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;
2178 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2179 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2180 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2182 // Check each element to see if the element overlaps with the queried range.
2183 for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
2185 unsigned EltOffset = i*EltSize;
2186 if (EltOffset >= EndBit) break;
2188 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2189 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }
2197 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2198 const RecordDecl *RD = RT->getDecl();
2199 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2201 // If this is a C++ record, check the bases first.
2202 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2203 for (const auto &I : CXXRD->bases()) {
2204 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2205 "Unexpected base class!");
2206 const CXXRecordDecl *Base =
2207 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2209 // If the base is after the span we care about, ignore it.
2210 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
2211 if (BaseOffset >= EndBit) continue;
2213 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2214 if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }
2220 // Verify that no field has data that overlaps the region of interest. Yes
2221 // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care.
    unsigned idx = 0;
2225 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2226 i != e; ++i, ++idx) {
2227 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
2229 // If we found a field after the region we care about, then we're done.
2230 if (FieldOffset >= EndBit) break;
2232 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}
2246 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2247 /// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
2250 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
2251 const llvm::DataLayout &TD) {
2252 // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;
2256 // If this is a struct, recurse into the field at the specified offset.
2257 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2258 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2259 unsigned Elt = SL->getElementContainingOffset(IROffset);
2260 IROffset -= SL->getElementOffset(Elt);
2261 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
2264 // If this is an array, recurse into the field at the specified offset.
2265 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2266 llvm::Type *EltTy = ATy->getElementType();
2267 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2268 IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}
2276 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
2277 /// low 8 bytes of an XMM register, corresponding to the SSE class.
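///
/// For example: 'struct { float a, b; }' comes back as <2 x float>; for a
/// struct of three floats the first eightbyte is <2 x float> and a second
/// query yields float, since the trailing four bytes are padding; a double
/// is simply passed as double.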
2278 llvm::Type *X86_64ABIInfo::
2279 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2280 QualType SourceTy, unsigned SourceOffset) const {
2281 // The only three choices we have are either double, <2 x float>, or float. We
2282 // pass as float if the last 4 bytes is just padding. This happens for
2283 // structs that contain 3 floats.
2284 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
2285 SourceOffset*8+64, getContext()))
2286 return llvm::Type::getFloatTy(getVMContext());
2288 // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
2291 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
2292 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
2293 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
  return llvm::Type::getDoubleTy(getVMContext());
}
2299 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
2300 /// an 8-byte GPR. This means that we either have a scalar or we are talking
2301 /// about the high or low part of an up-to-16-byte struct. This routine picks
2302 /// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
2306 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
2307 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
2308 /// the 8-byte value references. PrefType may be null.
2310 /// SourceTy is the source-level type for the entire argument. SourceOffset is
2311 /// an offset into this that we're processing (which is always either 0 or 8).
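///
/// For example, a lone 'char' stays i8 (bits [8, 64) are off the end of the
/// type), and the second eightbyte of 'struct { double d; int i; }' comes
/// back as i32 because its upper four bytes are tail padding.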
2313 llvm::Type *X86_64ABIInfo::
2314 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2315 QualType SourceTy, unsigned SourceOffset) const {
2316 // If we're dealing with an un-offset LLVM IR type, then it means that we're
2317 // returning an 8-byte unit starting with it. See if we can safely use it.
2318 if (IROffset == 0) {
2319 // Pointers and int64's always fill the 8-byte unit.
2320 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;
2324 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
2325 // goodness in the source type is just tail padding. This is allowed to
2326 // kick in for struct {double,int} on the int, but not on
2327 // struct{double,int,int} because we wouldn't return the second int. We
2328 // have to do this analysis on the source type because we can't depend on
2329 // unions being lowered a specific way etc.
2330 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2331 IRType->isIntegerTy(32) ||
2332 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2333 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2334 cast<llvm::IntegerType>(IRType)->getBitWidth();
2336 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }
2342 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2343 // If this is a struct, recurse into the field at the specified offset.
2344 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2345 if (IROffset < SL->getSizeInBytes()) {
2346 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2347 IROffset -= SL->getElementOffset(FieldIdx);
2349 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }
2354 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2355 llvm::Type *EltTy = ATy->getElementType();
2356 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2357 unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }
2362 // Okay, we don't have any better idea of what to pass, so we pass this in an
2363 // integer register that isn't too big to fit the rest of the struct.
2364 unsigned TySizeInBytes =
2365 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2367 assert(TySizeInBytes != SourceOffset && "Empty field?");
2369 // It is always safe to classify this as an integer type up to i64 that
2370 // isn't larger than the structure.
2371 return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
2376 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2377 /// be used as elements of a two register pair to pass or return, return a
2378 /// first class aggregate to represent them. For example, if the low part of
2379 /// a by-value argument should be passed as i32* and the high part as float,
2380 /// return {i32*, float}.
static llvm::StructType *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2383 const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
2385 // at offset 8. If the high and low parts we inferred are both 4-byte types
2386 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2387 // the second element at offset 8. Check for this:
2388 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2389 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2390 unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
2391 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2393 // To handle this, we have to increase the size of the low part so that the
2394 // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
2398 // There are only two sorts of types the ABI generation code can produce for
2399 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2400 // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }
2409 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
2412 // Verify that the second element is at an 8-byte offset.
2413 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2414 "Invalid x86-64 argument pair!");
2418 ABIArgInfo X86_64ABIInfo::
2419 classifyReturnType(QualType RetTy) const {
2420 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2421 // classification algorithm.
2422 X86_64ABIInfo::Class Lo, Hi;
2423 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2425 // Check some invariants.
2426 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2427 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
2436 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2437 "Unknown missing lo part");
2442 llvm_unreachable("Invalid classification for lo word.");
    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
2447 return getIndirectReturnResult(RetTy);
2449 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
2452 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2454 // If we have a sign or zero extended integer, make sure to return Extend
2455 // so that the parameter gets the right LLVM IR attributes.
2456 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2457 // Treat an enum type as its underlying type.
2458 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2459 RetTy = EnumTy->getDecl()->getIntegerType();
2461 if (RetTy->isIntegralOrEnumerationType() &&
2462 RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;
2467 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;
2473 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;
2479 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2484 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    nullptr);
    break;
  }
  llvm::Type *HighPart = nullptr;
  switch (Hi) {
2492 // Memory was handled previously and X87 should
  // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");
  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
2503 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2504 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  case SSE:
2508 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2509 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
2513 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk if the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;
2523 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2524 // returned together with the previous X87 value in %st0.
2526 // If X87Up is preceded by X87, we don't need to do
2527 // anything. However, in some cases with unions it may not be
2528 // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
  case X87Up:
2531 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2532 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  }
2538 // If a high part was specified, merge it together with the low part. It is
2539 // known to pass in the high eightbyte of the result. We do this by forming a
2540 // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
  return ABIArgInfo::getDirect(ResType);
}
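// Example of the return rules: '__int128' classifies {Integer, Integer} and
// is returned as {i64, i64} in %rax:%rdx, while '_Complex double' classifies
// {SSE, SSE} and is returned as {double, double} in %xmm0/%xmm1.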
2547 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
    bool isNamedArg) const {
2552 Ty = useFirstFieldIfTransparentUnion(Ty);
2554 X86_64ABIInfo::Class Lo, Hi;
2555 classify(Ty, 0, Lo, Hi, isNamedArg);
2557 // Check some invariants.
2558 // FIXME: Enforce these by construction.
2559 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
2571 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2572 "Unknown missing lo part");
    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:
2579 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");
2591 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;
2597 // Pick an 8-byte type based on the preferred type.
2598 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2600 // If we have a sign or zero extended integer, make sure to return Extend
2601 // so that the parameter gets the right LLVM IR attributes.
2602 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2603 // Treat an enum type as its underlying type.
2604 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2605 Ty = EnumTy->getDecl()->getIntegerType();
2607 if (Ty->isIntegralOrEnumerationType() &&
2608 Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;
2614 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
2615 // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
2618 llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }
  llvm::Type *HighPart = nullptr;
  switch (Hi) {
2627 // Memory was handled previously, ComplexX87 and X87 should
2628 // never occur as hi classes, and X87Up must be preceded by X87,
  // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");
2635 case NoClass: break;
  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2642 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
2646 // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
2650 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2652 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;
2658 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2659 // eightbyte is passed in the upper half of the last used SSE
    // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
2662 assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }
2667 // If a high part was specified, merge it together with the low part. It is
2668 // known to pass in the high eightbyte of the result. We do this by forming a
2669 // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
  return ABIArgInfo::getDirect(ResType);
}
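// Example of the argument rules: a named 'struct { double d; long l; }'
// classifies {SSE, Integer}, so neededSSE == 1, neededInt == 1, and the value
// is passed directly as the pair {double, i64} (one XMM plus one GPR).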
2676 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2678 if (!getCXXABI().classifyReturnType(FI))
2679 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2681 // Keep track of the number of assigned registers.
2682 unsigned freeIntRegs = 6, freeSSERegs = 8;
2684 // If the return value is indirect, then the hidden argument is consuming one
2685 // integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;
2689 // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++freeIntRegs;
2693 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
2694 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  unsigned ArgNo = 0;
2697 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2698 it != ie; ++it, ++ArgNo) {
2699 bool IsNamedArg = ArgNo < NumRequiredArgs;
2701 unsigned neededInt, neededSSE;
2702 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2703 neededSSE, IsNamedArg);
2705 // AMD64-ABI 3.2.3p3: If there are no registers available for any
2706 // eightbyte of an argument, the whole argument is passed on the
2707 // stack. If registers have already been assigned for some
2708 // eightbytes of such an argument, the assignments get reverted.
2709 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
2710 freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type, freeIntRegs);
    }
  }
}
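// For instance: once all six integer registers are consumed
// (freeIntRegs == 0), a further 'int' argument still classifies INTEGER but
// fails the check above and is reclassified through getIndirectResult, i.e.
// it ends up on the stack.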
2718 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
2721 llvm::Value *overflow_arg_area_p =
2722 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2723 llvm::Value *overflow_arg_area =
2724 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2726 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2727 // byte boundary if alignment needed by type exceeds 8 byte boundary.
2728 // It isn't stated explicitly in the standard, but in practice we use
2729 // alignment greater than 16 where necessary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
2732 // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
2733 llvm::Value *Offset =
2734 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
2735 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
2741 overflow_arg_area->getType(),
2742 "overflow_arg_area.align");
2745 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
2746 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));
2751 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2752 // l->overflow_arg_area + sizeof(type).
2753 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2754 // an 8 byte boundary.
2756 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2757 llvm::Value *Offset =
2758 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
2759 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2760 "overflow_arg_area.next");
2761 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
2767 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2768 CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
2776 unsigned neededInt, neededSSE;
2778 Ty = CGF.getContext().getCanonicalType(Ty);
2779 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
2780 /*isNamedArg*/false);
2782 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2783 // in the registers. If not go to step 7.
2784 if (!neededInt && !neededSSE)
2785 return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2787 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2788 // general purpose registers needed to pass type and num_fp to hold
2789 // the number of floating point registers needed.
2791 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2792 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2793 // l->fp_offset > 304 - num_fp * 16 go to step 7.
2795 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.
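  // (Concretely: the save area holds 6 GPRs at bytes 0-47 and 8 XMM
  // registers at bytes 48-175, so gp_offset can grow to 48 and fp_offset
  // to 176.)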
2798 llvm::Value *InRegs = nullptr;
2799 llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
  llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
  if (neededInt) {
2802 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2803 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
2804 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2805 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2809 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2810 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2811 llvm::Value *FitsInFP =
2812 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2813 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }
2817 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2818 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2819 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2820 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2822 // Emit code to load the value if it was passed in registers.
2824 CGF.EmitBlock(InRegBlock);
2826 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2827 // an offset of l->gp_offset and/or l->fp_offset. This may require
2828 // copying to a temporary location in case the parameter is passed
2829 // in different register classes or requires an alignment greater
2830 // than 8 for general purpose registers and 16 for XMM registers.
2832 // FIXME: This really results in shameful code when we end up needing to
2833 // collect arguments from different places; often what should result in a
2834 // simple assembling of a structure from scattered addresses has many more
2835 // loads than necessary. Can we clean this up?
2836 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2837 llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
2840 if (neededInt && neededSSE) {
2842 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2843 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2844 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2845 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2846 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2847 llvm::Type *TyLo = ST->getElementType(0);
2848 llvm::Type *TyHi = ST->getElementType(1);
2849 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2850 "Unexpected ABI info for mixed regs");
2851 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2852 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2853 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2854 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2855 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
2856 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2859 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2860 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2861 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2863 RegAddr = CGF.Builder.CreateBitCast(Tmp,
2864 llvm::PointerType::getUnqual(LTy));
2865 } else if (neededInt) {
2866 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2867 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2868 llvm::PointerType::getUnqual(LTy));
2870 // Copy to a temporary if necessary to ensure the appropriate alignment.
2871 std::pair<CharUnits, CharUnits> SizeAlign =
2872 CGF.getContext().getTypeInfoInChars(Ty);
2873 uint64_t TySize = SizeAlign.first.getQuantity();
    unsigned TyAlign = SizeAlign.second.getQuantity();
    if (TyAlign > 8) {
      llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
      RegAddr = Tmp;
    }
2880 } else if (neededSSE == 1) {
2881 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2882 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
2886 // SSE registers are spaced 16 bytes apart in the register save
2887 // area, we need to collect the two eightbytes together.
2888 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2889 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2890 llvm::Type *DoubleTy = CGF.DoubleTy;
2891 llvm::Type *DblPtrTy =
2892 llvm::PointerType::getUnqual(DoubleTy);
2893 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
2894 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
2895 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2902 RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }
2906 // AMD64-ABI 3.5.7p5: Step 5. Set:
2907 // l->gp_offset = l->gp_offset + num_gp * 8
2908 // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
2919 CGF.EmitBranch(ContBlock);
2921 // Emit code to load the value if it was passed in memory.
2923 CGF.EmitBlock(InMemBlock);
2924 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2926 // Return the appropriate result.
2928 CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
                                                 "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}
2936 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
2937 bool IsReturnType) const {
2939 if (Ty->isVoidType())
2940 return ABIArgInfo::getIgnore();
2942 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2943 Ty = EnumTy->getDecl()->getIntegerType();
2945 TypeInfo Info = getContext().getTypeInfo(Ty);
2946 uint64_t Width = Info.Width;
2947 unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
2951 if (!IsReturnType) {
2952 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }
2956 if (RT->getDecl()->hasFlexibleArrayMember())
2957 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2959 // FIXME: mingw-w64-gcc emits 128-bit struct as i128
2960 if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Width));
  }
  // vectorcall adds the concept of a homogenous vector aggregate, similar to
  // other targets.
2967 const Type *Base = nullptr;
2968 uint64_t NumElts = 0;
2969 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
2970 if (FreeSSERegs >= NumElts) {
2971 FreeSSERegs -= NumElts;
2972 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
2973 return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
  }
2980 if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
2983 llvm::Type *LLTy = CGT.ConvertType(Ty);
2984 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }
2988 if (RT || Ty->isMemberPointerType()) {
2989 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2990 // not 1, 2, 4, or 8 bytes, must be passed by reference."
2991 if (Width > 64 || !llvm::isPowerOf2_64(Width))
2992 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2994 // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }
  // Bool type is always extended to the ABI, other builtin types are not
  // extended.
3000 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3001 if (BT && BT->getKind() == BuiltinType::Bool)
3002 return ABIArgInfo::getExtend();
  return ABIArgInfo::getDirect();
}
3007 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  bool IsVectorCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3011 // We can use up to 4 SSE return registers with vectorcall.
3012 unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3013 if (!getCXXABI().classifyReturnType(FI))
3014 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
3016 // We can use up to 6 SSE register parameters with vectorcall.
3017 FreeSSERegs = IsVectorCall ? 6 : 0;
3018 for (auto &I : FI.arguments())
    I.info = classify(I.type, FreeSSERegs, false);
}
3022 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3023 CodeGenFunction &CGF) const {
3024 llvm::Type *BPP = CGF.Int8PtrPtrTy;
3026 CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3032 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
  uint64_t Offset =
      llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
3036 llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

namespace {
class NaClX86_64ABIInfo : public ABIInfo {
 public:
3048 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
3049 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
3050 void computeInfo(CGFunctionInfo &FI) const override;
3051 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
 private:
  PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
  X86_64ABIInfo NInfo; // Used for everything else.
};
class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
 public:
3061 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {
  }
3064 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};

}
3071 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3072 if (FI.getASTCallingConvention() == CC_PnaclCall)
    PInfo.computeInfo(FI);
  else
    NInfo.computeInfo(FI);
}
3078 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3079 CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
}
3088 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
public:
3091 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
3093 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {}
3101 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3102 // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }
3106 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3107 llvm::Value *Address) const override;
3109 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return 16; // Natural alignment for Altivec vectors.
  }
};
3116 llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
3119 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return nullptr;
  }
3125 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
3126 bool isInt = Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
3127 llvm::Type *CharPtr = CGF.Int8PtrTy;
3128 llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy;
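  // The byte offsets used below (+1, +3, +4) walk the fields of the SVR4
  // PPC32 va_list, which is assumed to be laid out as:
  //   struct {
  //     char gpr;                 // offset 0: next GPR index
  //     char fpr;                 // offset 1: next FPR index
  //     short reserved;           // offset 2
  //     void *overflow_arg_area;  // offset 4: next stack argument
  //     void *reg_save_area;      // offset 8: GPRs, then FPRs at +32
  //   };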
3130 CGBuilderTy &Builder = CGF.Builder;
3131 llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
3132 llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty);
3133 llvm::Value *FPRPtrAsInt = Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
3134 llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
3135 llvm::Value *OverflowAreaPtrAsInt = Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
3136 llvm::Value *OverflowAreaPtr = Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
3137 llvm::Value *RegsaveAreaPtrAsInt = Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
3138 llvm::Value *RegsaveAreaPtr = Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);
3139 llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");
  // Align GPR when TY is i64.
  if (isI64) {
3142 llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
3143 llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
3144 llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
    GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);
  }
3147 llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
3148 llvm::Value *OverflowArea = Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
3149 llvm::Value *OverflowAreaAsInt = Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
3150 llvm::Value *RegsaveArea = Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
3151 llvm::Value *RegsaveAreaAsInt = Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);
3153 llvm::Value *CC = Builder.CreateICmpULT(isInt ? GPR : FPR,
3154 Builder.getInt8(8), "cond");
3156 llvm::Value *RegConstant = Builder.CreateMul(isInt ? GPR : FPR,
3157 Builder.getInt8(isInt ? 4 : 8));
3159 llvm::Value *OurReg = Builder.CreateAdd(RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));
3161 if (Ty->isFloatingType())
3162 OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));
3164 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
3165 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
3166 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3168 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
3170 CGF.EmitBlock(UsingRegs);
3172 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3173 llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);
  // Increase the GPR/FPR indexes.
  if (isInt) {
3176 GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
    Builder.CreateStore(GPR, GPRPtr);
  } else {
3179 FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
    Builder.CreateStore(FPR, FPRPtr);
  }
3182 CGF.EmitBranch(Cont);
3184 CGF.EmitBlock(UsingOverflow);
3186 // Increase the overflow area.
3187 llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
3188 OverflowAreaAsInt = Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
3189 Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr), OverflowAreaPtr);
3190 CGF.EmitBranch(Cont);
3192 CGF.EmitBlock(Cont);
3194 llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
3195 Result->addIncoming(Result1, UsingRegs);
3196 Result->addIncoming(Result2, UsingOverflow);
3198 if (Ty->isAggregateType()) {
3199 llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr") ;
3200 return Builder.CreateLoad(AGGPtr, false, "aggr");
3207 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3208 llvm::Value *Address) const {
3209 // This is calculated from the LLVM and GCC tables and verified
3210 // against gcc output. AFAIK all ABIs use the same encoding.
3212 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3214 llvm::IntegerType *i8 = CGF.Int8Ty;
3215 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3216 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3217 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3219 // 0-31: r0-31, the 4-byte general-purpose registers
3220 AssignToArrayRange(Builder, Address, Four8, 0, 31);
3222 // 32-63: fp0-31, the 8-byte floating-point registers
3223 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75 cr0-7
  // 76: xer
3232 AssignToArrayRange(Builder, Address, Four8, 64, 76);
3234 // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}
3250 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
public:
  enum ABIKind {
    ELFv1 = 0,
    ELFv2
  };

private:
  static const unsigned GPRBits = 64;
  ABIKind Kind;

public:
3263 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
3264 : DefaultABIInfo(CGT), Kind(Kind) {}
3266 bool isPromotableTypeForABI(QualType Ty) const;
3267 bool isAlignedParamType(QualType Ty) const;
3269 ABIArgInfo classifyReturnType(QualType RetTy) const;
3270 ABIArgInfo classifyArgumentType(QualType Ty) const;
3272 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
3273 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
3274 uint64_t Members) const override;
3276 // TODO: We can add more logic to computeInfo to improve performance.
3277 // Example: For aggregate arguments that fit in a register, we could
3278 // use getDirectInReg (as is done below for structs containing a single
3279 // floating-point value) to avoid pushing them to memory on function
3280 // entry. This would require changing the logic in PPCISelLowering
3281 // when lowering the parameters in the caller and args in the callee.
3282 void computeInfo(CGFunctionInfo &FI) const override {
3283 if (!getCXXABI().classifyReturnType(FI))
3284 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3285 for (auto &I : FI.arguments()) {
3286 // We rely on the default argument classification for the most part.
3287 // One exception: An aggregate containing a single floating-point
3288 // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
3291 const BuiltinType *BT = T->getAs<BuiltinType>();
        if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }
3303 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
3309 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
3310 PPC64_SVR4_ABIInfo::ABIKind Kind)
3311 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {}
3313 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3314 // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }
3318 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3319 llvm::Value *Address) const override;
3321 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return 16; // Natural alignment for Altivec and VSX vectors.
  }
};
class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
3328 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
3330 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3331 // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }
3335 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3336 llvm::Value *Address) const override;
3338 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return 16; // Natural alignment for Altivec vectors.
  }
};
3345 // Return true if the ABI requires Ty to be passed sign- or zero-
3346 // extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
3349 // Treat an enum type as its underlying type.
3350 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3351 Ty = EnumTy->getDecl()->getIntegerType();
3353 // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;
3357 // In addition to the usual promotable integer types, we also need to
3358 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
3359 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
3360 switch (BT->getKind()) {
3361 case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  return false;
}
3371 /// isAlignedParamType - Determine whether a type requires 16-byte
3372 /// alignment in the parameter area.
bool
PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
3375 // Complex types are passed just like their elements.
3376 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
3377 Ty = CTy->getElementType();
3379 // Only vector types of size 16 bytes need alignment (larger types are
3380 // passed via reference, smaller types are not aligned).
3381 if (Ty->isVectorType())
3382 return getContext().getTypeSize(Ty) == 128;
3384 // For single-element float/vector structs, we consider the whole type
3385 // to have the same alignment requirements as its single element.
3386 const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
3389 const BuiltinType *BT = EltType->getAs<BuiltinType>();
3390 if ((EltType->isVectorType() &&
3391 getContext().getTypeSize(EltType) == 128) ||
3392 (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }
3396 // Likewise for ELFv2 homogeneous aggregates.
3397 const Type *Base = nullptr;
3398 uint64_t Members = 0;
3399 if (!AlignAsType && Kind == ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;
3403 // With special case aggregates, only vector base types need alignment.
  if (AlignAsType)
    return AlignAsType->isVectorType();
3407 // Otherwise, we only need alignment for any aggregate type that
3408 // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128)
    return true;

  return false;
}
3415 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
3416 /// aggregate. Base is set to the base element type, and Members is set
3417 /// to the number of base elements.
3418 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
3419 uint64_t &Members) const {
3420 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
3427 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3428 const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;
3434 // If this is a C++ record, check the bases first.
3435 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3436 for (const auto &I : CXXRD->bases()) {
3437 // Ignore empty records.
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;

        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
          return false;

        Members += FldMembers;
      }
    }
3449 for (const auto *FD : RD->fields()) {
3450 // Ignore (non-zero arrays of) empty records.
3451 QualType FT = FD->getType();
3452 while (const ConstantArrayType *AT =
3453 getContext().getAsConstantArrayType(FT)) {
3454 if (AT->getSize().getZExtValue() == 0)
3456 FT = AT->getElementType();
3458 if (isEmptyRecord(getContext(), FT, true))
3461 // For compatibility with GCC, ignore empty bitfields in C++ mode.
3462 if (getContext().getLangOpts().CPlusPlus &&
3463 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
3466 uint64_t FldMembers;
3467 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
3470 Members = (RD->isUnion() ?
3471 std::max(Members, FldMembers) : Members + FldMembers);
3477 // Ensure there is no padding.
3478 if (getContext().getTypeSize(Base) * Members !=
3479 getContext().getTypeSize(Ty))
3483 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
3485 Ty = CT->getElementType();
3488 // Most ABIs only support float, double, and some vector type widths.
3489 if (!isHomogeneousAggregateBaseType(Ty))
3492 // The base type must be the same for all members. Types that
3493 // agree in both total size and mode (float vs. vector) are
3494 // treated as being equivalent here.
3495 const Type *TyPtr = Ty.getTypePtr();
3499 if (Base->isVectorType() != TyPtr->isVectorType() ||
3500 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
3503 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
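
// Worked example (illustrative, not part of the upstream source): for a
// hypothetical 'struct HA { float f[2]; float g; };' the recursion yields
// Base = float and Members = 3, and the padding check holds because
// 3 * 32 bits matches the 96-bit struct size.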
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for ELFv2 must have base types of float,
  // double, long double, or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (getContext().getTypeSize(VT) == 128)
      return true;
  }
  return false;
}
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  // Vector types require one register, floating point types require one
  // or two registers depending on their size.
  uint32_t NumRegs =
      Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;

  // Homogeneous Aggregates may occupy at most 8 registers.
  return Members * NumRegs <= 8;
}
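
// Illustrative boundary cases (not part of the upstream source): eight
// doubles (8 x 1 register) or eight 16-byte vectors still qualify, while
// nine doubles, or five 128-bit 'long double' members (5 x 2 registers),
// exceed the limit.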
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = isAlignedParamType(Ty) ? 16 : 8;
    uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy = llvm::IntegerType::get(getVMContext(),
                                          llvm::RoundUpToAlignment(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  return (isPromotableTypeForABI(Ty) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
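
// Worked example (illustrative, not part of the upstream source): a
// hypothetical 5-byte 'struct { char c[5]; };' (40 bits) is coerced to a
// single i40 in one doubleword, while a 24-byte 'struct { long a, b, c; };'
// with 8-byte alignment becomes [3 x i64].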
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return ABIArgInfo::getIndirect(0);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
      } else
        CoerceTy = llvm::IntegerType::get(getVMContext(),
                                          llvm::RoundUpToAlignment(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return ABIArgInfo::getIndirect(0);
  }

  return (isPromotableTypeForABI(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
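
// Worked example (illustrative, not part of the upstream source): under
// ELFv2 a 16-byte 'struct { long a, b; };' comes back in two GPRs as
// { i64, i64 }; under ELFv1 the same struct is returned through a hidden
// sret pointer instead.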
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Handle types that require 16-byte alignment in the parameter save area.
  if (isAlignedParamType(Ty)) {
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  // Update the va_list pointer. The pointer should be bumped by the
  // size of the object. We can trust getTypeSize() except for a complex
  // type whose base type is smaller than a doubleword. For these, the
  // size of the object is 16 bytes; see below for further explanation.
  unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
  QualType BaseTy;
  unsigned CplxBaseSize = 0;

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    BaseTy = CTy->getElementType();
    CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
    if (CplxBaseSize < 8)
      SizeInBytes = 16;
  }

  unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords. However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly. So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (CplxBaseSize && CplxBaseSize < 8) {
    llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *ImagAddr = RealAddr;
    if (CGF.CGM.getDataLayout().isBigEndian()) {
      RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
    } else {
      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
    }
    llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
    RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
    ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
    llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
    llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
    llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
                                            "vacplx");
    llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
    llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
    Builder.CreateStore(Real, RealPtr, false);
    Builder.CreateStore(Imag, ImagPtr, false);
    return Ptr;
  }

  // If the argument is smaller than 8 bytes, it is right-adjusted in
  // its doubleword slot. Adjust the pointer to pick it up from the
  // correct offset.
  if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  return Builder.CreateBitCast(Addr, PTy);
}
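
// Worked example (illustrative, not part of the upstream source): for a
// big-endian '_Complex float' va_arg, the 4-byte real and imaginary parts
// sit right-adjusted at offsets 4 and 12 of the 16-byte slot; the block
// above repacks them into a tight { float, float } temporary for Clang.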
static bool
PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                              llvm::Value *Address) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers
  // (mq, lr, ctr, ap, cr0-7, xer):
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109-113 are further 4-byte special-purpose registers
  // (vrsave, vscr, spe_acc, spefscr, sfp):
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
  CodeGen::CodeGenFunction &CGF,
  llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}
//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//
namespace {

class AArch64ABIInfo : public ABIInfo {
public:
  enum ABIKind {
    AAPCS = 0,
    DarwinPCS
  };

private:
  ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;
  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type);
  }

  llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
                               CodeGenFunction &CGF) const;
  llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
                              CodeGenFunction &CGF) const;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override {
    return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }
};
}
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
      return ABIArgInfo::getDirect(ResType);
    }
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend()
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
                                          CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++
  // mode elsewhere for GNU compatibility.
  if (isEmptyRecord(getContext(), Ty, true)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(Ty, Base, Members)) {
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 128) {
    unsigned Alignment = getContext().getTypeAlign(Ty);
    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
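
// Worked example (illustrative, not part of the upstream source): a
// hypothetical 'struct S { int a, b, c; };' is 96 bits with 4-byte
// alignment; it rounds up to 128 bits and, since its alignment is below
// 128, is coerced to [2 x i64] rather than i128.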
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend()
                : ABIArgInfo::getDirect());
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 128) {
    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(0);
}
/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2 between 1 and 16.
    if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
      return true;
    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
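
// Illustrative cases (not part of the upstream source): <3 x i32> fails the
// power-of-2 check and a 256-bit vector fails the size check, so both are
// lowered via the illegal-vector path, while <2 x i32> (64 bits) and
// <4 x i32> (128 bits) are legal.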
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}
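
// Illustrative limit (not part of the upstream source): a hypothetical
// 'struct { double d[4]; };' is an HFA filling four FP/SIMD registers,
// but 'double d[5]' fails this check and falls through to the ordinary
// aggregate rules.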
llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
                                            QualType Ty,
                                            CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  auto &Ctx = CGF.getContext();

  llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::RoundUpToAlignment(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }
  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
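
  // Illustrative walk-through (not part of the upstream source): in
  // 'void f(int named, ...)' the named argument consumes x0, so va_start
  // records __gr_offs = -56 (seven unread 8-byte GPR save slots). The first
  // va_arg of an 'int' computes NewOffset = -56 + 8 = -48 <= 0, so the value
  // is read from __gr_top - 56; once __gr_offs reaches 0, later arguments
  // come from __stack.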
  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
  reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
  llvm::Value *RegAddr = nullptr;
  llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
    int Offset = 0;

    if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
      Offset = 16 - Ctx.getTypeSize(Base) / 8;
    for (unsigned i = 0; i < NumMembers; ++i) {
      llvm::Value *BaseOffset =
          llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateBitCast(
          LoadAddr, llvm::PointerType::getUnqual(BaseTy));
      llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.
    unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
      int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
      BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);

      BaseAddr = CGF.Builder.CreateAdd(
          BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

      BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
    }

    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);
  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
  stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackAddr = CGF.Builder.CreateAnd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  uint64_t StackSize;
  if (IsIndirect)
    StackSize = 8;
  else
    StackSize = Ctx.getTypeSize(Ty) / 8;

  // All stack slots are 8 bytes.
  StackSize = llvm::RoundUpToAlignment(StackSize, 8);

  llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      Ctx.getTypeSize(Ty) < 64) {
    int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(OnStackAddr, OnStackBlock);

  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");

  return ResAddr;
}
llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
                                             CodeGenFunction &CGF) const {
  // We do not support va_arg for aggregates or illegal vector types.
  // Lower VAArg here for these cases and use the LLVM va_arg instruction for
  // other cases.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return nullptr;

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;

  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool isHA = isHomogeneousAggregate(Ty, Base, Members);

  bool isIndirect = false;
  // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
  // be passed indirectly.
  if (Size > 16 && !isHA) {
    isIndirect = true;
    Size = 8;
    Align = 8;
  }

  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  const uint64_t MinABIAlign = 8;
  if (Align > MinABIAlign) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
    llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
    Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
  }

  uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
  llvm::Value *NextAddr = Builder.CreateGEP(
      Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (isIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}
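
// Worked example (illustrative, not part of the upstream source): on
// Darwin, va_arg of a hypothetical 24-byte non-HFA struct takes the
// indirect path: the 8-byte va_list slot holds a pointer that is loaded
// and then dereferenced, while a 16-byte HFA is read in place and bumps
// the list pointer by 16.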
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//
namespace {

class ARMABIInfo : public ABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP
  };

private:
  ABIKind Kind;
  mutable int VFPRegs[16];
  const unsigned NumVFPs;
  const unsigned NumGPRs;
  mutable unsigned AllocatedGPRs;
  mutable unsigned AllocatedVFPs;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
    NumVFPs(16), NumGPRs(4) {
    setCCs();
    resetAllocatedRegs();
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
      return true;
    default:
      return false;
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
      return true;
    default:
      return false;
    }
  }

  ABIKind getABIKind() const { return Kind; }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  bool &IsCPRC) const;
  bool isIllegalVectorType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();

  void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
  void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
  void resetAllocatedRegs(void) const;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
    :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                      llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                              B));
  }
};
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // To correctly handle Homogeneous Aggregate, we need to keep track of the
  // VFP registers allocated so far.
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  resetAllocatedRegs();

  if (getCXXABI().classifyReturnType(FI)) {
    if (FI.getReturnInfo().isIndirect())
      markAllocatedGPRs(1, 1);
  } else {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic());
  }
  for (auto &I : FI.arguments()) {
    unsigned PreAllocationVFPs = AllocatedVFPs;
    unsigned PreAllocationGPRs = AllocatedGPRs;
    bool IsCPRC = false;
    // 6.1.2.3 There is one VFP co-processor register class using registers
    // s0-s15 (d0-d7) for passing arguments.
    I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC);

    // If we have allocated some arguments onto the stack (due to running
    // out of VFP registers), we cannot split an argument between GPRs and
    // the stack. If this situation occurs, we add padding to prevent the
    // GPRs from being used. In this situation, the current argument could
    // only be allocated by rule C.8, so rule C.6 would mark these GPRs as
    // unusable anyway.
    // We do not have to do this if the argument is being passed ByVal, as the
    // backend can handle that situation correctly.
    const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs;
    const bool IsByVal = I.info.isIndirect() && I.info.getIndirectByVal();
    if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs &&
        StackUsed && !IsByVal) {
      llvm::Type *PaddingTy = llvm::ArrayType::get(
          llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
      if (I.info.canHaveCoerceToType()) {
        I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */,
                                       0 /* offset */, PaddingTy, true);
      } else {
        I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */,
                                       PaddingTy, true);
      }
    }
  }

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}
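
// Illustrative scenario (not part of the upstream source): once VFP
// arguments have overflowed onto the stack (AllocatedVFPs == 17) and, say,
// r0-r2 hold earlier values, a two-word aggregate would be split between
// r3 and the stack; the padding type above burns the remaining GPRs so the
// whole value lands on the stack instead.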
/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case APCS:      return llvm::CallingConv::ARM_APCS;
  case AAPCS:     return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}
void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;

  BuiltinCC = (getABIKind() == APCS ?
               llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
}
/// markAllocatedVFPs - update VFPRegs according to the alignment and
/// number of VFP registers (unit is S register) requested.
void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
                                   unsigned NumRequired) const {
  // Early exit.
  if (AllocatedVFPs >= 16) {
    // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on
    // the stack.
    AllocatedVFPs = 17;
    return;
  }
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  for (unsigned I = 0; I < 16; I += Alignment) {
    bool FoundSlot = true;
    for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
      if (J >= 16 || VFPRegs[J]) {
        FoundSlot = false;
        break;
      }
    if (FoundSlot) {
      for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
        VFPRegs[J] = 1;
      AllocatedVFPs += NumRequired;
      return;
    }
  }
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  for (unsigned I = 0; I < 16; I++)
    VFPRegs[I] = 1;
  AllocatedVFPs = 17; // We do not have enough VFP registers.
}
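
// Illustrative back-filling (not part of the upstream source): for
// 'f(float, double, float)' the first float takes s0, the double needs an
// even-aligned pair and takes s2-s3, and the second float back-fills the
// hole at s1, exactly as AAPCS rule C.1.vfp prescribes.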
/// Update AllocatedGPRs to record the number of general purpose registers
/// which have been allocated. It is valid for AllocatedGPRs to go above 4,
/// since this represents arguments being stored on the stack.
void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
                                   unsigned NumRequired) const {
  assert((Alignment == 1 || Alignment == 2) && "Alignment must be 4 or 8 bytes");

  if (Alignment == 2 && AllocatedGPRs & 0x1)
    AllocatedGPRs += 1;

  AllocatedGPRs += NumRequired;
}

void ARMABIInfo::resetAllocatedRegs(void) const {
  AllocatedGPRs = 0;
  AllocatedVFPs = 0;
  for (unsigned i = 0; i < NumVFPs; ++i)
    VFPRegs[i] = 0;
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            bool &IsCPRC) const {
  // We update number of allocated VFPs according to
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType =
          llvm::Type::getInt32Ty(getVMContext());
      markAllocatedGPRs(1, 1);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic){
        markAllocatedGPRs(2, 2);
      } else {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
        markAllocatedGPRs(2, 4);
      } else {
        markAllocatedVFPs(4, 4);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }
  // Update VFPRegs for legal vector types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      uint64_t Size = getContext().getTypeSize(VT);
      // Size of a legal vector should be power of 2 and above 64.
      markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
      IsCPRC = true;
    }
  }
  // Update VFPRegs for floating point types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::Half ||
          BT->getKind() == BuiltinType::Float) {
        markAllocatedVFPs(1, 1);
        IsCPRC = true;
      }
      if (BT->getKind() == BuiltinType::Double ||
          BT->getKind() == BuiltinType::LongDouble) {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
    }
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    unsigned Size = getContext().getTypeSize(Ty);
    if (!IsCPRC)
      markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();
  if (IsEffectivelyAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Base can be a floating-point or a vector.
      if (Base->isVectorType()) {
        // ElementSize is in number of floats.
        unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
        markAllocatedVFPs(ElementSize,
                          Members * ElementSize);
      } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
        markAllocatedVFPs(1, Members);
      else {
        assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
               Base->isSpecificBuiltinType(BuiltinType::LongDouble));
        markAllocatedVFPs(2, Members * 2);
      }
      IsCPRC = true;
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }
  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);

  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    // Update Allocated GPRs. Since this is only used when the size of the
    // argument is greater than 64 bytes, this will always use up any available
    // registers (of which there are 4). We also don't care about getting the
    // alignment right, because general-purpose registers cannot be back-filled.
    markAllocatedGPRs(1, 4);
    return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type *ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    markAllocatedGPRs(1, SizeRegs);
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
    markAllocatedGPRs(2, SizeRegs * 2);
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
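
// Worked example (illustrative, not part of the upstream source): a
// hypothetical 12-byte 'struct S { int x[3]; };' is coerced to [3 x i32]
// and marks three GPRs, so it travels in r0-r2 (or partly on the stack)
// without a byval copy.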
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.
  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
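
// Illustrative cases (not part of the upstream source):
//   union { int i; unsigned u; }  - integer-like: every field at offset 0
//   struct { short a; short b; }  - not integer-like: b sits at offset 16
// Only the first would be returned in r0 under APCS.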
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                            : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  markAllocatedGPRs(1, 1);
  return ABIArgInfo::getIndirect(0);
}
/// isIllegalVectorType - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if ((NumElements & (NumElements - 1)) != 0)
      return true;
    // Size should be greater than 32 bits.
    return Size <= 32;
  }
  return false;
}
bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  bool IsIndirect = false;

  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  else
    TyAlign = 4;
  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  if (isIllegalVectorType(Ty) && Size > 16) {
    IsIndirect = true;
    Size = 4;
    TyAlign = 4;
  }

  // Handle address alignment for ABI alignment > 4 bytes.
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 &&
           "Alignment is not power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  uint64_t Offset =
    llvm::RoundUpToAlignment(Size, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (IsIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
    // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
    // may not be correctly aligned for the vector type. We create an aligned
    // temporary space and copy the content over from ap.cur to the temporary
    // space. This is necessary if the natural alignment of the type is greater
    // than the ABI alignment.
    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
                                                    "var.align");
    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
    Builder.CreateMemCpy(Dst, Src,
        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
        TyAlign, false);
    Addr = AlignedTemp; // The content is in the aligned location.
  }
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}
namespace {

class NaClARMABIInfo : public ABIInfo {
 public:
  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
 private:
  PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
  ARMABIInfo NInfo;   // Used for everything else.
};

class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
};

}

void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (FI.getASTCallingConvention() == CC_PnaclCall)
    PInfo.computeInfo(FI);
  else
    static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
}

llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
}
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CFG) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
}
5118 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
5119 if (RetTy->isVoidType())
5120 return ABIArgInfo::getIgnore();
5122 // note: this is different from default ABI
5123 if (!RetTy->isScalarType())
5124 return ABIArgInfo::getDirect();
5126 // Treat an enum type as its underlying type.
5127 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5128 RetTy = EnumTy->getDecl()->getIntegerType();
5130 return (RetTy->isPromotableIntegerType() ?
5131 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5134 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
5135 // Treat an enum type as its underlying type.
5136 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5137 Ty = EnumTy->getDecl()->getIntegerType();
5139 // Return aggregate types as indirect by value.
5140 if (isAggregateTypeForABI(Ty))
5141 return ABIArgInfo::getIndirect(0, /* byval */ true);
5143 return (Ty->isPromotableIntegerType() ?
5144 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5145 }
5147 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
5148 if (!getCXXABI().classifyReturnType(FI))
5149 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5150 for (auto &I : FI.arguments())
5151 I.info = classifyArgumentType(I.type);
5153 // Always honor user-specified calling convention.
5154 if (FI.getCallingConvention() != llvm::CallingConv::C)
5155 return;
5157 FI.setEffectiveCallingConvention(getRuntimeCC());
5158 }
5160 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5161 CodeGenFunction &CGF) const {
5162 llvm_unreachable("NVPTX does not support varargs");
5163 }
5165 void NVPTXTargetCodeGenInfo::
5166 SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5167 CodeGen::CodeGenModule &M) const {
5168 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5169 if (!FD) return;
5171 llvm::Function *F = cast<llvm::Function>(GV);
5173 // Perform special handling in OpenCL mode
5174 if (M.getLangOpts().OpenCL) {
5175 // Use OpenCL function attributes to check for kernel functions
5176 // By default, all functions are device functions
5177 if (FD->hasAttr<OpenCLKernelAttr>()) {
5178 // OpenCL __kernel functions get kernel metadata
5179 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5180 addNVVMMetadata(F, "kernel", 1);
5181 // And kernel functions are not subject to inlining
5182 F->addFnAttr(llvm::Attribute::NoInline);
5183 }
5184 }
5186 // Perform special handling in CUDA mode.
5187 if (M.getLangOpts().CUDA) {
5188 // CUDA __global__ functions get a kernel metadata entry. Since
5189 // __global__ functions cannot be called from the device, we do not
5190 // need to set the noinline attribute.
5191 if (FD->hasAttr<CUDAGlobalAttr>()) {
5192 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5193 addNVVMMetadata(F, "kernel", 1);
5194 }
5195 if (FD->hasAttr<CUDALaunchBoundsAttr>()) {
5196 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
5197 addNVVMMetadata(F, "maxntidx",
5198 FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads());
5199 // min blocks is a default argument for CUDALaunchBoundsAttr, so getting a
5200 // zero value from getMinBlocks either means it was not specified in
5201 // __launch_bounds__ or the user specified a 0 value. In both cases, we
5202 // don't have to add a PTX directive.
5203 int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks();
5204 if (MinCTASM > 0) {
5205 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
5206 addNVVMMetadata(F, "minctasm", MinCTASM);
5207 }
5208 }
5209 }
5210 }
5212 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
5213 int Operand) {
5214 llvm::Module *M = F->getParent();
5215 llvm::LLVMContext &Ctx = M->getContext();
5217 // Get "nvvm.annotations" metadata node
5218 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
5220 llvm::Metadata *MDVals[] = {
5221 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
5222 llvm::ConstantAsMetadata::get(
5223 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
5224 // Append metadata to nvvm.annotations
5225 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
5226 }
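// Illustrative sketch of the resulting IR for a CUDA kernel such as
//   __global__ void __launch_bounds__(256, 2) k(int *p);
// (names here are hypothetical; value ordering follows addNVVMMetadata above):
//   !nvvm.annotations = !{!0, !1, !2}
//   !0 = !{void (i32*)* @k, !"kernel", i32 1}
//   !1 = !{void (i32*)* @k, !"maxntidx", i32 256}
//   !2 = !{void (i32*)* @k, !"minctasm", i32 2}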
5229 //===----------------------------------------------------------------------===//
5230 // SystemZ ABI Implementation
5231 //===----------------------------------------------------------------------===//
5235 class SystemZABIInfo : public ABIInfo {
5236 public:
5237 SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5239 bool isPromotableIntegerType(QualType Ty) const;
5240 bool isCompoundType(QualType Ty) const;
5241 bool isFPArgumentType(QualType Ty) const;
5243 ABIArgInfo classifyReturnType(QualType RetTy) const;
5244 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
5246 void computeInfo(CGFunctionInfo &FI) const override {
5247 if (!getCXXABI().classifyReturnType(FI))
5248 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5249 for (auto &I : FI.arguments())
5250 I.info = classifyArgumentType(I.type);
5251 }
5253 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5254 CodeGenFunction &CGF) const override;
5255 };
5257 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
5258 public:
5259 SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
5260 : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
5261 };
5265 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
5266 // Treat an enum type as its underlying type.
5267 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5268 Ty = EnumTy->getDecl()->getIntegerType();
5270 // Promotable integer types are required to be promoted by the ABI.
5271 if (Ty->isPromotableIntegerType())
5272 return true;
5274 // 32-bit values must also be promoted.
5275 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5276 switch (BT->getKind()) {
5277 case BuiltinType::Int:
5278 case BuiltinType::UInt:
5279 return true;
5280 default:
5281 return false;
5282 }
5283 return false;
5284 }
5286 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
5287 return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
5288 }
5290 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
5291 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5292 switch (BT->getKind()) {
5293 case BuiltinType::Float:
5294 case BuiltinType::Double:
5295 return true;
5296 default:
5297 return false;
5298 }
5300 if (const RecordType *RT = Ty->getAsStructureType()) {
5301 const RecordDecl *RD = RT->getDecl();
5302 bool Found = false;
5304 // If this is a C++ record, check the bases first.
5305 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
5306 for (const auto &I : CXXRD->bases()) {
5307 QualType Base = I.getType();
5309 // Empty bases don't affect things either way.
5310 if (isEmptyRecord(getContext(), Base, true))
5311 continue;
5313 if (Found)
5314 return false;
5315 Found = isFPArgumentType(Base);
5316 if (!Found)
5317 return false;
5318 }
5320 // Check the fields.
5321 for (const auto *FD : RD->fields()) {
5322 // Empty bitfields don't affect things either way.
5323 // Unlike isSingleElementStruct(), empty structure and array fields
5324 // do count. So do anonymous bitfields that aren't zero-sized.
5325 if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
5326 continue;
5328 // Unlike isSingleElementStruct(), arrays do not count.
5329 // Nested isFPArgumentType structures still do though.
5330 if (Found)
5331 return false;
5332 Found = isFPArgumentType(FD->getType());
5333 if (!Found)
5334 return false;
5335 }
5337 // Unlike isSingleElementStruct(), trailing padding is allowed.
5338 // An 8-byte aligned struct s { float f; } is passed as a double.
5339 return Found;
5340 }
5342 return false;
5343 }
5345 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5346 CodeGenFunction &CGF) const {
5347 // Assume that va_list type is correct; should be pointer to LLVM type:
5348 // struct {
5349 //   i64 __gpr;
5350 //   i64 __fpr;
5351 //   i8 *__overflow_arg_area;
5352 //   i8 *__reg_save_area;
5353 // };
5355 // Every argument occupies 8 bytes and is passed by preference in either
5356 // GPRs or FPRs.
5357 Ty = CGF.getContext().getCanonicalType(Ty);
5358 ABIArgInfo AI = classifyArgumentType(Ty);
5359 bool InFPRs = isFPArgumentType(Ty);
5361 llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
5362 bool IsIndirect = AI.isIndirect();
5363 unsigned UnpaddedBitSize;
5364 if (IsIndirect) {
5365 APTy = llvm::PointerType::getUnqual(APTy);
5366 UnpaddedBitSize = 64;
5367 } else
5368 UnpaddedBitSize = getContext().getTypeSize(Ty);
5369 unsigned PaddedBitSize = 64;
5370 assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
5372 unsigned PaddedSize = PaddedBitSize / 8;
5373 unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
5375 unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
5376 if (InFPRs) {
5377 MaxRegs = 4; // Maximum of 4 FPR arguments
5378 RegCountField = 1; // __fpr
5379 RegSaveIndex = 16; // save offset for f0
5380 RegPadding = 0; // floats are passed in the high bits of an FPR
5381 } else {
5382 MaxRegs = 5; // Maximum of 5 GPR arguments
5383 RegCountField = 0; // __gpr
5384 RegSaveIndex = 2; // save offset for r2
5385 RegPadding = Padding; // values are passed in the low bits of a GPR
5386 }
5388 llvm::Value *RegCountPtr =
5389 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
5390 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
5391 llvm::Type *IndexTy = RegCount->getType();
5392 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
5393 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
5394 "fits_in_regs");
5396 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5397 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
5398 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5399 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
5401 // Emit code to load the value if it was passed in registers.
5402 CGF.EmitBlock(InRegBlock);
5404 // Work out the address of an argument register.
5405 llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
5406 llvm::Value *ScaledRegCount =
5407 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
5408 llvm::Value *RegBase =
5409 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
5410 llvm::Value *RegOffset =
5411 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
5412 llvm::Value *RegSaveAreaPtr =
5413 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
5414 llvm::Value *RegSaveArea =
5415 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
5416 llvm::Value *RawRegAddr =
5417 CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
5418 llvm::Value *RegAddr =
5419 CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
5421 // Update the register count
5422 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
5423 llvm::Value *NewRegCount =
5424 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
5425 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
5426 CGF.EmitBranch(ContBlock);
5428 // Emit code to load the value if it was passed in memory.
5429 CGF.EmitBlock(InMemBlock);
5431 // Work out the address of a stack argument.
5432 llvm::Value *OverflowArgAreaPtr =
5433 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
5434 llvm::Value *OverflowArgArea =
5435 CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
5436 llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
5437 llvm::Value *RawMemAddr =
5438 CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
5439 llvm::Value *MemAddr =
5440 CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
5442 // Update overflow_arg_area_ptr pointer
5443 llvm::Value *NewOverflowArgArea =
5444 CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
5445 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
5446 CGF.EmitBranch(ContBlock);
5448 // Return the appropriate result.
5449 CGF.EmitBlock(ContBlock);
5450 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
5451 ResAddr->addIncoming(RegAddr, InRegBlock);
5452 ResAddr->addIncoming(MemAddr, InMemBlock);
5454 if (IsIndirect)
5455 return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");
5457 return ResAddr;
5458 }
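// Rough shape of the IR emitted above for va_arg(ap, int) (sketch only;
// an int is not an FP argument, so MaxRegs == 5 and __gpr is used):
//   %reg_count = load i64* %reg_count_ptr
//   %fits_in_regs = icmp ult i64 %reg_count, 5
//   br i1 %fits_in_regs, label %vaarg.in_reg, label %vaarg.in_mem
// with both paths meeting at the vaarg.end phi:
//   %va_arg.addr = phi i32* [ %reg_addr, %vaarg.in_reg ],
//                          [ %mem_addr, %vaarg.in_mem ]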
5460 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
5461 if (RetTy->isVoidType())
5462 return ABIArgInfo::getIgnore();
5463 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
5464 return ABIArgInfo::getIndirect(0);
5465 return (isPromotableIntegerType(RetTy) ?
5466 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5467 }
5469 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
5470 // Handle the generic C++ ABI.
5471 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5472 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5474 // Integers and enums are extended to full register width.
5475 if (isPromotableIntegerType(Ty))
5476 return ABIArgInfo::getExtend();
5478 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
5479 uint64_t Size = getContext().getTypeSize(Ty);
5480 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
5481 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5483 // Handle small structures.
5484 if (const RecordType *RT = Ty->getAs<RecordType>()) {
5485 // Structures with flexible array members have variable length, so they
5486 // really should have failed the size test above.
5487 const RecordDecl *RD = RT->getDecl();
5488 if (RD->hasFlexibleArrayMember())
5489 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5491 // The structure is passed as an unextended integer, a float, or a double.
5492 llvm::Type *PassTy;
5493 if (isFPArgumentType(Ty)) {
5494 assert(Size == 32 || Size == 64);
5495 if (Size == 32)
5496 PassTy = llvm::Type::getFloatTy(getVMContext());
5497 else
5498 PassTy = llvm::Type::getDoubleTy(getVMContext());
5499 } else
5500 PassTy = llvm::IntegerType::get(getVMContext(), Size);
5501 return ABIArgInfo::getDirect(PassTy);
5502 }
5504 // Non-structure compounds are passed indirectly.
5505 if (isCompoundType(Ty))
5506 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5508 return ABIArgInfo::getDirect(nullptr);
5509 }
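// A few illustrative classifications implied by the rules above:
//   struct S1 { float f; };    -> direct as float (single FP member)
//   struct S2 { double d; };   -> direct as double
//   struct S3 { int a, b; };   -> direct as i64 (unextended integer)
//   struct S4 { char c[3]; };  -> indirect (24 bits is not 8/16/32/64)
//   short                      -> extended to the full 64-bit register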
5511 //===----------------------------------------------------------------------===//
5512 // MSP430 ABI Implementation
5513 //===----------------------------------------------------------------------===//
5517 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
5518 public:
5519 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
5520 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
5521 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5522 CodeGen::CodeGenModule &M) const override;
5523 };
5527 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5528 llvm::GlobalValue *GV,
5529 CodeGen::CodeGenModule &M) const {
5530 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
5531 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
5532 // Handle 'interrupt' attribute:
5533 llvm::Function *F = cast<llvm::Function>(GV);
5535 // Step 1: Set ISR calling convention.
5536 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
5538 // Step 2: Mark the handler 'noinline'.
5539 F->addFnAttr(llvm::Attribute::NoInline);
5541 // Step 3: Emit ISR vector alias.
5542 unsigned Num = attr->getNumber() / 2;
5543 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
5544 "__isr_" + Twine(Num), F);
5549 //===----------------------------------------------------------------------===//
5550 // MIPS ABI Implementation. This works for both little-endian and
5551 // big-endian variants.
5552 //===----------------------------------------------------------------------===//
5555 class MipsABIInfo : public ABIInfo {
5556 bool IsO32;
5557 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
5558 void CoerceToIntArgs(uint64_t TySize,
5559 SmallVectorImpl<llvm::Type *> &ArgList) const;
5560 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
5561 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
5562 llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;
5563 public:
5564 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
5565 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
5566 StackAlignInBytes(IsO32 ? 8 : 16) {}
5568 ABIArgInfo classifyReturnType(QualType RetTy) const;
5569 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
5570 void computeInfo(CGFunctionInfo &FI) const override;
5571 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5572 CodeGenFunction &CGF) const override;
5573 };
5575 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
5576 unsigned SizeOfUnwindException;
5577 public:
5578 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
5579 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
5580 SizeOfUnwindException(IsO32 ? 24 : 32) {}
5582 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
5583 return 29;
5584 }
5586 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5587 CodeGen::CodeGenModule &CGM) const override {
5588 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5589 if (!FD) return;
5590 llvm::Function *Fn = cast<llvm::Function>(GV);
5591 if (FD->hasAttr<Mips16Attr>()) {
5592 Fn->addFnAttr("mips16");
5593 }
5594 else if (FD->hasAttr<NoMips16Attr>()) {
5595 Fn->addFnAttr("nomips16");
5596 }
5597 }
5599 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5600 llvm::Value *Address) const override;
5602 unsigned getSizeOfUnwindException() const override {
5603 return SizeOfUnwindException;
5604 }
5605 };
5608 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
5609 SmallVectorImpl<llvm::Type *> &ArgList) const {
5610 llvm::IntegerType *IntTy =
5611 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
5613 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
5614 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
5615 ArgList.push_back(IntTy);
5617 // If necessary, add one more integer type to ArgList.
5618 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
5621 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
5624 // In N32/64, an aligned double precision floating point field is passed in
5626 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
5627 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
5629 if (IsO32) {
5630 CoerceToIntArgs(TySize, ArgList);
5631 return llvm::StructType::get(getVMContext(), ArgList);
5632 }
5634 if (Ty->isComplexType())
5635 return CGT.ConvertType(Ty);
5637 const RecordType *RT = Ty->getAs<RecordType>();
5639 // Unions/vectors are passed in integer registers.
5640 if (!RT || !RT->isStructureOrClassType()) {
5641 CoerceToIntArgs(TySize, ArgList);
5642 return llvm::StructType::get(getVMContext(), ArgList);
5643 }
5645 const RecordDecl *RD = RT->getDecl();
5646 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5647 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
5649 uint64_t LastOffset = 0;
5650 unsigned idx = 0;
5651 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
5653 // Iterate over fields in the struct/class and check if there are any
5654 // aligned double fields.
5655 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
5656 i != e; ++i, ++idx) {
5657 const QualType Ty = i->getType();
5658 const BuiltinType *BT = Ty->getAs<BuiltinType>();
5660 if (!BT || BT->getKind() != BuiltinType::Double)
5661 continue;
5663 uint64_t Offset = Layout.getFieldOffset(idx);
5664 if (Offset % 64) // Ignore doubles that are not aligned.
5665 continue;
5667 // Add ((Offset - LastOffset) / 64) args of type i64.
5668 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
5669 ArgList.push_back(I64);
5671 // Add double type.
5672 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
5673 LastOffset = Offset + 64;
5674 }
5676 CoerceToIntArgs(TySize - LastOffset, IntArgList);
5677 ArgList.append(IntArgList.begin(), IntArgList.end());
5679 return llvm::StructType::get(getVMContext(), ArgList);
5680 }
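// Illustrative sketch: under N64, a type such as
//   struct A { double d; int i; };
// has 'd' 64-bit aligned at offset 0, so it is coerced to { double, i64 }
// and 'd' can travel in an FPR; a union or a misaligned double is instead
// coerced to plain integer words by CoerceToIntArgs.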
5682 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
5683 uint64_t Offset) const {
5684 if (OrigOffset + MinABIStackAlignInBytes > Offset)
5685 return nullptr;
5687 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
5688 }
5690 ABIArgInfo
5691 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
5692 Ty = useFirstFieldIfTransparentUnion(Ty);
5694 uint64_t OrigOffset = Offset;
5695 uint64_t TySize = getContext().getTypeSize(Ty);
5696 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
5698 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
5699 (uint64_t)StackAlignInBytes);
5700 unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
5701 Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
5703 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
5704 // Ignore empty aggregates.
5705 if (TySize == 0)
5706 return ABIArgInfo::getIgnore();
5708 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5709 Offset = OrigOffset + MinABIStackAlignInBytes;
5710 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5711 }
5713 // If we have reached here, aggregates are passed directly by coercing to
5714 // another structure type. Padding is inserted if the offset of the
5715 // aggregate is unaligned.
5716 ABIArgInfo ArgInfo =
5717 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
5718 getPaddingType(OrigOffset, CurrOffset));
5719 ArgInfo.setInReg(true);
5720 return ArgInfo;
5721 }
5723 // Treat an enum type as its underlying type.
5724 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5725 Ty = EnumTy->getDecl()->getIntegerType();
5727 // All integral types are promoted to the GPR width.
5728 if (Ty->isIntegralOrEnumerationType())
5729 return ABIArgInfo::getExtend();
5731 return ABIArgInfo::getDirect(
5732 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
5733 }
5735 llvm::Type*
5736 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
5737 const RecordType *RT = RetTy->getAs<RecordType>();
5738 SmallVector<llvm::Type*, 8> RTList;
5740 if (RT && RT->isStructureOrClassType()) {
5741 const RecordDecl *RD = RT->getDecl();
5742 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5743 unsigned FieldCnt = Layout.getFieldCount();
5745 // N32/64 returns struct/classes in floating point registers if the
5746 // following conditions are met:
5747 // 1. The size of the struct/class is no larger than 128-bit.
5748 // 2. The struct/class has one or two fields all of which are floating
5749 // point types.
5750 // 3. The offset of the first field is zero (this follows what gcc does).
5752 // Any other composite results are returned in integer registers.
5754 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
5755 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
5756 for (; b != e; ++b) {
5757 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
5759 if (!BT || !BT->isFloatingPoint())
5760 break;
5762 RTList.push_back(CGT.ConvertType(b->getType()));
5763 }
5765 if (b == e)
5766 return llvm::StructType::get(getVMContext(), RTList,
5767 RD->hasAttr<PackedAttr>());
5769 RTList.clear();
5770 }
5771 }
5773 CoerceToIntArgs(Size, RTList);
5774 return llvm::StructType::get(getVMContext(), RTList);
5775 }
5777 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
5778 uint64_t Size = getContext().getTypeSize(RetTy);
5780 if (RetTy->isVoidType())
5781 return ABIArgInfo::getIgnore();
5783 // O32 doesn't treat zero-sized structs differently from other structs.
5784 // However, N32/N64 ignores zero sized return values.
5785 if (!IsO32 && Size == 0)
5786 return ABIArgInfo::getIgnore();
5788 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
5789 if (Size <= 128) {
5790 if (RetTy->isAnyComplexType())
5791 return ABIArgInfo::getDirect();
5793 // O32 returns integer vectors in registers and N32/N64 returns all small
5794 // aggregates in registers.
5795 if (!IsO32 ||
5796 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
5797 ABIArgInfo ArgInfo =
5798 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
5799 ArgInfo.setInReg(true);
5800 return ArgInfo;
5801 }
5802 }
5804 return ABIArgInfo::getIndirect(0);
5805 }
5807 // Treat an enum type as its underlying type.
5808 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5809 RetTy = EnumTy->getDecl()->getIntegerType();
5811 return (RetTy->isPromotableIntegerType() ?
5812 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5813 }
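// Illustrative sketch: under N64, 'struct C { float a, b; };' meets all
// three conditions above and is returned directly in FP registers, while
// 'struct D { float a; int b; };' (a non-FP field) falls back to integer
// registers via returnAggregateInRegs/CoerceToIntArgs.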
5815 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
5816 ABIArgInfo &RetInfo = FI.getReturnInfo();
5817 if (!getCXXABI().classifyReturnType(FI))
5818 RetInfo = classifyReturnType(FI.getReturnType());
5820 // Check if a pointer to an aggregate is passed as a hidden argument.
5821 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
5823 for (auto &I : FI.arguments())
5824 I.info = classifyArgumentType(I.type, Offset);
5825 }
5827 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5828 CodeGenFunction &CGF) const {
5829 llvm::Type *BP = CGF.Int8PtrTy;
5830 llvm::Type *BPP = CGF.Int8PtrPtrTy;
5832 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
5833 // Pointers are also promoted in the same way but this only matters for N32.
5834 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
5835 unsigned PtrWidth = getTarget().getPointerWidth(0);
5836 if ((Ty->isIntegerType() &&
5837 CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) ||
5838 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
5839 Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
5840 Ty->isSignedIntegerType());
5841 }
5843 CGBuilderTy &Builder = CGF.Builder;
5844 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
5845 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5846 int64_t TypeAlign =
5847 std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
5848 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5849 llvm::Value *AddrTyped;
5850 llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
5852 if (TypeAlign > MinABIStackAlignInBytes) {
5853 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
5854 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
5855 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
5856 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
5857 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
5858 AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
5859 }
5860 else
5861 AddrTyped = Builder.CreateBitCast(Addr, PTy);
5863 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
5864 TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
5865 unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
5866 uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
5867 llvm::Value *NextAddr =
5868 Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
5869 "ap.next");
5870 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5872 return AddrTyped;
5873 }
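// The add-and-mask sequence above rounds ap.cur up to TypeAlign; e.g. for
// TypeAlign == 8 it computes (addr + 7) & -8. Sketch of the emitted IR:
//   %0 = ptrtoint i8* %ap.cur to i32
//   %1 = add i32 %0, 7
//   %2 = and i32 %1, -8
//   %3 = inttoptr i32 %2 to double*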
5875 bool
5876 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5877 llvm::Value *Address) const {
5878 // This information comes from gcc's implementation, which seems to be
5879 // as canonical as it gets.
5881 // Everything on MIPS is 4 bytes. Double-precision FP registers
5882 // are aliased to pairs of single-precision FP registers.
5883 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5885 // 0-31 are the general purpose registers, $0 - $31.
5886 // 32-63 are the floating-point registers, $f0 - $f31.
5887 // 64 and 65 are the multiply/divide registers, $hi and $lo.
5888 // 66 is the (notional, I think) register for signal-handler return.
5889 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
5891 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5892 // They are one bit wide and ignored here.
5894 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5895 // (coprocessor 1 is the FP unit)
5896 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5897 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5898 // 176-181 are the DSP accumulator registers.
5899 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
5900 return false;
5901 }
5903 //===----------------------------------------------------------------------===//
5904 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
5905 // Currently subclassed only to implement custom OpenCL C function attribute
5906 // handling.
5907 //===----------------------------------------------------------------------===//
5911 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
5912 public:
5913 TCETargetCodeGenInfo(CodeGenTypes &CGT)
5914 : DefaultTargetCodeGenInfo(CGT) {}
5916 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5917 CodeGen::CodeGenModule &M) const override;
5918 };
5920 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5921 llvm::GlobalValue *GV,
5922 CodeGen::CodeGenModule &M) const {
5923 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5924 if (!FD) return;
5926 llvm::Function *F = cast<llvm::Function>(GV);
5928 if (M.getLangOpts().OpenCL) {
5929 if (FD->hasAttr<OpenCLKernelAttr>()) {
5930 // OpenCL C Kernel functions are not subject to inlining
5931 F->addFnAttr(llvm::Attribute::NoInline);
5932 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
5933 if (Attr) {
5934 // Convert the reqd_work_group_size() attributes to metadata.
5935 llvm::LLVMContext &Context = F->getContext();
5936 llvm::NamedMDNode *OpenCLMetadata =
5937 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
5939 SmallVector<llvm::Metadata *, 5> Operands;
5940 Operands.push_back(llvm::ConstantAsMetadata::get(F));
5942 Operands.push_back(
5943 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
5944 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
5945 Operands.push_back(
5946 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
5947 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
5948 Operands.push_back(
5949 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
5950 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
5952 // Add a boolean constant operand for "required" (true) or "hint" (false)
5953 // for implementing the work_group_size_hint attr later. Currently
5954 // always true as the hint is not yet implemented.
5955 Operands.push_back(
5956 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
5957 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
5958 }
5959 }
5960 }
5961 }
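// For example (illustrative): an OpenCL kernel declared as
//   __kernel __attribute__((reqd_work_group_size(8, 4, 1))) void k();
// yields roughly the following module metadata:
//   !opencl.kernel_wg_size_info = !{!0}
//   !0 = !{void ()* @k, i32 8, i32 4, i32 1, i1 true}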
5965 //===----------------------------------------------------------------------===//
5966 // Hexagon ABI Implementation
5967 //===----------------------------------------------------------------------===//
5971 class HexagonABIInfo : public ABIInfo {
5973 public:
5975 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5977 private:
5979 ABIArgInfo classifyReturnType(QualType RetTy) const;
5980 ABIArgInfo classifyArgumentType(QualType RetTy) const;
5982 void computeInfo(CGFunctionInfo &FI) const override;
5984 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5985 CodeGenFunction &CGF) const override;
5986 };
5988 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
5989 public:
5990 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
5991 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
5993 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5994 return 29;
5995 }
5996 };
6000 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
6001 if (!getCXXABI().classifyReturnType(FI))
6002 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6003 for (auto &I : FI.arguments())
6004 I.info = classifyArgumentType(I.type);
6005 }
6007 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
6008 if (!isAggregateTypeForABI(Ty)) {
6009 // Treat an enum type as its underlying type.
6010 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6011 Ty = EnumTy->getDecl()->getIntegerType();
6013 return (Ty->isPromotableIntegerType() ?
6014 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6015 }
6017 // Ignore empty records.
6018 if (isEmptyRecord(getContext(), Ty, true))
6019 return ABIArgInfo::getIgnore();
6021 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6022 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
6024 uint64_t Size = getContext().getTypeSize(Ty);
6025 if (Size > 64)
6026 return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
6027 // Pass in the smallest viable integer type.
6028 else if (Size > 32)
6029 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6030 else if (Size > 16)
6031 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6032 else if (Size > 8)
6033 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6034 else
6035 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6036 }
6038 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
6039 if (RetTy->isVoidType())
6040 return ABIArgInfo::getIgnore();
6042 // Large vector types should be returned via memory.
6043 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
6044 return ABIArgInfo::getIndirect(0);
6046 if (!isAggregateTypeForABI(RetTy)) {
6047 // Treat an enum type as its underlying type.
6048 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6049 RetTy = EnumTy->getDecl()->getIntegerType();
6051 return (RetTy->isPromotableIntegerType() ?
6052 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6055 if (isEmptyRecord(getContext(), RetTy, true))
6056 return ABIArgInfo::getIgnore();
6058 // Aggregates <= 8 bytes are returned in r0; other aggregates
6059 // are returned indirectly.
6060 uint64_t Size = getContext().getTypeSize(RetTy);
6061 if (Size <= 64) {
6062 // Return in the smallest viable integer type.
6063 if (Size <= 8)
6064 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6065 if (Size <= 16)
6066 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6067 if (Size <= 32)
6068 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6069 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6070 }
6072 return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
6073 }
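// Illustrative mapping implied by the size checks above:
//   struct { char c; }          -> returned directly as i8
//   struct { short s; char c; } -> returned directly as i32 (24 bits)
//   struct { long long x; }     -> returned directly as i64
//   struct { long long x, y; }  -> returned indirectly (larger than 64 bits)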
6075 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6076 CodeGenFunction &CGF) const {
6077 // FIXME: Need to handle alignment
6078 llvm::Type *BPP = CGF.Int8PtrPtrTy;
6080 CGBuilderTy &Builder = CGF.Builder;
6081 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
6082 "ap");
6083 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
6084 llvm::Type *PTy =
6085 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
6086 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
6088 uint64_t Offset =
6089 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
6090 llvm::Value *NextAddr =
6091 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
6092 "ap.next");
6093 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
6094 return AddrTyped;
6095 }
6098 //===----------------------------------------------------------------------===//
6099 // AMDGPU ABI Implementation
6100 //===----------------------------------------------------------------------===//
6104 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
6105 public:
6106 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
6107 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6108 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6109 CodeGen::CodeGenModule &M) const override;
6110 };
6114 void AMDGPUTargetCodeGenInfo::SetTargetAttributes(
6115 const Decl *D,
6116 llvm::GlobalValue *GV,
6117 CodeGen::CodeGenModule &M) const {
6118 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
6119 if (!FD) return;
6122 if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
6123 llvm::Function *F = cast<llvm::Function>(GV);
6124 uint32_t NumVGPR = Attr->getNumVGPR();
6126 F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
6129 if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
6130 llvm::Function *F = cast<llvm::Function>(GV);
6131 unsigned NumSGPR = Attr->getNumSGPR();
6133 F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
6138 //===----------------------------------------------------------------------===//
6139 // SPARC v9 ABI Implementation.
6140 // Based on the SPARC Compliance Definition version 2.4.1.
6142 // Function arguments are mapped to a nominal "parameter array" and promoted
6143 // to registers depending on their type. Each argument occupies 8 or 16
6144 // bytes in the array; structs larger than 16 bytes are passed indirectly.
6146 // One case requires special care:
6147 //
6148 //   struct mixed {
6149 //     int i;
6150 //     float f;
6151 //   };
6152 //
6153 // When a struct mixed is passed by value, it only occupies 8 bytes in the
6154 // parameter array, but the int is passed in an integer register, and the float
6155 // is passed in a floating point register. This is represented as two arguments
6156 // with the LLVM IR inreg attribute:
6158 // declare void f(i32 inreg %i, float inreg %f)
6160 // The code generator will only allocate 4 bytes from the parameter array for
6161 // the inreg arguments. All other arguments are allocated a multiple of 8
6162 // bytes.
6165 class SparcV9ABIInfo : public ABIInfo {
6166 public:
6167 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6169 private:
6170 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
6171 void computeInfo(CGFunctionInfo &FI) const override;
6172 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6173 CodeGenFunction &CGF) const override;
6175 // Coercion type builder for structs passed in registers. The coercion type
6176 // serves two purposes:
6178 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
6179 // in registers.
6180 // 2. Expose aligned floating point elements as first-level elements, so the
6181 // code generator knows to pass them in floating point registers.
6183 // We also compute the InReg flag which indicates that the struct contains
6184 // aligned 32-bit floats.
6186 struct CoerceBuilder {
6187 llvm::LLVMContext &Context;
6188 const llvm::DataLayout &DL;
6189 SmallVector<llvm::Type*, 8> Elems;
6190 uint64_t Size;
6191 bool InReg;
6193 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
6194 : Context(c), DL(dl), Size(0), InReg(false) {}
6196 // Pad Elems with integers until Size is ToSize.
6197 void pad(uint64_t ToSize) {
6198 assert(ToSize >= Size && "Cannot remove elements");
6199 if (ToSize == Size)
6200 return;
6202 // Finish the current 64-bit word.
6203 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
6204 if (Aligned > Size && Aligned <= ToSize) {
6205 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
6206 Size = Aligned;
6207 }
6209 // Add whole 64-bit words.
6210 while (Size + 64 <= ToSize) {
6211 Elems.push_back(llvm::Type::getInt64Ty(Context));
6212 Size += 64;
6213 }
6215 // Final in-word padding.
6216 if (Size < ToSize) {
6217 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
6218 Size = ToSize;
6219 }
6220 }
6222 // Add a floating point element at Offset.
6223 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
6224 // Unaligned floats are treated as integers.
6225 if (Offset % Bits)
6226 return;
6227 // The InReg flag is only required if there are any floats < 64 bits.
6228 if (Bits < 64)
6229 InReg = true;
6230 pad(Offset);
6231 Elems.push_back(Ty);
6232 Size = Offset + Bits;
6233 }
6235 // Add a struct type to the coercion type, starting at Offset (in bits).
6236 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
6237 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
6238 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
6239 llvm::Type *ElemTy = StrTy->getElementType(i);
6240 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
6241 switch (ElemTy->getTypeID()) {
6242 case llvm::Type::StructTyID:
6243 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
6244 break;
6245 case llvm::Type::FloatTyID:
6246 addFloat(ElemOffset, ElemTy, 32);
6247 break;
6248 case llvm::Type::DoubleTyID:
6249 addFloat(ElemOffset, ElemTy, 64);
6250 break;
6251 case llvm::Type::FP128TyID:
6252 addFloat(ElemOffset, ElemTy, 128);
6253 break;
6254 case llvm::Type::PointerTyID:
6255 if (ElemOffset % 64 == 0) {
6256 pad(ElemOffset);
6257 Elems.push_back(ElemTy);
6258 Size += 64;
6259 }
6260 break;
6261 default:
6262 break;
6263 }
6264 }
6265 }
6267 // Check if Ty is a usable substitute for the coercion type.
6268 bool isUsableType(llvm::StructType *Ty) const {
6269 if (Ty->getNumElements() != Elems.size())
6270 return false;
6271 for (unsigned i = 0, e = Elems.size(); i != e; ++i)
6272 if (Elems[i] != Ty->getElementType(i))
6273 return false;
6274 return true;
6275 }
6277 // Get the coercion type as a literal struct type.
6278 llvm::Type *getType() const {
6279 if (Elems.size() == 1)
6280 return Elems.front();
6281 else
6282 return llvm::StructType::get(Context, Elems);
6283 }
6284 };
6285 };
6286 } // end anonymous namespace
6288 ABIArgInfo
6289 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
6290 if (Ty->isVoidType())
6291 return ABIArgInfo::getIgnore();
6293 uint64_t Size = getContext().getTypeSize(Ty);
6295 // Anything too big to fit in registers is passed with an explicit indirect
6296 // pointer / sret pointer.
6297 if (Size > SizeLimit)
6298 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
6300 // Treat an enum type as its underlying type.
6301 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6302 Ty = EnumTy->getDecl()->getIntegerType();
6304 // Integer types smaller than a register are extended.
6305 if (Size < 64 && Ty->isIntegerType())
6306 return ABIArgInfo::getExtend();
6308 // Other non-aggregates go in registers.
6309 if (!isAggregateTypeForABI(Ty))
6310 return ABIArgInfo::getDirect();
6312 // If a C++ object has either a non-trivial copy constructor or a non-trivial
6313 // destructor, it is passed with an explicit indirect pointer / sret pointer.
6314 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6315 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
6317 // This is a small aggregate type that should be passed in registers.
6318 // Build a coercion type from the LLVM struct type.
6319 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
6320 if (!StrTy)
6321 return ABIArgInfo::getDirect();
6323 CoerceBuilder CB(getVMContext(), getDataLayout());
6324 CB.addStruct(0, StrTy);
6325 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
6327 // Try to use the original type for coercion.
6328 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
6330 if (CB.InReg)
6331 return ABIArgInfo::getDirectInReg(CoerceTy);
6332 else
6333 return ABIArgInfo::getDirect(CoerceTy);
6334 }
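// Worked example: for the 'struct mixed { int i; float f; }' case from the
// header comment, CoerceBuilder leaves the i32 to pad(), adds the aligned
// float, and sets InReg (a float narrower than 64 bits), so the struct is
// passed as getDirectInReg with coercion type { i32, float }.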
6336 llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6337 CodeGenFunction &CGF) const {
6338 ABIArgInfo AI = classifyType(Ty, 16 * 8);
6339 llvm::Type *ArgTy = CGT.ConvertType(Ty);
6340 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6341 AI.setCoerceToType(ArgTy);
6343 llvm::Type *BPP = CGF.Int8PtrPtrTy;
6344 CGBuilderTy &Builder = CGF.Builder;
6345 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
6346 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
6347 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6348 llvm::Value *ArgAddr;
6349 unsigned Stride = 0;
6351 switch (AI.getKind()) {
6352 case ABIArgInfo::Expand:
6353 case ABIArgInfo::InAlloca:
6354 llvm_unreachable("Unsupported ABI kind for va_arg");
6356 case ABIArgInfo::Extend:
6357 Stride = 8;
6358 ArgAddr = Builder
6359 .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
6360 "extend");
6361 break;
6363 case ABIArgInfo::Direct:
6364 Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
6365 ArgAddr = Addr;
6366 break;
6368 case ABIArgInfo::Indirect:
6369 Stride = 8;
6370 ArgAddr = Builder.CreateBitCast(Addr,
6371 llvm::PointerType::getUnqual(ArgPtrTy),
6372 "indirect");
6373 ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
6374 break;
6376 case ABIArgInfo::Ignore:
6377 return llvm::UndefValue::get(ArgPtrTy);
6378 }
6380 // Update VAList.
6381 Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
6382 Builder.CreateStore(Addr, VAListAddrAsBPP);
6384 return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
6387 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
6388 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
6389 for (auto &I : FI.arguments())
6390 I.info = classifyType(I.type, 16 * 8);
6391 }
6394 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
6395 public:
6396 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
6397 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
6399 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
6400 return 14;
6401 }
6403 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6404 llvm::Value *Address) const override;
6405 };
6406 } // end anonymous namespace
6408 bool
6409 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6410 llvm::Value *Address) const {
6411 // This is calculated from the LLVM and GCC tables and verified
6412 // against gcc output. AFAIK all ABIs use the same encoding.
6414 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6416 llvm::IntegerType *i8 = CGF.Int8Ty;
6417 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
6418 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
6420 // 0-31: the 8-byte general-purpose registers
6421 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
6423 // 32-63: f0-31, the 4-byte floating-point registers
6424 AssignToArrayRange(Builder, Address, Four8, 32, 63);
6426 //   Y   = 64
6427 //   PSR = 65
6428 //   WIM = 66
6429 //   TBR = 67
6430 //   PC  = 68
6431 //   NPC = 69
6432 //   FSR = 70
6433 //   CSR = 71
6434 AssignToArrayRange(Builder, Address, Eight8, 64, 71);
6436 // 72-87: d0-15, the 8-byte floating-point registers
6437 AssignToArrayRange(Builder, Address, Eight8, 72, 87);
6439 return false;
6440 }
6443 //===----------------------------------------------------------------------===//
6444 // XCore ABI Implementation
6445 //===----------------------------------------------------------------------===//
6447 namespace {
6449 /// A SmallStringEnc instance is used to build up the TypeString by passing
6450 /// it by reference between functions that append to it.
6451 typedef llvm::SmallString<128> SmallStringEnc;
6453 /// TypeStringCache caches the meta encodings of Types.
6455 /// The reason for caching TypeStrings is twofold:
6456 /// 1. To cache a type's encoding for later uses;
6457 /// 2. As a means to break recursive member type inclusion.
6459 /// A cache Entry can have a Status of:
6460 /// NonRecursive: The type encoding is not recursive;
6461 /// Recursive: The type encoding is recursive;
6462 /// Incomplete: An incomplete TypeString;
6463 /// IncompleteUsed: An incomplete TypeString that has been used in a
6464 /// Recursive type encoding.
6466 /// A NonRecursive entry will have all of its sub-members expanded as fully
6467 /// as possible. Whilst it may contain types which are recursive, the type
6468 /// itself is not recursive and thus its encoding may be safely used whenever
6469 /// the type is encountered.
6471 /// A Recursive entry will have all of its sub-members expanded as fully as
6472 /// possible. The type itself is recursive and it may contain other types which
6473 /// are recursive. The Recursive encoding must not be used during the expansion
6474 /// of a recursive type's recursive branch. For simplicity the code uses
6475 /// IncompleteCount to reject all usage of Recursive encodings for member types.
6477 /// An Incomplete entry is always a RecordType and only encodes its
6478 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
6479 /// are placed into the cache during type expansion as a means to identify and
6480 /// handle recursive inclusion of types as sub-members. If there is recursion
6481 /// the entry becomes IncompleteUsed.
6483 /// During the expansion of a RecordType's members:
6485 /// If the cache contains a NonRecursive encoding for the member type, the
6486 /// cached encoding is used;
6488 /// If the cache contains a Recursive encoding for the member type, the
6489 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
6491 /// If the member is a RecordType, an Incomplete encoding is placed into the
6492 /// cache to break potential recursive inclusion of itself as a sub-member;
6494 /// Once a member RecordType has been expanded, its temporary incomplete
6495 /// entry is removed from the cache. If a Recursive encoding was swapped out
6496 /// it is swapped back in;
6498 /// If an incomplete entry is used to expand a sub-member, the incomplete
6499 /// entry is marked as IncompleteUsed. The cache keeps count of how many
6500 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
6502 /// If a member's encoding is found to be NonRecursive or Recursive (viz:
6503 /// IncompleteUsedCount==0), the member's encoding is added to the cache.
6504 /// Otherwise the member is part of a recursive type and thus the recursion
6505 /// has been exited too soon for the encoding to be correct for the member.
6507 class TypeStringCache {
6508 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
6509 struct Entry {
6510 std::string Str; // The encoded TypeString for the type.
6511 enum Status State; // Information about the encoding in 'Str'.
6512 std::string Swapped; // A temporary place holder for a Recursive encoding
6513 // during the expansion of RecordType's members.
6514 };
6515 std::map<const IdentifierInfo *, struct Entry> Map;
6516 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
6517 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
6518 public:
6519 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
6520 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
6521 bool removeIncomplete(const IdentifierInfo *ID);
6522 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
6523 bool IsRecursive);
6524 StringRef lookupStr(const IdentifierInfo *ID);
6525 };
6527 /// TypeString encodings for enum & union fields must be ordered.
6528 /// FieldEncoding is a helper for this ordering process.
6529 class FieldEncoding {
6530 bool HasName;
6531 std::string Enc;
6532 public:
6533 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
6534 StringRef str() {return Enc.c_str();}
6535 bool operator<(const FieldEncoding &rhs) const {
6536 if (HasName != rhs.HasName) return HasName;
6537 return Enc < rhs.Enc;
6538 }
6539 };
6541 class XCoreABIInfo : public DefaultABIInfo {
6542 public:
6543 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6544 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6545 CodeGenFunction &CGF) const override;
6546 };
6548 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
6549 mutable TypeStringCache TSC;
6550 public:
6551 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
6552 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
6553 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6554 CodeGen::CodeGenModule &M) const override;
6555 };
6557 } // End anonymous namespace.
6559 llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6560 CodeGenFunction &CGF) const {
6561 CGBuilderTy &Builder = CGF.Builder;
6563 // Get the VAList.
6564 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
6565 CGF.Int8PtrPtrTy);
6566 llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
6568 // Handle the argument.
6569 ABIArgInfo AI = classifyArgumentType(Ty);
6570 llvm::Type *ArgTy = CGT.ConvertType(Ty);
6571 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6572 AI.setCoerceToType(ArgTy);
6573 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6574 llvm::Value *Val;
6575 uint64_t ArgSize = 0;
6576 switch (AI.getKind()) {
6577 case ABIArgInfo::Expand:
6578 case ABIArgInfo::InAlloca:
6579 llvm_unreachable("Unsupported ABI kind for va_arg");
6580 case ABIArgInfo::Ignore:
6581 Val = llvm::UndefValue::get(ArgPtrTy);
6582 ArgSize = 0;
6583 break;
6584 case ABIArgInfo::Extend:
6585 case ABIArgInfo::Direct:
6586 Val = Builder.CreatePointerCast(AP, ArgPtrTy);
6587 ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
6588 if (ArgSize < 4)
6589 ArgSize = 4;
6590 break;
6591 case ABIArgInfo::Indirect:
6592 llvm::Value *ArgAddr;
6593 ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
6594 ArgAddr = Builder.CreateLoad(ArgAddr);
6595 Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
6596 ArgSize = 4;
6597 break;
6598 }
6600 // Increment the VAList.
6601 if (ArgSize) {
6602 llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
6603 Builder.CreateStore(APN, VAListAddrAsBPP);
6604 }
6605 return Val;
6606 }
6608 /// During the expansion of a RecordType, an incomplete TypeString is placed
6609 /// into the cache as a means to identify and break recursion.
6610 /// If there is a Recursive encoding in the cache, it is swapped out and will
6611 /// be reinserted by removeIncomplete().
6612 /// All other types of encoding should have been used rather than arriving here.
6613 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
6614 std::string StubEnc) {
6615 if (!ID)
6616 return;
6617 Entry &E = Map[ID];
6618 assert( (E.Str.empty() || E.State == Recursive) &&
6619 "Incorrect use of addIncomplete");
6620 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
6621 E.Swapped.swap(E.Str); // swap out the Recursive
6622 E.Str.swap(StubEnc);
6623 E.State = Incomplete;
6624 ++IncompleteCount;
6625 }
6627 /// Once the RecordType has been expanded, the temporary incomplete TypeString
6628 /// must be removed from the cache.
6629 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
6630 /// Returns true if the RecordType was defined recursively.
6631 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
6632 if (!ID)
6633 return false;
6634 auto I = Map.find(ID);
6635 assert(I != Map.end() && "Entry not present");
6636 Entry &E = I->second;
6637 assert( (E.State == Incomplete ||
6638 E.State == IncompleteUsed) &&
6639 "Entry must be an incomplete type");
6640 bool IsRecursive = false;
6641 if (E.State == IncompleteUsed) {
6642 // We made use of our Incomplete encoding, thus we are recursive.
6643 IsRecursive = true;
6644 --IncompleteUsedCount;
6645 }
6646 if (E.Swapped.empty())
6647 Map.erase(I);
6648 else {
6649 // Swap the Recursive back.
6650 E.Swapped.swap(E.Str);
6651 E.Swapped.clear();
6652 E.State = Recursive;
6653 }
6654 --IncompleteCount;
6655 return IsRecursive;
6656 }
6658 /// Add the encoded TypeString to the cache only if it is NonRecursive or
6659 /// Recursive (viz: all sub-members were expanded as fully as possible).
6660 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
6661 bool IsRecursive) {
6662 if (!ID || IncompleteUsedCount)
6663 return; // No key or it is an incomplete sub-type so don't add.
6664 Entry &E = Map[ID];
6665 if (IsRecursive && !E.Str.empty()) {
6666 assert(E.State==Recursive && E.Str.size() == Str.size() &&
6667 "This is not the same Recursive entry");
6668 // The parent container was not recursive after all, so we could have used
6669 // this Recursive sub-member entry after all, but we assumed the worst when
6670 // we started, viz: IncompleteCount!=0.
6671 return;
6672 }
6673 assert(E.Str.empty() && "Entry already present");
6674 E.Str = Str.str();
6675 E.State = IsRecursive? Recursive : NonRecursive;
6676 }
6678 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
6679 /// are recursively expanding a type (IncompleteCount != 0) and the cached
6680 /// encoding is Recursive, return an empty StringRef.
6681 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
6682 if (!ID)
6683 return StringRef(); // We have no key.
6684 auto I = Map.find(ID);
6685 if (I == Map.end())
6686 return StringRef(); // We have no encoding.
6687 Entry &E = I->second;
6688 if (E.State == Recursive && IncompleteCount)
6689 return StringRef(); // We don't use Recursive encodings for member types.
6691 if (E.State == Incomplete) {
6692 // The incomplete type is being used to break out of recursion.
6693 E.State = IncompleteUsed;
6694 ++IncompleteUsedCount;
6695 }
6696 return E.Str.c_str();
6697 }
6699 /// The XCore ABI includes a type information section that communicates symbol
6700 /// type information to the linker. The linker uses this information to verify
6701 /// safety/correctness of things such as array bounds and pointers et al.
6702 /// The ABI only requires C (and XC) language modules to emit TypeStrings.
6703 /// This type information (TypeString) is emitted into metadata for all global
6704 /// symbols: definitions, declarations, functions & variables.
6706 /// The TypeString carries type, qualifier, name, size & value details.
6707 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
6708 /// <https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf>
6709 /// The output is tested by test/CodeGen/xcore-stringtype.c.
6711 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
6712 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
6714 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
6715 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6716 CodeGen::CodeGenModule &CGM) const {
6717 SmallStringEnc Enc;
6718 if (getTypeString(Enc, D, CGM, TSC)) {
6719 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
6720 llvm::SmallVector<llvm::Metadata *, 2> MDVals;
6721 MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
6722 MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
6723 llvm::NamedMDNode *MD =
6724 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
6725 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6726 }
6727 }
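// For example (illustrative): for
//   int f(int *p);
// the emitted node looks roughly like
//   !xcore.typestrings = !{!0}
//   !0 = !{i32 (i32*)* @f, !"f{si}(p(si))"}
// (see the Tools Development Guide cited above for the exact grammar).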
6729 static bool appendType(SmallStringEnc &Enc, QualType QType,
6730 const CodeGen::CodeGenModule &CGM,
6731 TypeStringCache &TSC);
6733 /// Helper function for appendRecordType().
6734 /// Builds a SmallVector containing the encoded field types in declaration order.
6735 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
6736 const RecordDecl *RD,
6737 const CodeGen::CodeGenModule &CGM,
6738 TypeStringCache &TSC) {
6739 for (const auto *Field : RD->fields()) {
6740 SmallStringEnc Enc;
6741 Enc += "m(";
6742 Enc += Field->getName();
6743 Enc += "){";
6744 if (Field->isBitField()) {
6745 Enc += "b(";
6746 llvm::raw_svector_ostream OS(Enc);
6747 OS.resync();
6748 OS << Field->getBitWidthValue(CGM.getContext());
6749 OS.flush();
6750 Enc += ':';
6751 }
6752 if (!appendType(Enc, Field->getType(), CGM, TSC))
6753 return false;
6754 if (Field->isBitField())
6755 Enc += ')';
6756 Enc += '}';
6757 FE.push_back(FieldEncoding(!Field->getName().empty(), Enc));
6758 }
6759 return true;
6760 }
6762 /// Appends structure and union types to Enc and adds encoding to cache.
6763 /// Recursively calls appendType (via extractFieldType) for each field.
6764 /// Union types have their fields ordered according to the ABI.
6765 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
6766 const CodeGen::CodeGenModule &CGM,
6767 TypeStringCache &TSC, const IdentifierInfo *ID) {
6768 // Append the cached TypeString if we have one.
6769 StringRef TypeString = TSC.lookupStr(ID);
6770 if (!TypeString.empty()) {
6771 Enc += TypeString;
6772 return true;
6773 }
6775 // Start to emit an incomplete TypeString.
6776 size_t Start = Enc.size();
6777 Enc += (RT->isUnionType()? 'u' : 's');
6778 Enc += '(';
6779 if (ID)
6780 Enc += ID->getName();
6781 Enc += "){";
6783 // We collect all encoded fields and order as necessary.
6784 bool IsRecursive = false;
6785 const RecordDecl *RD = RT->getDecl()->getDefinition();
6786 if (RD && !RD->field_empty()) {
6787 // An incomplete TypeString stub is placed in the cache for this RecordType
6788 // so that recursive calls to this RecordType will use it whilst building a
6789 // complete TypeString for this RecordType.
6790 SmallVector<FieldEncoding, 16> FE;
6791 std::string StubEnc(Enc.substr(Start).str());
6792 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
6793 TSC.addIncomplete(ID, std::move(StubEnc));
6794 if (!extractFieldType(FE, RD, CGM, TSC)) {
6795 (void) TSC.removeIncomplete(ID);
6796 return false;
6797 }
6798 IsRecursive = TSC.removeIncomplete(ID);
6799 // The ABI requires unions to be sorted but not structures.
6800 // See FieldEncoding::operator< for sort algorithm.
6801 if (RT->isUnionType())
6802 std::sort(FE.begin(), FE.end());
6803 // We can now complete the TypeString.
6804 unsigned E = FE.size();
6805 for (unsigned I = 0; I != E; ++I) {
6806 if (I)
6807 Enc += ',';
6808 Enc += FE[I].str();
6809 }
6810 }
6811 Enc += '}';
6812 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
6813 return true;
6814 }
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}

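// Illustrative example (derived from the code above, not from the original
// source): 'enum E { A = 1, B = 2 };' encodes as "e(E){m(A){1},m(B){2}}";
// enumerators are ordered by the sort above, not by declaration order.
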
/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[] =
      {"", "c:", "r:", "cr:", "v:", "cv:", "rv:", "crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}

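// Illustrative examples (derived from the table above, not from the original
// source): 'const int' contributes "c:si", 'volatile int' contributes
// "v:si", and 'const volatile int' contributes "cv:si"; an unqualified type
// adds no prefix.
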
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
  case BuiltinType::Void:       EncType = "0";   break;
  case BuiltinType::Bool:       EncType = "b";   break;
  case BuiltinType::Char_U:     EncType = "uc";  break;
  case BuiltinType::UChar:      EncType = "uc";  break;
  case BuiltinType::SChar:      EncType = "sc";  break;
  case BuiltinType::UShort:     EncType = "us";  break;
  case BuiltinType::Short:      EncType = "ss";  break;
  case BuiltinType::UInt:       EncType = "ui";  break;
  case BuiltinType::Int:        EncType = "si";  break;
  case BuiltinType::ULong:      EncType = "ul";  break;
  case BuiltinType::Long:       EncType = "sl";  break;
  case BuiltinType::ULongLong:  EncType = "ull"; break;
  case BuiltinType::LongLong:   EncType = "sll"; break;
  case BuiltinType::Float:      EncType = "ft";  break;
  case BuiltinType::Double:     EncType = "d";   break;
  case BuiltinType::LongDouble: EncType = "ld";  break;
  default:
    return false;
  }
  Enc += EncType;
  return true;
}

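// For reference (illustrative, not from the original source): 'unsigned long
// long' maps to "ull" and 'double' to "d". Builtin kinds not listed above
// (e.g. __int128) fall into the default case, so no TypeString is produced
// for the enclosing declaration.
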
/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

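// Illustrative example (derived from the code above, not from the original
// source): 'const int *' (a pointer to const int) encodes as "p(c:si)".
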
/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

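// Illustrative examples (derived from the code above, not from the original
// source): 'int A[8]' encodes as "a(8:si)"; an 'extern int B[]' global of
// unknown size encodes as "a(*:si)", since getTypeString() passes "*" as
// NoSizeEnc for global arrays.
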
/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin(), E = FPT->param_type_end();
    if (I == E)
      Enc += FPT->isVariadic() ? "va" : "0";
    while (I != E) {
      if (!appendType(Enc, *I, CGM, TSC))
        return false;
      if (++I != E || FPT->isVariadic())
        Enc += ',';
    }
    if (FPT->isVariadic() && FPT->getNumParams())
      Enc += "va";
  }
  Enc += ')';
  return true;
}

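// Illustrative examples (derived from the code above, not from the original
// source): 'void f(void)' encodes as "f{0}(0)", 'int g(int, ...)' as
// "f{si}(si,va)", and an unprototyped K&R declaration 'int h();' as
// "f{si}()".
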
/// Handles the type's qualifier before dispatching a call to handle specific
/// type(s).
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}

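// Illustrative note (not from the original source): because QType is
// canonicalized first, typedefs are transparent to the encoding; given
// 'typedef int T;', a 'volatile T' still encodes as "v:si".
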
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }

  return false;
}

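// Illustrative note (not from the original source): only declarations with
// C language linkage are encoded. In C++, a function must be declared
// 'extern "C"' to receive a TypeString; anything else returns false above
// and no metadata is emitted for it.
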
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

const llvm::Triple &CodeGenModule::getTriple() const {
  return getTarget().getTriple();
}

bool CodeGenModule::supportsCOMDAT() const {
  return !getTriple().isOSBinFormatMachO();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    if (getTarget().getABI() == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
      Kind = ARMABIInfo::AAPCS_VFP;

    switch (Triple.getOS()) {
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo =
                   new NaClARMTargetCodeGenInfo(Types, Kind));
    default:
      return *(TheTargetCodeGenInfo =
                   new ARMTargetCodeGenInfo(Types, Kind));
    }
  }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;

      return *(TheTargetCodeGenInfo =
                   new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
    }
    return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;

    return *(TheTargetCodeGenInfo =
                 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo(
                   Types, IsDarwinVectorABI, IsSmallStructInRegABI,
                   IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    }
    return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo(
                 Types, IsDarwinVectorABI, IsSmallStructInRegABI,
                 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
  }

  case llvm::Triple::x86_64: {
    bool HasAVX = getTarget().getABI() == "avx";

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
                   new WinX86_64TargetCodeGenInfo(Types, HasAVX));
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo =
                   new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
    default:
      return *(TheTargetCodeGenInfo =
                   new X86_64TargetCodeGenInfo(Types, HasAVX));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
  }
}
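
// Illustrative sketch (hypothetical, not from the original source): a new
// target would be wired in with one more case in the switch above, e.g.
//
//   case llvm::Triple::mytarget: // hypothetical triple
//     return *(TheTargetCodeGenInfo = new MyTargetCodeGenInfo(Types));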