//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>    // std::sort
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
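///
/// For example, given
///   typedef union { int i; unsigned u; } U
///       __attribute__((transparent_union));
/// a parameter of type U is passed exactly as if it had the type of its
/// first field, 'int'.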
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
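///
/// For example, 'struct { float f; }' and 'struct { struct { double d; } x; }'
/// are both single element structs, while 'struct { float f; float g; }' and
/// 'struct { float f[2]; }' are not.
///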
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
//
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
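//
// For example, 'struct { int a; long long b; }' can be expanded into an i32
// argument and an i64 argument with the same stack layout, whereas a struct
// containing a 'short' field cannot (a 16-bit field is not a 32- or 64-bit
// "basic" type).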
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}

namespace {

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return nullptr;
}

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return nullptr;
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
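/// For example, '<4 x i16>' is an MMX type, while '<1 x i64>' (64-bit scalar
/// element) and '<2 x float>' (non-integer elements) are not.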
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
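/// For example, a homogeneous aggregate of four '__m128' members qualifies,
/// but one with five members does not.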
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
    :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  // .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
};

}

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     $0 is unchanged, but
///     $1 becomes $2, and so on.
/// The result will be an asm string whose operand references have been
/// shifted past the newly added outputs.
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
  }
  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
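      //
      // For example, on Darwin 'struct { double d; }' is returned directly as
      // a 'double' (in %st(0)) rather than indirectly through a hidden sret
      // pointer.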
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    if (RT) {
      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment. For x86_32, each argument is 4
  // byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;

    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
  case ABIArgInfo::Expand:
    if (Info.getInReg())
      return false;
    return true;
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  unsigned StackOffset = 0;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}

llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                                llvm::AttributeSet::FunctionIndex,
                                                B));
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  ///
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
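  ///
  /// For example, 'struct { long x; double y; }' classifies with
  /// \arg Lo == Integer (the first eightbyte, passed in a GPR) and
  /// \arg Hi == SSE (the second eightbyte, passed in an XMM register).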
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
                      bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x0a << 8) | // .+0x0c
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix. This
  // matches the behavior of MSVC.
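  // For example, "msvcrt" becomes "msvcrt.lib", while "msvcrt.lib" and
  // "MSVCRT.LIB" are left unchanged.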
  std::string ArgStr = Lib;
  if (!Lib.endswith_lower(".lib"))
    ArgStr += ".lib";
  return ArgStr;
}

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             bool d, bool p, bool w, unsigned RegParms)
    : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
  bool HasAVX;
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)), HasAVX(HasAVX) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }

  unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
    return HasAVX ? 32 : 16;
  }
};

}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Lo == Memory || Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.
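  //
  // For example, merging INTEGER with SSE yields INTEGER (rule (d)), and
  // merging NO_CLASS with SSE yields SSE (rule (b)).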

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getTarget().getTriple().isOSNaCl())) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}, so classify both
        // Lo and Hi now.
        Lo = Hi = Integer;
      } else {
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
        // straddles an eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj) {
          Lo = Hi = Integer;
        } else {
          Current = Integer;
        }
      }
    } else {
      Current = Integer;
    }
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there are no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      //
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
      // registers if they are "named", i.e. not part of the "..." of a
      // variadic function.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }
1865 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
1866 QualType ET = getContext().getCanonicalType(CT->getElementType());
1868 uint64_t Size = getContext().getTypeSize(Ty);
1869 if (ET->isIntegralOrEnumerationType()) {
1870 if (Size <= 64)
1871 Current = Integer;
1872 else if (Size <= 128)
1873 Lo = Hi = Integer;
1874 } else if (ET == getContext().FloatTy)
1875 Current = SSE;
1876 else if (ET == getContext().DoubleTy ||
1877 (ET == getContext().LongDoubleTy &&
1878 getTarget().getTriple().isOSNaCl()))
1879 Current = SSE;
1880 else if (ET == getContext().LongDoubleTy)
1881 Current = ComplexX87;
1883 // If this complex type crosses an eightbyte boundary then it
1884 // should be split.
1885 uint64_t EB_Real = (OffsetBase) / 64;
1886 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
1887 if (Hi == NoClass && EB_Real != EB_Imag)
1888 Hi = Lo;
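// Worked example: for _Complex double at OffsetBase 0, EB_Real = 0/64 = 0 and
// EB_Imag = (0 + 64)/64 = 1, so Hi inherits Lo's SSE class and the two halves
// occupy separate SSE eightbytes; _Complex float (EB_Imag = 32/64 = 0) stays
// within a single eightbyte.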
1893 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
1894 // Arrays are treated like structures.
1896 uint64_t Size = getContext().getTypeSize(Ty);
1898 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1899 // than four eightbytes, ..., it has class MEMORY.
1900 if (Size > 256)
1901 return;
1903 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
1904 // fields, it has class MEMORY.
1906 // Only need to check alignment of array base.
1907 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
1908 return;
1910 // Otherwise implement simplified merge. We could be smarter about
1911 // this, but it isn't worth it and would be harder to verify.
1913 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
1914 uint64_t ArraySize = AT->getSize().getZExtValue();
1916 // The only case a 256-bit wide vector could be used is when the array
1917 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1918 // to work for sizes wider than 128, early check and fallback to memory.
1919 if (Size > 128 && EltSize != 256)
1920 return;
1922 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
1923 Class FieldLo, FieldHi;
1924 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
1925 Lo = merge(Lo, FieldLo);
1926 Hi = merge(Hi, FieldHi);
1927 if (Lo == Memory || Hi == Memory)
1928 break;
1929 }
1931 postMerge(Size, Lo, Hi);
1932 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
1933 return;
1934 }
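// For example, 'double[2]' (two eightbytes) merges to Lo = SSE, Hi = SSE,
// while 'int[4]' merges to Lo = INTEGER, Hi = INTEGER; 'double[5]' (320 bits)
// exceeds four eightbytes and is classified MEMORY by the size check above.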
1936 if (const RecordType *RT = Ty->getAs<RecordType>()) {
1937 uint64_t Size = getContext().getTypeSize(Ty);
1939 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
1940 // than four eightbytes, ..., it has class MEMORY.
1941 if (Size > 256)
1942 return;
1944 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
1945 // copy constructor or a non-trivial destructor, it is passed by invisible
1946 // reference.
1947 if (getRecordArgABI(RT, getCXXABI()))
1948 return;
1950 const RecordDecl *RD = RT->getDecl();
1952 // Assume variable-sized types are passed in memory.
1953 if (RD->hasFlexibleArrayMember())
1954 return;
1956 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
1958 // Reset Lo class; this will be recomputed.
1959 Current = NoClass;
1961 // If this is a C++ record, classify the bases first.
1962 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1963 for (const auto &I : CXXRD->bases()) {
1964 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
1965 "Unexpected base class!");
1966 const CXXRecordDecl *Base =
1967 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
1969 // Classify this field.
1971 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
1972 // single eightbyte, each is classified separately. Each eightbyte gets
1973 // initialized to class NO_CLASS.
1974 Class FieldLo, FieldHi;
1975 uint64_t Offset =
1976 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
1977 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
1978 Lo = merge(Lo, FieldLo);
1979 Hi = merge(Hi, FieldHi);
1980 if (Lo == Memory || Hi == Memory)
1981 break;
1982 }
1983 }
1985 // Classify the fields one at a time, merging the results.
1986 unsigned idx = 0;
1987 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
1988 i != e; ++i, ++idx) {
1989 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
1990 bool BitField = i->isBitField();
1992 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
1993 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
1995 // The only case a 256-bit wide vector could be used is when the struct
1996 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
1997 // to work for sizes wider than 128, early check and fallback to memory.
1999 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
2000 Lo = Memory;
2001 return;
2002 }
2003 // Note, skip this test for bit-fields, see below.
2004 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2005 Lo = Memory;
2006 return;
2007 }
2009 // Classify this field.
2011 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2012 // exceeds a single eightbyte, each is classified
2013 // separately. Each eightbyte gets initialized to class
2014 // NO_CLASS.
2015 Class FieldLo, FieldHi;
2017 // Bit-fields require special handling, they do not force the
2018 // structure to be passed in memory even if unaligned, and
2019 // therefore they can straddle an eightbyte.
2021 // Ignore padding bit-fields.
2022 if (i->isUnnamedBitfield())
2023 continue;
2025 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2026 uint64_t Size = i->getBitWidthValue(getContext());
2028 uint64_t EB_Lo = Offset / 64;
2029 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2031 if (EB_Lo) {
2032 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2033 FieldLo = NoClass;
2034 FieldHi = Integer;
2035 } else {
2036 FieldLo = Integer;
2037 FieldHi = EB_Hi ? Integer : NoClass;
2038 }
2039 } else
2040 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2041 Lo = merge(Lo, FieldLo);
2042 Hi = merge(Hi, FieldHi);
2043 if (Lo == Memory || Hi == Memory)
2044 break;
2045 }
2046 }
2047 postMerge(Size, Lo, Hi);
2048 }
2049 }
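// For example, 'struct { double d; int i; }' classifies as Lo = SSE (the
// double) and Hi = INTEGER (the int plus tail padding), so it is passed in
// one XMM eightbyte and one GPR eightbyte.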
2051 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2052 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2053 // place naturally.
2054 if (!isAggregateTypeForABI(Ty)) {
2055 // Treat an enum type as its underlying type.
2056 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2057 Ty = EnumTy->getDecl()->getIntegerType();
2059 return (Ty->isPromotableIntegerType() ?
2060 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2063 return ABIArgInfo::getIndirect(0);
2066 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2067 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2068 uint64_t Size = getContext().getTypeSize(VecTy);
2069 unsigned LargestVector = HasAVX ? 256 : 128;
2070 if (Size <= 64 || Size > LargestVector)
2071 return true;
2072 }
2074 return false;
2075 }
2077 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2078 unsigned freeIntRegs) const {
2079 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2080 // place naturally.
2082 // This assumption is optimistic, as there could be free registers available
2083 // when we need to pass this argument in memory, and LLVM could try to pass
2084 // the argument in the free register. This does not seem to happen currently,
2085 // but this code would be much safer if we could mark the argument with
2086 // 'onstack'. See PR12193.
2087 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2088 // Treat an enum type as its underlying type.
2089 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2090 Ty = EnumTy->getDecl()->getIntegerType();
2092 return (Ty->isPromotableIntegerType() ?
2093 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2096 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2097 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2099 // Compute the byval alignment. We specify the alignment of the byval in all
2100 // cases so that the mid-level optimizer knows the alignment of the byval.
2101 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
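// For example, a type with 16-byte alignment yields Align = max(128/8, 8) =
// 16, while anything aligned to 8 bytes or less gets the minimum byval
// alignment of 8.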
2103 // Attempt to avoid passing indirect results using byval when possible. This
2104 // is important for good codegen.
2106 // We do this by coercing the value into a scalar type which the backend can
2107 // handle naturally (i.e., without using byval).
2109 // For simplicity, we currently only do this when we have exhausted all of the
2110 // free integer registers. Doing this when there are free integer registers
2111 // would require more care, as we would have to ensure that the coerced value
2112 // did not claim the unused register. That would require either reordering the
2113 // arguments to the function (so that any subsequent inreg values came first),
2114 // or only doing this optimization when there were no following arguments that
2115 // might be inreg.
2117 // We currently expect it to be rare (particularly in well written code) for
2118 // arguments to be passed on the stack when there are still free integer
2119 // registers available (this would typically imply large structs being passed
2120 // by value), so this seems like a fair tradeoff for now.
2122 // We can revisit this if the backend grows support for 'onstack' parameter
2123 // attributes. See PR12193.
2124 if (freeIntRegs == 0) {
2125 uint64_t Size = getContext().getTypeSize(Ty);
2127 // If this type fits in an eightbyte, coerce it into the matching integral
2128 // type, which will end up on the stack (with alignment 8).
2129 if (Align == 8 && Size <= 64)
2130 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2131 Size));
2132 }
2134 return ABIArgInfo::getIndirect(Align);
2135 }
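// For example, once the six integer registers are exhausted, an 8-byte struct
// with 8-byte alignment is coerced to a plain i64 and lands on the stack,
// avoiding a byval copy.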
2137 /// GetByteVectorType - The ABI specifies that a value should be passed in a
2138 /// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
2139 /// vector register.
2140 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2141 llvm::Type *IRType = CGT.ConvertType(Ty);
2143 // Wrapper structs that just contain vectors are passed just like vectors,
2144 // strip them off if present.
2145 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
2146 while (STy && STy->getNumElements() == 1) {
2147 IRType = STy->getElementType(0);
2148 STy = dyn_cast<llvm::StructType>(IRType);
2151 // If the preferred type is a 16-byte (or, with AVX, 32-byte) vector, prefer to pass it.
2152 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
2153 llvm::Type *EltTy = VT->getElementType();
2154 unsigned BitWidth = VT->getBitWidth();
2155 if ((BitWidth >= 128 && BitWidth <= 256) &&
2156 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
2157 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
2158 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
2159 EltTy->isIntegerTy(128)))
2160 return VT;
2161 }
2163 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
2164 }
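// For example, 'struct S { __m128 v; }' is unwrapped to the underlying
// <4 x float>, which is then passed or returned directly in an XMM register
// rather than as a structure.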
2166 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2167 /// is known to either be off the end of the specified type or to be in
2168 /// alignment padding. The user type specified is known to be at most 128 bits
2169 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2170 /// classification that put one of the two halves in the INTEGER class.
2172 /// It is conservatively correct to return false.
2173 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2174 unsigned EndBit, ASTContext &Context) {
2175 // If the bytes being queried are off the end of the type, there is no user
2176 // data hiding here. This handles analysis of builtins, vectors and other
2177 // types that don't contain interesting padding.
2178 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
2179 if (TySize <= StartBit)
2180 return true;
2182 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2183 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2184 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2186 // Check each element to see if the element overlaps with the queried range.
2187 for (unsigned i = 0; i != NumElts; ++i) {
2188 // If the element is after the span we care about, then we're done.
2189 unsigned EltOffset = i*EltSize;
2190 if (EltOffset >= EndBit) break;
2192 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
2193 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
2194 EndBit-EltOffset, Context))
2195 return false;
2196 }
2197 // If it overlaps no elements, then it is safe to process as padding.
2198 return true;
2199 }
2201 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2202 const RecordDecl *RD = RT->getDecl();
2203 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2205 // If this is a C++ record, check the bases first.
2206 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2207 for (const auto &I : CXXRD->bases()) {
2208 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2209 "Unexpected base class!");
2210 const CXXRecordDecl *Base =
2211 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2213 // If the base is after the span we care about, ignore it.
2214 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
2215 if (BaseOffset >= EndBit) continue;
2217 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2218 if (!BitsContainNoUserData(I.getType(), BaseStart,
2219 EndBit-BaseOffset, Context))
2220 return false;
2221 }
2222 }
2224 // Verify that no field has data that overlaps the region of interest. Yes,
2225 // this could be sped up a lot by being smarter about queried fields,
2226 // however we're only looking at structs up to 16 bytes, so we don't care
2227 // much.
2228 unsigned idx = 0;
2229 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2230 i != e; ++i, ++idx) {
2231 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
2233 // If we found a field after the region we care about, then we're done.
2234 if (FieldOffset >= EndBit) break;
2236 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2237 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
2238 Context))
2239 return false;
2240 }
2242 // If nothing in this record overlapped the area of interest, then we're
2243 // clean.
2244 return true;
2245 }
2247 return false;
2248 }
2250 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2251 /// float member at the specified offset. For example, {int,{float}} has a
2252 /// float at offset 4. It is conservatively correct for this routine to return
2253 /// false.
2254 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
2255 const llvm::DataLayout &TD) {
2256 // Base case if we find a float.
2257 if (IROffset == 0 && IRType->isFloatTy())
2258 return true;
2260 // If this is a struct, recurse into the field at the specified offset.
2261 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2262 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2263 unsigned Elt = SL->getElementContainingOffset(IROffset);
2264 IROffset -= SL->getElementOffset(Elt);
2265 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
2268 // If this is an array, recurse into the field at the specified offset.
2269 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2270 llvm::Type *EltTy = ATy->getElementType();
2271 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2272 IROffset -= IROffset/EltSize*EltSize;
2273 return ContainsFloatAtOffset(EltTy, IROffset, TD);
2274 }
2276 return false;
2277 }
2280 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
2281 /// low 8 bytes of an XMM register, corresponding to the SSE class.
2282 llvm::Type *X86_64ABIInfo::
2283 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2284 QualType SourceTy, unsigned SourceOffset) const {
2285 // The only three choices we have are double, <2 x float>, or float. We
2286 // pass as float if the last 4 bytes are just padding. This happens for
2287 // structs that contain 3 floats.
2288 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
2289 SourceOffset*8+64, getContext()))
2290 return llvm::Type::getFloatTy(getVMContext());
2292 // We want to pass as <2 x float> if the LLVM IR type contains a float at
2293 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
2294 // case.
2295 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
2296 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
2297 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
2299 return llvm::Type::getDoubleTy(getVMContext());
2300 }
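// For example, for 'struct { float a, b, c; }' the first eightbyte holds two
// floats and is passed as <2 x float>, while the second eightbyte holds one
// float plus padding and is passed as a lone float thanks to the
// BitsContainNoUserData check above.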
2303 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
2304 /// an 8-byte GPR. This means that we either have a scalar or we are talking
2305 /// about the high or low part of an up-to-16-byte struct. This routine picks
2306 /// the best LLVM IR type to represent this, which may be i64 or may be anything
2307 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
2308 /// {i32,i32}, etc).
2310 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
2311 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
2312 /// the 8-byte value references. PrefType may be null.
2314 /// SourceTy is the source-level type for the entire argument. SourceOffset is
2315 /// an offset into this that we're processing (which is always either 0 or 8).
2317 llvm::Type *X86_64ABIInfo::
2318 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2319 QualType SourceTy, unsigned SourceOffset) const {
2320 // If we're dealing with an un-offset LLVM IR type, then it means that we're
2321 // returning an 8-byte unit starting with it. See if we can safely use it.
2322 if (IROffset == 0) {
2323 // Pointers and int64's always fill the 8-byte unit.
2324 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2325 IRType->isIntegerTy(64))
2326 return IRType;
2328 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
2329 // goodness in the source type is just tail padding. This is allowed to
2330 // kick in for struct {double,int} on the int, but not on
2331 // struct{double,int,int} because we wouldn't return the second int. We
2332 // have to do this analysis on the source type because we can't depend on
2333 // unions being lowered a specific way etc.
2334 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2335 IRType->isIntegerTy(32) ||
2336 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2337 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2338 cast<llvm::IntegerType>(IRType)->getBitWidth();
2340 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2341 SourceOffset*8+64, getContext()))
2342 return IRType;
2343 }
2344 }
2346 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2347 // If this is a struct, recurse into the field at the specified offset.
2348 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2349 if (IROffset < SL->getSizeInBytes()) {
2350 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2351 IROffset -= SL->getElementOffset(FieldIdx);
2353 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2354 SourceTy, SourceOffset);
2358 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2359 llvm::Type *EltTy = ATy->getElementType();
2360 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2361 unsigned EltOffset = IROffset/EltSize*EltSize;
2362 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2363 SourceOffset);
2364 }
2366 // Okay, we don't have any better idea of what to pass, so we pass this in an
2367 // integer register that isn't too big to fit the rest of the struct.
2368 unsigned TySizeInBytes =
2369 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2371 assert(TySizeInBytes != SourceOffset && "Empty field?");
2373 // It is always safe to classify this as an integer type up to i64 that
2374 // isn't larger than the structure.
2375 return llvm::IntegerType::get(getVMContext(),
2376 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2377 }
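// For example, for 'struct { double d; int i; }' at SourceOffset 8 the
// preferred IR type is i32: bits 96-128 are tail padding, so an i32 covers
// all of the remaining user data in that eightbyte.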
2380 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2381 /// be used as elements of a two register pair to pass or return, return a
2382 /// first class aggregate to represent them. For example, if the low part of
2383 /// a by-value argument should be passed as i32* and the high part as float,
2384 /// return {i32*, float}.
2386 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2387 const llvm::DataLayout &TD) {
2388 // In order to correctly satisfy the ABI, we need the high part to start
2389 // at offset 8. If the high and low parts we inferred are both 4-byte types
2390 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2391 // the second element at offset 8. Check for this:
2392 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2393 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2394 unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
2395 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2397 // To handle this, we have to increase the size of the low part so that the
2398 // second element will start at an 8 byte offset. We can't increase the size
2399 // of the second element because it might make us access off the end of the
2400 // struct.
2402 // There are only two sorts of types the ABI generation code can produce for
2403 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2404 // Promote these to a larger type.
2405 if (Lo->isFloatTy())
2406 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2407 else {
2408 assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
2409 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2410 }
2413 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
2416 // Verify that the second element is at an 8-byte offset.
2417 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2418 "Invalid x86-64 argument pair!");
2419 return Result;
2420 }
2422 ABIArgInfo X86_64ABIInfo::
2423 classifyReturnType(QualType RetTy) const {
2424 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2425 // classification algorithm.
2426 X86_64ABIInfo::Class Lo, Hi;
2427 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2429 // Check some invariants.
2430 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2431 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2433 llvm::Type *ResType = nullptr;
2434 switch (Lo) {
2435 case NoClass:
2436 if (Hi == NoClass)
2437 return ABIArgInfo::getIgnore();
2438 // If the low part is just padding, it takes no register, leave ResType
2439 // null.
2440 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2441 "Unknown missing lo part");
2442 break;
2444 case SSEUp:
2445 case X87Up:
2446 llvm_unreachable("Invalid classification for lo word.");
2448 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2449 // memory.
2450 case Memory:
2451 return getIndirectReturnResult(RetTy);
2453 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2454 // available register of the sequence %rax, %rdx is used.
2455 case Integer:
2456 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2458 // If we have a sign or zero extended integer, make sure to return Extend
2459 // so that the parameter gets the right LLVM IR attributes.
2460 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2461 // Treat an enum type as its underlying type.
2462 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2463 RetTy = EnumTy->getDecl()->getIntegerType();
2465 if (RetTy->isIntegralOrEnumerationType() &&
2466 RetTy->isPromotableIntegerType())
2467 return ABIArgInfo::getExtend();
2468 }
2469 break;
2471 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2472 // available SSE register of the sequence %xmm0, %xmm1 is used.
2473 case SSE:
2474 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2475 break;
2477 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2478 // returned on the X87 stack in %st0 as 80-bit x87 number.
2479 case X87:
2480 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2481 break;
2483 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2484 // part of the value is returned in %st0 and the imaginary part in
2485 // %st1.
2486 case ComplexX87:
2487 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2488 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2489 llvm::Type::getX86_FP80Ty(getVMContext()),
2490 nullptr);
2491 break;
2492 }
2494 llvm::Type *HighPart = nullptr;
2495 switch (Hi) {
2496 // Memory was handled previously and X87 should
2497 // never occur as a hi class.
2498 case Memory:
2499 case X87:
2500 llvm_unreachable("Invalid classification for hi word.");
2502 case ComplexX87: // Previously handled.
2503 case NoClass:
2504 break;
2506 case Integer:
2507 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2508 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2509 return ABIArgInfo::getDirect(HighPart, 8);
2510 break;
2511 case SSE:
2512 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2513 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2514 return ABIArgInfo::getDirect(HighPart, 8);
2515 break;
2517 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
2518 // is passed in the next available eightbyte chunk of the last used
2519 // vector register.
2521 // SSEUP should always be preceded by SSE, just widen.
2522 case SSEUp:
2523 assert(Lo == SSE && "Unexpected SSEUp classification.");
2524 ResType = GetByteVectorType(RetTy);
2525 break;
2527 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2528 // returned together with the previous X87 value in %st0.
2529 case X87Up:
2530 // If X87Up is preceded by X87, we don't need to do
2531 // anything. However, in some cases with unions it may not be
2532 // preceded by X87. In such situations we follow gcc and pass the
2533 // extra bits in an SSE reg.
2534 if (Lo != X87) {
2535 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2536 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2537 return ABIArgInfo::getDirect(HighPart, 8);
2538 }
2539 break;
2540 }
2542 // If a high part was specified, merge it together with the low part. It is
2543 // known to pass in the high eightbyte of the result. We do this by forming a
2544 // first class struct aggregate with the high and low part: {low, high}
2545 if (HighPart)
2546 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2548 return ABIArgInfo::getDirect(ResType);
2549 }
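// For example, '_Complex double' classifies as (SSE, SSE); its low and high
// doubles are merged by GetX86_64ByValArgumentPair into {double, double} and
// returned in %xmm0 and %xmm1.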
2551 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
2552 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
2556 Ty = useFirstFieldIfTransparentUnion(Ty);
2558 X86_64ABIInfo::Class Lo, Hi;
2559 classify(Ty, 0, Lo, Hi, isNamedArg);
2561 // Check some invariants.
2562 // FIXME: Enforce these by construction.
2563 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2564 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2568 llvm::Type *ResType = nullptr;
2569 switch (Lo) {
2570 case NoClass:
2571 if (Hi == NoClass)
2572 return ABIArgInfo::getIgnore();
2573 // If the low part is just padding, it takes no register, leave ResType
2574 // null.
2575 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2576 "Unknown missing lo part");
2577 break;
2579 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
2580 // on the stack.
2581 case Memory:
2583 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
2584 // COMPLEX_X87, it is passed in memory.
2585 case X87:
2586 case ComplexX87:
2587 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
2588 ++neededInt;
2589 return getIndirectResult(Ty, freeIntRegs);
2591 case SSEUp:
2592 case X87Up:
2593 llvm_unreachable("Invalid classification for lo word.");
2595 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
2596 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
2597 // and %r9 is used.
2598 case Integer:
2599 ++neededInt;
2601 // Pick an 8-byte type based on the preferred type.
2602 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2604 // If we have a sign or zero extended integer, make sure to return Extend
2605 // so that the parameter gets the right LLVM IR attributes.
2606 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2607 // Treat an enum type as its underlying type.
2608 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2609 Ty = EnumTy->getDecl()->getIntegerType();
2611 if (Ty->isIntegralOrEnumerationType() &&
2612 Ty->isPromotableIntegerType())
2613 return ABIArgInfo::getExtend();
2614 }
2615 break;
2618 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
2619 // available SSE register is used, the registers are taken in the
2620 // order from %xmm0 to %xmm7.
2621 case SSE: {
2622 llvm::Type *IRType = CGT.ConvertType(Ty);
2623 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
2624 ++neededSSE;
2625 break;
2626 }
2627 }
2629 llvm::Type *HighPart = nullptr;
2630 switch (Hi) {
2631 // Memory was handled previously, ComplexX87 and X87 should
2632 // never occur as hi classes, and X87Up must be preceded by X87,
2633 // which is passed in memory.
2634 case Memory:
2635 case X87:
2636 case ComplexX87:
2637 llvm_unreachable("Invalid classification for hi word.");
2639 case NoClass: break;
2641 case Integer:
2642 ++neededInt;
2643 // Pick an 8-byte type based on the preferred type.
2644 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2646 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
2647 return ABIArgInfo::getDirect(HighPart, 8);
2648 break;
2650 // X87Up generally doesn't occur here (long double is passed in
2651 // memory), except in situations involving unions.
2652 case X87Up:
2653 case SSE:
2654 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
2656 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
2657 return ABIArgInfo::getDirect(HighPart, 8);
2659 ++neededSSE;
2660 break;
2662 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
2663 // eightbyte is passed in the upper half of the last used SSE
2664 // register. This only happens when 128-bit vectors are passed.
2665 case SSEUp:
2666 assert(Lo == SSE && "Unexpected SSEUp classification");
2667 ResType = GetByteVectorType(Ty);
2668 break;
2669 }
2671 // If a high part was specified, merge it together with the low part. It is
2672 // known to pass in the high eightbyte of the result. We do this by forming a
2673 // first class struct aggregate with the high and low part: {low, high}
2674 if (HighPart)
2675 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2677 return ABIArgInfo::getDirect(ResType);
2678 }
2680 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2682 if (!getCXXABI().classifyReturnType(FI))
2683 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2685 // Keep track of the number of assigned registers.
2686 unsigned freeIntRegs = 6, freeSSERegs = 8;
2688 // If the return value is indirect, then the hidden argument is consuming one
2689 // integer register.
2690 if (FI.getReturnInfo().isIndirect())
2691 --freeIntRegs;
2693 // The chain argument effectively gives us another free register.
2694 if (FI.isChainCall())
2695 ++freeIntRegs;
2697 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
2698 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
2699 // get assigned (in left-to-right order) for passing as follows...
2700 unsigned ArgNo = 0;
2701 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
2702 it != ie; ++it, ++ArgNo) {
2703 bool IsNamedArg = ArgNo < NumRequiredArgs;
2705 unsigned neededInt, neededSSE;
2706 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2707 neededSSE, IsNamedArg);
2709 // AMD64-ABI 3.2.3p3: If there are no registers available for any
2710 // eightbyte of an argument, the whole argument is passed on the
2711 // stack. If registers have already been assigned for some
2712 // eightbytes of such an argument, the assignments get reverted.
2713 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
2714 freeIntRegs -= neededInt;
2715 freeSSERegs -= neededSSE;
2716 } else {
2717 it->info = getIndirectResult(it->type, freeIntRegs);
2718 }
2719 }
2720 }
2722 static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
2723 QualType Ty,
2724 CodeGenFunction &CGF) {
2725 llvm::Value *overflow_arg_area_p =
2726 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2727 llvm::Value *overflow_arg_area =
2728 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2730 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2731 // byte boundary if alignment needed by type exceeds 8 byte boundary.
2732 // It isn't stated explicitly in the standard, but in practice we use
2733 // alignment greater than 16 where necessary.
2734 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
2735 if (Align > 8) {
2736 // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
2737 llvm::Value *Offset =
2738 llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
2739 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
2740 llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
2741 CGF.Int64Ty);
2742 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
2743 overflow_arg_area =
2744 CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
2745 overflow_arg_area->getType(),
2746 "overflow_arg_area.align");
2749 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
2750 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2751 llvm::Value *Res =
2752 CGF.Builder.CreateBitCast(overflow_arg_area,
2753 llvm::PointerType::getUnqual(LTy));
2755 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2756 // l->overflow_arg_area + sizeof(type).
2757 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2758 // an 8 byte boundary.
2760 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2761 llvm::Value *Offset =
2762 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
2763 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2764 "overflow_arg_area.next");
2765 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
2767 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
2768 return Res;
2769 }
2771 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2772 CodeGenFunction &CGF) const {
2773 // Assume that va_list type is correct; should be pointer to LLVM type:
2774 // struct {
2775 //   i32 gp_offset;
2776 //   i32 fp_offset;
2777 //   i8* overflow_arg_area;
2778 //   i8* reg_save_area;
2779 // };
2780 unsigned neededInt, neededSSE;
2782 Ty = CGF.getContext().getCanonicalType(Ty);
2783 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
2784 /*isNamedArg*/false);
2786 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2787 // in the registers. If not go to step 7.
2788 if (!neededInt && !neededSSE)
2789 return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2791 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2792 // general purpose registers needed to pass type and num_fp to hold
2793 // the number of floating point registers needed.
2795 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2796 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2797 // l->fp_offset > 304 - num_fp * 16 go to step 7.
2799 // NOTE: 304 is a typo in the ABI document; there are (6 * 8 + 8 * 16) = 176
2800 // bytes of register save space.
2802 llvm::Value *InRegs = nullptr;
2803 llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
2804 llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
2805 if (neededInt) {
2806 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
2807 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
2808 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
2809 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
2810 }
2812 if (neededSSE) {
2813 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
2814 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
2815 llvm::Value *FitsInFP =
2816 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
2817 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
2818 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
2819 }
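// For example, with neededInt = 2 the argument fits only while gp_offset <=
// 48 - 2*8 = 32, i.e. while at least two of the six 8-byte GPR save slots
// remain; SSE slots are 16 bytes each, hence the 176 - num_fp * 16 bound.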
2821 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2822 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2823 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2824 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2826 // Emit code to load the value if it was passed in registers.
2828 CGF.EmitBlock(InRegBlock);
2830 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2831 // an offset of l->gp_offset and/or l->fp_offset. This may require
2832 // copying to a temporary location in case the parameter is passed
2833 // in different register classes or requires an alignment greater
2834 // than 8 for general purpose registers and 16 for XMM registers.
2836 // FIXME: This really results in shameful code when we end up needing to
2837 // collect arguments from different places; often what should result in a
2838 // simple assembling of a structure from scattered addresses has many more
2839 // loads than necessary. Can we clean this up?
2840 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
2841 llvm::Value *RegAddr =
2842 CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
2843 "reg_save_area");
2844 if (neededInt && neededSSE) {
2846 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2847 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2848 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2849 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2850 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2851 llvm::Type *TyLo = ST->getElementType(0);
2852 llvm::Type *TyHi = ST->getElementType(1);
2853 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2854 "Unexpected ABI info for mixed regs");
2855 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2856 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2857 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2858 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2859 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
2860 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
2861 llvm::Value *V =
2862 CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2863 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2864 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2865 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2867 RegAddr = CGF.Builder.CreateBitCast(Tmp,
2868 llvm::PointerType::getUnqual(LTy));
2869 } else if (neededInt) {
2870 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2871 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2872 llvm::PointerType::getUnqual(LTy));
2874 // Copy to a temporary if necessary to ensure the appropriate alignment.
2875 std::pair<CharUnits, CharUnits> SizeAlign =
2876 CGF.getContext().getTypeInfoInChars(Ty);
2877 uint64_t TySize = SizeAlign.first.getQuantity();
2878 unsigned TyAlign = SizeAlign.second.getQuantity();
2879 if (TyAlign > 8) {
2880 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2881 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
2882 RegAddr = Tmp;
2883 }
2884 } else if (neededSSE == 1) {
2885 RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2886 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2887 llvm::PointerType::getUnqual(LTy));
2888 } else {
2889 assert(neededSSE == 2 && "Invalid number of needed registers!");
2890 // SSE registers are spaced 16 bytes apart in the register save
2891 // area, we need to collect the two eightbytes together.
2892 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2893 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2894 llvm::Type *DoubleTy = CGF.DoubleTy;
2895 llvm::Type *DblPtrTy =
2896 llvm::PointerType::getUnqual(DoubleTy);
2897 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
2898 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
2899 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2900 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
2901 DblPtrTy));
2902 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2903 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
2904 DblPtrTy));
2905 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2906 RegAddr = CGF.Builder.CreateBitCast(Tmp,
2907 llvm::PointerType::getUnqual(LTy));
2910 // AMD64-ABI 3.5.7p5: Step 5. Set:
2911 // l->gp_offset = l->gp_offset + num_gp * 8
2912 // l->fp_offset = l->fp_offset + num_fp * 16.
2913 if (neededInt) {
2914 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
2915 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
2916 gp_offset_p);
2917 }
2918 if (neededSSE) {
2919 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
2920 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
2921 fp_offset_p);
2922 }
2923 CGF.EmitBranch(ContBlock);
2925 // Emit code to load the value if it was passed in memory.
2927 CGF.EmitBlock(InMemBlock);
2928 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2930 // Return the appropriate result.
2932 CGF.EmitBlock(ContBlock);
2933 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
2934 "vaarg.addr");
2935 ResAddr->addIncoming(RegAddr, InRegBlock);
2936 ResAddr->addIncoming(MemAddr, InMemBlock);
2937 return ResAddr;
2938 }
2940 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
2941 bool IsReturnType) const {
2943 if (Ty->isVoidType())
2944 return ABIArgInfo::getIgnore();
2946 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2947 Ty = EnumTy->getDecl()->getIntegerType();
2949 TypeInfo Info = getContext().getTypeInfo(Ty);
2950 uint64_t Width = Info.Width;
2951 unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();
2953 const RecordType *RT = Ty->getAs<RecordType>();
2955 if (!IsReturnType) {
2956 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
2957 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
2958 }
2959 if (RT) {
2960 if (RT->getDecl()->hasFlexibleArrayMember())
2961 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2963 // FIXME: mingw-w64-gcc emits 128-bit struct as i128
2964 if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
2965 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2966 Width));
2967 }
2969 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
2970 // other targets.
2971 const Type *Base = nullptr;
2972 uint64_t NumElts = 0;
2973 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
2974 if (FreeSSERegs >= NumElts) {
2975 FreeSSERegs -= NumElts;
2976 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
2977 return ABIArgInfo::getDirect();
2978 return ABIArgInfo::getExpand();
2980 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
2984 if (Ty->isMemberPointerType()) {
2985 // If the member pointer is represented by an LLVM int or ptr, pass it
2986 // directly.
2987 llvm::Type *LLTy = CGT.ConvertType(Ty);
2988 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
2989 return ABIArgInfo::getDirect();
2992 if (RT || Ty->isMemberPointerType()) {
2993 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2994 // not 1, 2, 4, or 8 bytes, must be passed by reference."
2995 if (Width > 64 || !llvm::isPowerOf2_64(Width))
2996 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2998 // Otherwise, coerce it to a small integer.
2999 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3000 }
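// For example, a 12-byte struct (not a power of two) is passed by reference
// under this rule, while a 4-byte struct is coerced to i32 and passed
// directly in a register or stack slot.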
3002 // The Bool type is always extended for the ABI; other builtin types are not
3003 // extended.
3004 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3005 if (BT && BT->getKind() == BuiltinType::Bool)
3006 return ABIArgInfo::getExtend();
3008 return ABIArgInfo::getDirect();
3011 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3012 bool IsVectorCall =
3013 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3015 // We can use up to 4 SSE return registers with vectorcall.
3016 unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3017 if (!getCXXABI().classifyReturnType(FI))
3018 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
3020 // We can use up to 6 SSE register parameters with vectorcall.
3021 FreeSSERegs = IsVectorCall ? 6 : 0;
3022 for (auto &I : FI.arguments())
3023 I.info = classify(I.type, FreeSSERegs, false);
3026 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3027 CodeGenFunction &CGF) const {
3028 llvm::Type *BPP = CGF.Int8PtrPtrTy;
3030 CGBuilderTy &Builder = CGF.Builder;
3031 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
3032 "ap");
3033 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3034 llvm::Type *PTy =
3035 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3036 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
3038 uint64_t Offset =
3039 llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
3040 llvm::Value *NextAddr =
3041 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
3043 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3045 return AddrTyped;
3046 }
3050 class NaClX86_64ABIInfo : public ABIInfo {
3052 NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
3053 : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
3054 void computeInfo(CGFunctionInfo &FI) const override;
3055 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3056 CodeGenFunction &CGF) const override;
3058 PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
3059 X86_64ABIInfo NInfo; // Used for everything else.
3062 class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
3065 NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
3066 : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)), HasAVX(HasAVX) {
3068 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
3069 return HasAVX ? 32 : 16;
3075 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3076 if (FI.getASTCallingConvention() == CC_PnaclCall)
3077 PInfo.computeInfo(FI);
3078 else
3079 NInfo.computeInfo(FI);
3080 }
3082 llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3083 CodeGenFunction &CGF) const {
3084 // Always use the native convention; calling pnacl-style varargs functions
3085 // is unsupported.
3086 return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
3087 }
3092 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
3093 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
3095 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
3097 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3098 CodeGenFunction &CGF) const override;
3101 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
3103 PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {}
3105 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3106 // This is recovered from gcc output.
3107 return 1; // r1 is the dedicated stack pointer
3110 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3111 llvm::Value *Address) const override;
3113 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
3114 return 16; // Natural alignment for Altivec vectors.
3120 llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
3121 QualType Ty,
3122 CodeGenFunction &CGF) const {
3123 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3124 // TODO: Implement this. For now ignore.
3125 (void)CTy;
3126 return nullptr;
3127 }
3129 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
3130 bool isInt = Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
3131 llvm::Type *CharPtr = CGF.Int8PtrTy;
3132 llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy;
3134 CGBuilderTy &Builder = CGF.Builder;
3135 llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
3136 llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty);
3137 llvm::Value *FPRPtrAsInt = Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
3138 llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
3139 llvm::Value *OverflowAreaPtrAsInt = Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
3140 llvm::Value *OverflowAreaPtr = Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
3141 llvm::Value *RegsaveAreaPtrAsInt = Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
3142 llvm::Value *RegsaveAreaPtr = Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);
3143 llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");
3144 // Align GPR when Ty is i64.
3145 if (isI64) {
3146 llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
3147 llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
3148 llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
3149 GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);
3150 }
3151 llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
3152 llvm::Value *OverflowArea = Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
3153 llvm::Value *OverflowAreaAsInt = Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
3154 llvm::Value *RegsaveArea = Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
3155 llvm::Value *RegsaveAreaAsInt = Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);
3157 llvm::Value *CC = Builder.CreateICmpULT(isInt ? GPR : FPR,
3158 Builder.getInt8(8), "cond");
3160 llvm::Value *RegConstant = Builder.CreateMul(isInt ? GPR : FPR,
3161 Builder.getInt8(isInt ? 4 : 8));
3163 llvm::Value *OurReg = Builder.CreateAdd(RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));
3165 if (Ty->isFloatingType())
3166 OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));
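// Worked example: the fourth integer argument (GPR index 3) lives at
// regsave_area + 3*4 = 12, while the second float argument (FPR index 1)
// lives at regsave_area + 32 + 1*8 = 40, past the 8*4-byte GPR block.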
3168 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
3169 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
3170 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3172 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
3174 CGF.EmitBlock(UsingRegs);
3176 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3177 llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);
3178 // Increase the GPR/FPR indexes.
3179 if (isInt) {
3180 GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
3181 Builder.CreateStore(GPR, GPRPtr);
3182 } else {
3183 FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
3184 Builder.CreateStore(FPR, FPRPtr);
3185 }
3186 CGF.EmitBranch(Cont);
3188 CGF.EmitBlock(UsingOverflow);
3190 // Increase the overflow area.
3191 llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
3192 OverflowAreaAsInt = Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
3193 Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr), OverflowAreaPtr);
3194 CGF.EmitBranch(Cont);
3196 CGF.EmitBlock(Cont);
3198 llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
3199 Result->addIncoming(Result1, UsingRegs);
3200 Result->addIncoming(Result2, UsingOverflow);
3202 if (Ty->isAggregateType()) {
3203 llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr") ;
3204 return Builder.CreateLoad(AGGPtr, false, "aggr");
3205 }
3207 return Result;
3208 }
3210 bool
3211 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3212 llvm::Value *Address) const {
3213 // This is calculated from the LLVM and GCC tables and verified
3214 // against gcc output. AFAIK all ABIs use the same encoding.
3216 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3218 llvm::IntegerType *i8 = CGF.Int8Ty;
3219 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3220 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3221 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3223 // 0-31: r0-31, the 4-byte general-purpose registers
3224 AssignToArrayRange(Builder, Address, Four8, 0, 31);
3226 // 32-63: fp0-31, the 8-byte floating-point registers
3227 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3229 // 64-76 are various 4-byte special-purpose registers:
3230 // 64: mq
3231 // 65: lr
3232 // 66: ctr
3233 // 67: ap
3234 // 68-75: cr0-7
3235 // 76: xer
3236 AssignToArrayRange(Builder, Address, Four8, 64, 76);
3238 // 77-108: v0-31, the 16-byte vector registers
3239 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3241 // 109: vrsave
3242 // 110: vscr
3243 // 111: spe_acc
3244 // 112: spefscr
3245 // 113: sfp
3246 AssignToArrayRange(Builder, Address, Four8, 109, 113);
3248 return false;
3249 }
3254 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
3255 class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
3256 public:
3257 enum ABIKind { ELFv1 = 0, ELFv2 };
3263 static const unsigned GPRBits = 64;
3264 ABIKind Kind;
3267 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
3268 : DefaultABIInfo(CGT), Kind(Kind) {}
3270 bool isPromotableTypeForABI(QualType Ty) const;
3271 bool isAlignedParamType(QualType Ty) const;
3273 ABIArgInfo classifyReturnType(QualType RetTy) const;
3274 ABIArgInfo classifyArgumentType(QualType Ty) const;
3276 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
3277 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
3278 uint64_t Members) const override;
3280 // TODO: We can add more logic to computeInfo to improve performance.
3281 // Example: For aggregate arguments that fit in a register, we could
3282 // use getDirectInReg (as is done below for structs containing a single
3283 // floating-point value) to avoid pushing them to memory on function
3284 // entry. This would require changing the logic in PPCISelLowering
3285 // when lowering the parameters in the caller and args in the callee.
3286 void computeInfo(CGFunctionInfo &FI) const override {
3287 if (!getCXXABI().classifyReturnType(FI))
3288 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3289 for (auto &I : FI.arguments()) {
3290 // We rely on the default argument classification for the most part.
3291 // One exception: An aggregate containing a single floating-point
3292 // or vector item must be passed in a register if one is available.
3293 const Type *T = isSingleElementStruct(I.type, getContext());
3294 if (T) {
3295 const BuiltinType *BT = T->getAs<BuiltinType>();
3296 if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
3297 (BT && BT->isFloatingPoint())) {
3298 QualType QT(T, 0);
3299 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
3300 continue;
3301 }
3302 }
3303 I.info = classifyArgumentType(I.type);
3304 }
3305 }
3307 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3308 CodeGenFunction &CGF) const override;
3311 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
3313 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
3314 PPC64_SVR4_ABIInfo::ABIKind Kind)
3315 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {}
3317 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3318 // This is recovered from gcc output.
3319 return 1; // r1 is the dedicated stack pointer
3322 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3323 llvm::Value *Address) const override;
3325 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
3326 return 16; // Natural alignment for Altivec and VSX vectors.
3330 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
3332 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
3334 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3335 // This is recovered from gcc output.
3336 return 1; // r1 is the dedicated stack pointer
3339 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3340 llvm::Value *Address) const override;
3342 unsigned getOpenMPSimdDefaultAlignment(QualType) const override {
3343 return 16; // Natural alignment for Altivec vectors.
3349 // Return true if the ABI requires Ty to be passed sign- or zero-
3350 // extended to 64 bits.
3351 bool
3352 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
3353 // Treat an enum type as its underlying type.
3354 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3355 Ty = EnumTy->getDecl()->getIntegerType();
3357 // Promotable integer types are required to be promoted by the ABI.
3358 if (Ty->isPromotableIntegerType())
3359 return true;
3361 // In addition to the usual promotable integer types, we also need to
3362 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
3363 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
3364 switch (BT->getKind()) {
3365 case BuiltinType::Int:
3366 case BuiltinType::UInt:
3367 return true;
3368 default:
3369 break;
3370 }
3372 return false;
3373 }
3375 /// isAlignedParamType - Determine whether a type requires 16-byte
3376 /// alignment in the parameter area.
3377 bool
3378 PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
3379 // Complex types are passed just like their elements.
3380 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
3381 Ty = CTy->getElementType();
3383 // Only vector types of size 16 bytes need alignment (larger types are
3384 // passed via reference, smaller types are not aligned).
3385 if (Ty->isVectorType())
3386 return getContext().getTypeSize(Ty) == 128;
3388 // For single-element float/vector structs, we consider the whole type
3389 // to have the same alignment requirements as its single element.
3390 const Type *AlignAsType = nullptr;
3391 const Type *EltType = isSingleElementStruct(Ty, getContext());
3392 if (EltType) {
3393 const BuiltinType *BT = EltType->getAs<BuiltinType>();
3394 if ((EltType->isVectorType() &&
3395 getContext().getTypeSize(EltType) == 128) ||
3396 (BT && BT->isFloatingPoint()))
3397 AlignAsType = EltType;
3398 }
3400 // Likewise for ELFv2 homogeneous aggregates.
3401 const Type *Base = nullptr;
3402 uint64_t Members = 0;
3403 if (!AlignAsType && Kind == ELFv2 &&
3404 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
3405 AlignAsType = Base;
3407 // With special case aggregates, only vector base types need alignment.
3408 if (AlignAsType)
3409 return AlignAsType->isVectorType();
3411 // Otherwise, we only need alignment for any aggregate type that
3412 // has an alignment requirement of >= 16 bytes.
3413 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128)
3414 return true;
3416 return false;
3417 }
3419 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
3420 /// aggregate. Base is set to the base element type, and Members is set
3421 /// to the number of base elements.
3422 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
3423 uint64_t &Members) const {
3424 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
3425 uint64_t NElements = AT->getSize().getZExtValue();
3426 if (NElements == 0)
3427 return false;
3428 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
3429 return false;
3430 Members *= NElements;
3431 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3432 const RecordDecl *RD = RT->getDecl();
3433 if (RD->hasFlexibleArrayMember())
3434 return false;
3436 Members = 0;
3438 // If this is a C++ record, check the bases first.
3439 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3440 for (const auto &I : CXXRD->bases()) {
3441 // Ignore empty records.
3442 if (isEmptyRecord(getContext(), I.getType(), true))
3443 continue;
3445 uint64_t FldMembers;
3446 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
3447 return false;
3449 Members += FldMembers;
3450 }
3451 }
3453 for (const auto *FD : RD->fields()) {
3454 // Ignore (non-zero arrays of) empty records.
3455 QualType FT = FD->getType();
3456 while (const ConstantArrayType *AT =
3457 getContext().getAsConstantArrayType(FT)) {
3458 if (AT->getSize().getZExtValue() == 0)
3460 FT = AT->getElementType();
3462 if (isEmptyRecord(getContext(), FT, true))
3465 // For compatibility with GCC, ignore empty bitfields in C++ mode.
3466 if (getContext().getLangOpts().CPlusPlus &&
3467 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
3470 uint64_t FldMembers;
3471 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
3474 Members = (RD->isUnion() ?
3475 std::max(Members, FldMembers) : Members + FldMembers);
3481 // Ensure there is no padding.
3482 if (getContext().getTypeSize(Base) * Members !=
3483 getContext().getTypeSize(Ty))
3487 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
3489 Ty = CT->getElementType();
3492 // Most ABIs only support float, double, and some vector type widths.
3493 if (!isHomogeneousAggregateBaseType(Ty))
3496 // The base type must be the same for all members. Types that
3497 // agree in both total size and mode (float vs. vector) are
3498 // treated as being equivalent here.
3499 const Type *TyPtr = Ty.getTypePtr();
3503 if (Base->isVectorType() != TyPtr->isVectorType() ||
3504 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
3507 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
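// A worked example (illustrative only): 'struct S { float x, y, z; }'
// recurses into three float fields, so Base = float and Members = 3.
// The padding check then verifies 3 * sizeof(float) == sizeof(S)
// (96 bits == 96 bits), so S is a homogeneous aggregate.  Adding a
// 'double' field would fail the same-size/same-mode base check, and
// trailing padding (e.g. from an over-aligned member) would fail the
// size check.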
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for ELFv2 must have base types of float,
  // double, long double, or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (getContext().getTypeSize(VT) == 128)
      return true;
  }
  return false;
}
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  // Vector types require one register, floating point types require one
  // or two registers depending on their size.
  uint32_t NumRegs =
      Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;

  // Homogeneous Aggregates may occupy at most 8 registers.
  return Members * NumRegs <= 8;
}
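// Worked register arithmetic (illustrative): for Base = float or double,
// NumRegs = (32 + 63) / 64 = 1 or (64 + 63) / 64 = 1, so up to 8 members
// fit.  For Base = long double (128 bits on PPC64), NumRegs =
// (128 + 63) / 64 = 2, so an aggregate qualifies only if
// Members * 2 <= 8, i.e. it has at most 4 members.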
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = isAlignedParamType(Ty) ? 16 : 8;
    uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy = llvm::IntegerType::get(getVMContext(),
                                          llvm::RoundUpToAlignment(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  return (isPromotableTypeForABI(Ty) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
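// A hypothetical trace (not from the original source), assuming GPRBits == 64:
// 'struct { long a, b, c; }' is 192 bits with ABIAlign 8, so RegBits = 64,
// NumRegs = RoundUpToAlignment(192, 64) / 64 = 3, and the argument is
// coerced to [3 x i64].  'struct { int a, b; }' is 64 bits, which takes the
// Bits <= GPRBits branch and is passed as a single i64.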
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return ABIArgInfo::getIndirect(0);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
      } else
        CoerceTy = llvm::IntegerType::get(getVMContext(),
                                          llvm::RoundUpToAlignment(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return ABIArgInfo::getIndirect(0);
  }

  return (isPromotableTypeForABI(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
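// Illustrative only: under ELFv2 with GPRBits == 64, returning
// 'struct { int a, b, c; }' (96 bits) takes the Bits > GPRBits branch and
// is returned as {i64, i64}, while 'struct { int a, b; }' (64 bits) comes
// back as a single i64.  A 24-byte struct exceeds 2 * GPRBits and is
// returned indirectly through an sret pointer.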
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Handle types that require 16-byte alignment in the parameter save area.
  if (isAlignedParamType(Ty)) {
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  // Update the va_list pointer.  The pointer should be bumped by the
  // size of the object.  We can trust getTypeSize() except for a complex
  // type whose base type is smaller than a doubleword.  For these, the
  // size of the object is 16 bytes; see below for further explanation.
  unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
  QualType BaseTy;
  unsigned CplxBaseSize = 0;

  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    BaseTy = CTy->getElementType();
    CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
    if (CplxBaseSize < 8)
      SizeInBytes = 16;
  }

  unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords.  However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly.  So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (CplxBaseSize && CplxBaseSize < 8) {
    llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *ImagAddr = RealAddr;
    if (CGF.CGM.getDataLayout().isBigEndian()) {
      RealAddr =
          Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
      ImagAddr =
          Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
    } else {
      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
    }
    llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
    RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
    ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
    llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
    llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
    llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
                                            "vacplx");
    llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
    llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
    Builder.CreateStore(Real, RealPtr, false);
    Builder.CreateStore(Imag, ImagPtr, false);
    return Ptr;
  }

  // If the argument is smaller than 8 bytes, it is right-adjusted in
  // its doubleword slot.  Adjust the pointer to pick it up from the
  // correct offset.
  if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
  }

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  return Builder.CreateBitCast(Addr, PTy);
}
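// Worked offsets (illustrative): for a big-endian 'float _Complex' vararg,
// CplxBaseSize = 4, so the real part is loaded at ap + (8 - 4) = ap + 4 and
// the imaginary part at ap + (16 - 4) = ap + 12, then both are packed into
// a temporary {float, float}.  For a plain 4-byte int, the pointer is
// simply bumped by 8 - 4 = 4 to pick up the right-adjusted value.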
static bool
PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                              llvm::Value *Address) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output.  AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75: cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
  CodeGen::CodeGenFunction &CGF,
  llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}
//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//
namespace {

class AArch64ABIInfo : public ABIInfo {
public:
  enum ABIKind {
    AAPCS = 0,
    DarwinPCS
  };

private:
  ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type);
  }

  llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
                               CodeGenFunction &CGF) const;

  llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
                              CodeGenFunction &CGF) const;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override {
    return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }
};
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }
};

}
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
      return ABIArgInfo::getDirect(ResType);
    }
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend()
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
                                          CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++
  // mode elsewhere for GNU compatibility.
  if (isEmptyRecord(getContext(), Ty, true)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(Ty, Base, Members)) {
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 128) {
    unsigned Alignment = getContext().getTypeAlign(Ty);
    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
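// Hypothetical classification trace (not part of the original source):
// 'struct { int a, b, c; }' is 96 bits with 4-byte alignment, so Size
// rounds up to 128 and the Alignment < 128 branch coerces it to [2 x i64].
// 'struct { __int128 x; }' has 16-byte alignment and is passed as a single
// i128 instead, while a 24-byte struct exceeds 128 bits and goes indirect.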
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend()
                : ABIArgInfo::getDirect());
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 128) {
    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(0);
}
/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2 between 1 and 16.
    if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
      return true;
    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
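// Illustrative examples: a 2 x float vector (64 bits, power-of-2 element
// count) and a 4 x i32 vector (128 bits) are both legal.  A 3-element
// vector fails the power-of-2 test, and a <1 x i128> is rejected by the
// NumElements == 1 special case even though its total size is 128 bits.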
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}
llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
                                            QualType Ty,
                                            CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  auto &Ctx = CGF.getContext();

  llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::RoundUpToAlignment(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
  reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
  llvm::Value *RegAddr = nullptr;
  llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
    int Offset = 0;

    if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
      Offset = 16 - Ctx.getTypeSize(Base) / 8;
    for (unsigned i = 0; i < NumMembers; ++i) {
      llvm::Value *BaseOffset =
          llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateBitCast(
          LoadAddr, llvm::PointerType::getUnqual(BaseTy));
      llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.
    unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
      int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
      BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);

      BaseAddr = CGF.Builder.CreateAdd(
          BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

      BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
    }

    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
  stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackAddr = CGF.Builder.CreateAnd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  uint64_t StackSize;
  if (IsIndirect)
    StackSize = 8;
  else
    StackSize = Ctx.getTypeSize(Ty) / 8;

  // All stack slots are 8 bytes.
  StackSize = llvm::RoundUpToAlignment(StackSize, 8);

  llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      Ctx.getTypeSize(Ty) < 64) {
    int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(OnStackAddr, OnStackBlock);

  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");

  return ResAddr;
}
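// Illustrative walk-through (hypothetical values, little-endian target):
// for an HFA of two doubles, classification yields [2 x double], so
// NumRegs = 2, IsFPR = true, and RegSize = 16 * 2 = 32.  If __vr_offs is,
// say, -128 (all eight vector registers still free), the new offset -96 is
// still <= 0, so the two elements are loaded from __vr_top - 128 and
// __vr_top - 112 (16 bytes apart) and repacked contiguously into a
// temporary before a pointer to it is returned.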
llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr,
                                             QualType Ty,
                                             CodeGenFunction &CGF) const {
  // We do not support va_arg for aggregates or illegal vector types.
  // Lower VAArg here for these cases and use the LLVM va_arg instruction for
  // other cases.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return nullptr;

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;

  const Type *Base = nullptr;
  uint64_t Members = 0;
  bool isHA = isHomogeneousAggregate(Ty, Base, Members);

  bool isIndirect = false;
  // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
  // be passed indirectly.
  if (Size > 16 && !isHA) {
    isIndirect = true;
    Size = 8;
    Align = 8;
  }

  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  const uint64_t MinABIAlign = 8;
  if (Align > MinABIAlign) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
    llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
    Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
  }

  uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
  llvm::Value *NextAddr = Builder.CreateGEP(
      Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (isIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}
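// Worked example (hypothetical addresses): reading a 16-byte-aligned,
// 16-byte struct from ap.cur = 0x1004 first bumps the pointer by
// Align - 1 = 15 and masks with ~15, yielding 0x1010; ap is then advanced
// by RoundUpToAlignment(16, 8) = 16 to 0x1020 for the next va_arg.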
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//
namespace {

class ARMABIInfo : public ABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP
  };

private:
  ABIKind Kind;
  mutable int VFPRegs[16];
  const unsigned NumVFPs;
  const unsigned NumGPRs;
  mutable unsigned AllocatedGPRs;
  mutable unsigned AllocatedVFPs;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
    NumVFPs(16), NumGPRs(4) {
    setCCs();
    resetAllocatedRegs();
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
      return true;
    default:
      return false;
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
      return true;
    default:
      return false;
    }
  }

  ABIKind getABIKind() const { return Kind; }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  bool &IsCPRC) const;
  bool isIllegalVectorType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();

  void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
  void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
  void resetAllocatedRegs(void) const;
};
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
    :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                      llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                              B));
  }
};

}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // To correctly handle Homogeneous Aggregate, we need to keep track of the
  // VFP registers allocated so far.
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  resetAllocatedRegs();

  if (getCXXABI().classifyReturnType(FI)) {
    if (FI.getReturnInfo().isIndirect())
      markAllocatedGPRs(1, 1);
  } else {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic());
  }

  for (auto &I : FI.arguments()) {
    unsigned PreAllocationVFPs = AllocatedVFPs;
    unsigned PreAllocationGPRs = AllocatedGPRs;
    bool IsCPRC = false;
    // 6.1.2.3 There is one VFP co-processor register class using registers
    // s0-s15 (d0-d7) for passing arguments.
    I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC);

    // If we have allocated some arguments onto the stack (due to running
    // out of VFP registers), we cannot split an argument between GPRs and
    // the stack. If this situation occurs, we add padding to prevent the
    // GPRs from being used. In this situation, the current argument could
    // only be allocated by rule C.8, so rule C.6 would mark these GPRs as
    // unusable anyway.
    // We do not have to do this if the argument is being passed ByVal, as the
    // backend can handle that situation correctly.
    const bool StackUsed = PreAllocationGPRs > NumGPRs || PreAllocationVFPs > NumVFPs;
    const bool IsByVal = I.info.isIndirect() && I.info.getIndirectByVal();
    if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs &&
        StackUsed && !IsByVal) {
      llvm::Type *PaddingTy = llvm::ArrayType::get(
          llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
      if (I.info.canHaveCoerceToType()) {
        I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */,
                                       0 /* offset */, PaddingTy, true);
      } else {
        I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */,
                                       PaddingTy, true);
      }
    }
  }

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}
/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case APCS: return llvm::CallingConv::ARM_APCS;
  case AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;

  BuiltinCC = (getABIKind() == APCS ?
               llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
}
/// markAllocatedVFPs - update VFPRegs according to the alignment and
/// number of VFP registers (unit is S register) requested.
void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
                                   unsigned NumRequired) const {
  // Early exit.
  if (AllocatedVFPs >= 16) {
    // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on
    // the stack.
    AllocatedVFPs = 17;
    return;
  }
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  for (unsigned I = 0; I < 16; I += Alignment) {
    bool FoundSlot = true;
    for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
      if (J >= 16 || VFPRegs[J]) {
        FoundSlot = false;
        break;
      }
    if (FoundSlot) {
      for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
        VFPRegs[J] = 1;
      AllocatedVFPs += NumRequired;
      return;
    }
  }
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  for (unsigned I = 0; I < 16; I++)
    VFPRegs[I] = 1;
  AllocatedVFPs = 17; // We do not have enough VFP registers.
}
/// Update AllocatedGPRs to record the number of general purpose registers
/// which have been allocated. It is valid for AllocatedGPRs to go above 4,
/// this represents arguments being stored on the stack.
void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
                                   unsigned NumRequired) const {
  assert((Alignment == 1 || Alignment == 2) &&
         "Alignment is one or two registers (4 or 8 bytes)");

  if (Alignment == 2 && AllocatedGPRs & 0x1)
    AllocatedGPRs += 1;

  AllocatedGPRs += NumRequired;
}
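// Illustrative trace (not in the original): with AllocatedGPRs == 1 (r0
// taken), requesting a doubleword (Alignment == 2, NumRequired == 2) first
// rounds the count up to 2, so the pair lands in the even/odd registers
// r2/r3 and r1 is left unused, mirroring the AAPCS rule for
// 8-byte-aligned arguments.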
void ARMABIInfo::resetAllocatedRegs(void) const {
  AllocatedGPRs = 0;
  AllocatedVFPs = 0;
  for (unsigned i = 0; i < NumVFPs; ++i)
    VFPRegs[i] = 0;
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            bool &IsCPRC) const {
  // We update number of allocated VFPs according to
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType =
          llvm::Type::getInt32Ty(getVMContext());
      markAllocatedGPRs(1, 1);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic){
        markAllocatedGPRs(2, 2);
      } else {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
        markAllocatedGPRs(2, 4);
      } else {
        markAllocatedVFPs(4, 4);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }
  // Update VFPRegs for legal vector types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      uint64_t Size = getContext().getTypeSize(VT);
      // Size of a legal vector should be power of 2 and above 64.
      markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
      IsCPRC = true;
    }
  }
  // Update VFPRegs for floating point types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::Half ||
          BT->getKind() == BuiltinType::Float) {
        markAllocatedVFPs(1, 1);
        IsCPRC = true;
      }
      if (BT->getKind() == BuiltinType::Double ||
          BT->getKind() == BuiltinType::LongDouble) {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
    }
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    unsigned Size = getContext().getTypeSize(Ty);
    if (!IsCPRC)
      markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsEffectivelyAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Base can be a floating-point or a vector.
      if (Base->isVectorType()) {
        // ElementSize is in number of floats.
        unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
        markAllocatedVFPs(ElementSize,
                          Members * ElementSize);
      } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
        markAllocatedVFPs(1, Members);
      else {
        assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
               Base->isSpecificBuiltinType(BuiltinType::LongDouble));
        markAllocatedVFPs(2, Members * 2);
      }
      IsCPRC = true;
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    // Update Allocated GPRs. Since this is only used when the size of the
    // argument is greater than 64 bytes, this will always use up any available
    // registers (of which there are 4). We also don't care about getting the
    // alignment right, because general-purpose registers cannot be back-filled.
    markAllocatedGPRs(1, 4);
    return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
           /*Realign=*/TyAlign > ABIAlign);
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    markAllocatedGPRs(1, SizeRegs);
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
    markAllocatedGPRs(2, SizeRegs * 2);
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
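// Hypothetical AAPCS-VFP trace (not in the original): for a non-variadic
// call passing 'struct { double d[3]; }', the HFA path marks d0-d2 via
// markAllocatedVFPs(2, 6) and returns getDirect(), so the backend expands
// the aggregate into VFP registers.  The same struct in a variadic call
// instead reaches the GPR-coercion path above and is passed as [3 x i64].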
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
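// Illustrative examples: 'union { int i; char c; }' is integer-like (every
// field sits at offset 0), as is 'struct { short s; }'.  By contrast,
// 'struct { char a, b; }' is not, because 'b' lives at a non-zero offset,
// so under APCS it is returned in memory rather than in r0.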
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                            : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members;
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  markAllocatedGPRs(1, 1);
  return ABIArgInfo::getIndirect(0);
}
/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if ((NumElements & (NumElements - 1)) != 0)
      return true;
    // Size should be greater than 32 bits.
    return Size <= 32;
  }
  return false;
}
bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  bool IsIndirect = false;

  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  else
    TyAlign = 4;
  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  if (isIllegalVectorType(Ty) && Size > 16) {
    IsIndirect = true;
    Size = 4;
    TyAlign = 4;
  }

  // Handle address alignment for ABI alignment > 4 bytes.
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 &&
           "Alignment is not power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  uint64_t Offset =
    llvm::RoundUpToAlignment(Size, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (IsIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
    // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
    // may not be correctly aligned for the vector type. We create an aligned
    // temporary space and copy the content over from ap.cur to the temporary
    // space. This is necessary if the natural alignment of the type is greater
    // than the ABI alignment.
    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
                                                    "var.align");
    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
    Builder.CreateMemCpy(Dst, Src,
        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
        TyAlign, false);
    Addr = AlignedTemp; // The content is in the aligned location.
  }
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}
namespace {

class NaClARMABIInfo : public ABIInfo {
 public:
  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
 private:
  PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
  ARMABIInfo NInfo; // Used for everything else.
};

class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
};

}

void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (FI.getASTCallingConvention() == CC_PnaclCall)
    PInfo.computeInfo(FI);
  else
    static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
}

llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
}
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//
namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CFG) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};

}
ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Note: this is different from the default ABI.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Return aggregate types as indirect by value.
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0, /* byval */ true);

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  llvm_unreachable("NVPTX does not support varargs");
}

void NVPTXTargetCodeGenInfo::
SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata.
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
    }
    if (FD->hasAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
      addNVVMMetadata(F, "maxntidx",
                      FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads());
      // min blocks is a default argument for CUDALaunchBoundsAttr, so getting
      // a zero value from getMinBlocks either means it was not specified in
      // __launch_bounds__ or the user specified a 0 value. In both cases, we
      // don't have to add a PTX directive.
      int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks();
      if (MinCTASM > 0) {
        // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node.
        addNVVMMetadata(F, "minctasm", MinCTASM);
      }
    }
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}

} // end anonymous namespace
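
// For illustration (not emitted verbatim by this file): a CUDA kernel such as
//
//   __global__ void foo() {}
//
// should end up annotated roughly as
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @foo, !"kernel", i32 1}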

//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public ABIInfo {
public:
  SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
};

} // end anonymous namespace

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // 32-bit values must also be promoted.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }

  if (const RecordType *RT = Ty->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    bool Found = false;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();

        // Empty bases don't affect things either way.
        if (isEmptyRecord(getContext(), Base, true))
          continue;

        if (Found)
          return false;
        Found = isFPArgumentType(Base);
        if (!Found)
          return false;
      }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      // Empty bitfields don't affect things either way.
      // Unlike isSingleElementStruct(), empty structure and array fields
      // do count. So do anonymous bitfields that aren't zero-sized.
      if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        continue;

      // Unlike isSingleElementStruct(), arrays do not count.
      // Nested isFPArgumentType structures still do though.
      if (Found)
        return false;
      Found = isFPArgumentType(FD->getType());
      if (!Found)
        return false;
    }

    // Unlike isSingleElementStruct(), trailing padding is allowed.
    // An 8-byte aligned struct s { float f; } is passed as a double.
    return Found;
  }

  return false;
}

llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };

  // Every argument occupies 8 bytes and is passed by preference in either
  // GPRs or FPRs.
  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool InFPRs = isFPArgumentType(Ty);

  llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
  bool IsIndirect = AI.isIndirect();
  unsigned UnpaddedBitSize;
  if (IsIndirect) {
    APTy = llvm::PointerType::getUnqual(APTy);
    UnpaddedBitSize = 64;
  } else
    UnpaddedBitSize = getContext().getTypeSize(Ty);
  unsigned PaddedBitSize = 64;
  assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");

  unsigned PaddedSize = PaddedBitSize / 8;
  unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;

  unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
  if (InFPRs) {
    MaxRegs = 4;          // Maximum of 4 FPR arguments
    RegCountField = 1;    // __fpr
    RegSaveIndex = 16;    // save offset for f0
    RegPadding = 0;       // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5;          // Maximum of 5 GPR arguments
    RegCountField = 0;    // __gpr
    RegSaveIndex = 2;     // save offset for r2
    RegPadding = Padding; // values are passed in the low bits of a GPR
  }

  llvm::Value *RegCountPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
  llvm::Type *IndexTy = RegCount->getType();
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                  "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // Work out the address of an argument register.
  llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
  llvm::Value *ScaledRegCount =
    CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
    llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
  llvm::Value *RegOffset =
    CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  llvm::Value *RegSaveAreaPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
  llvm::Value *RegSaveArea =
    CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  llvm::Value *RawRegAddr =
    CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
  llvm::Value *RegAddr =
    CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");

  // Update the register count.
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
    CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);

  // Work out the address of a stack argument.
  llvm::Value *OverflowArgAreaPtr =
    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
  llvm::Value *OverflowArgArea =
    CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
  llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
  llvm::Value *RawMemAddr =
    CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
  llvm::Value *MemAddr =
    CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");

  // Update overflow_arg_area_ptr pointer.
  llvm::Value *NewOverflowArgArea =
    CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
  CGF.EmitBranch(ContBlock);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);

  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");

  return ResAddr;
}

ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
    return ABIArgInfo::getIndirect(0);
  return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
  // Handle the generic C++ ABI.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // Integers and enums are extended to full register width.
  if (isPromotableIntegerType(Ty))
    return ABIArgInfo::getExtend();

  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Handle small structures.
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so they really
    // fail the size test above.
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    // The structure is passed as an unextended integer, a float, or a double.
    llvm::Type *PassTy;
    if (isFPArgumentType(Ty)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }

  // Non-structure compounds are passed indirectly.
  if (isCompoundType(Ty))
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  return ABIArgInfo::getDirect(nullptr);
}
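
// A few illustrative classifications under the rules above (reasoned from the
// code, not an exhaustive list):
//
//   struct A { float f; };     // 32 bits, FP-like  -> direct as 'float'
//   struct B { int a, b; };    // 64 bits, integer  -> direct as 'i64'
//   struct C { char c[3]; };   // 24 bits, odd size -> indirect, ByVal=false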

//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

} // end anonymous namespace

void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Add attributes goodness.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit ISR vector alias.
      unsigned Num = attr->getNumber() / 2;
      llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
                                "__isr_" + Twine(Num), F);
    }
  }
}

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation. This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;
public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
    : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
      SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
}

void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
                                  SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
    llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);

  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}
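
// A worked example, assuming the N32/N64 configuration where
// MinABIStackAlignInBytes == 8: a 72-bit aggregate yields one i64
// (72 / 64 == 1) plus a trailing i8 for the remaining 72 % 64 == 8 bits,
// i.e. the coercion list { i64, i8 }.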

// In N32/64, an aligned double precision floating point field is passed in
// a register.
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any aligned
  // double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add double type.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}
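
// For illustration, under N32/N64 a struct such as
//
//   struct S { double d; int i; };   // 128 bits including tail padding
//
// should coerce to { double, i64 }: the aligned double is exposed so the
// backend can pass it in an FPR, and the remaining 64 bits are swept up by
// CoerceToIntArgs.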

llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
                                        uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}

ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
  Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();

    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // If we have reached here, aggregates are passed directly by coercing to
    // another structure type. Padding is inserted if the offset of the
    // aggregate is unaligned.
    ABIArgInfo ArgInfo =
        ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                              getPaddingType(OrigOffset, CurrOffset));
    ArgInfo.setInReg(true);
    return ArgInfo;
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // All integral types are promoted to the GPR width.
  if (Ty->isIntegralOrEnumerationType())
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}

llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
  const RecordType *RT = RetTy->getAs<RecordType>();
  SmallVector<llvm::Type*, 8> RTList;

  if (RT && RT->isStructureOrClassType()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    unsigned FieldCnt = Layout.getFieldCount();

    // N32/64 returns struct/classes in floating point registers if the
    // following conditions are met:
    // 1. The size of the struct/class is no larger than 128-bit.
    // 2. The struct/class has one or two fields all of which are floating
    //    point types.
    // 3. The offset of the first field is zero (this follows what gcc does).
    //
    // Any other composite results are returned in integer registers.
    //
    if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
      RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
      for (; b != e; ++b) {
        const BuiltinType *BT = b->getType()->getAs<BuiltinType>();

        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}

ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // O32 doesn't treat zero-sized structs differently from other structs.
  // However, N32/N64 ignores zero-sized return values.
  if (!IsO32 && Size == 0)
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
        return ABIArgInfo::getDirect();

      // O32 returns integer vectors in registers and N32/N64 returns all small
      // aggregates in registers.
      if (!IsO32 ||
          (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
        ABIArgInfo ArgInfo =
            ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
        ArgInfo.setInReg(true);
        return ArgInfo;
      }
    }

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
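
// A concrete sketch of the FPR return rule: on N32/N64,
//
//   struct FP2 { float f; double d; };   // two FP fields, first at offset 0
//
// meets all three conditions above and should be returned as the literal
// struct { float, double } in floating-point registers, whereas adding an
// integer field would route the result through integer registers instead.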

void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  ABIArgInfo &RetInfo = FI.getReturnInfo();
  if (!getCXXABI().classifyReturnType(FI))
    RetInfo = classifyReturnType(FI.getReturnType());

  // Check if a pointer to an aggregate is passed as a hidden argument.
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, Offset);
}

llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                    CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
  // Pointers are also promoted in the same way but this only matters for N32.
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  if ((Ty->isIntegerType() &&
       CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                                Ty->isSignedIntegerType());
  }

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  int64_t TypeAlign =
      std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped;
  llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;

  if (TypeAlign > MinABIStackAlignInBytes) {
    llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
    llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
    llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
    llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
    llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);

  llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
  uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
  llvm::Value *NextAddr =
      Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be as
  // canonical as it gets.

  // Everything on MIPS is 4 bytes. Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                               llvm::GlobalValue *GV,
                                               CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata(
                "opencl.kernel_wg_size_info");

        SmallVector<llvm::Metadata *, 5> Operands;
        Operands.push_back(llvm::ConstantAsMetadata::get(F));

        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

} // end anonymous namespace
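
// For illustration, a kernel declared as
//
//   __kernel __attribute__((reqd_work_group_size(4, 2, 1))) void k(void);
//
// should produce an opencl.kernel_wg_size_info operand along the lines of
//
//   !{void ()* @k, i32 4, i32 2, i32 1, i1 true}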

//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }
};

} // end anonymous namespace

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
  // Pass in the smallest viable integer type.
  if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
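
// For example, a 6-byte struct (48 bits) falls into the Size > 32 branch and
// is coerced to i64, a 3-byte struct (24 bits) becomes i32, and anything
// larger than 8 bytes is passed indirectly byval.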

ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
}

llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // FIXME: Need to handle alignment
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

} // end anonymous namespace

void AMDGPUTargetCodeGenInfo::SetTargetAttributes(
    const Decl *D,
    llvm::GlobalValue *GV,
    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD)
    return;

  if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    uint32_t NumVGPR = Attr->getNumVGPR();
    if (NumVGPR != 0)
      F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    unsigned NumSGPR = Attr->getNumSGPR();
    if (NumSGPR != 0)
      F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
  }
}
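
// For illustration: a function annotated __attribute__((amdgpu_num_vgpr(32)))
// should end up with the string attribute "amdgpu_num_vgpr"="32" on the IR
// function, which the AMDGPU backend treats as a register budget hint.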

//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType Ty, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      if (Ty->getNumElements() != Elems.size())
        return false;
      for (unsigned i = 0, e = Elems.size(); i != e; ++i)
        if (Elems[i] != Ty->getElementType(i))
          return false;
      return true;
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      else
        return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend();

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}
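
// Tying this back to the 'struct mixed' example in the section header: for
// { i32, float } the builder skips the integer (pad() recreates integer
// regions), addFloat() records the aligned 32-bit float and sets InReg, and
// the original struct type is found usable as the coercion type. The call
// then gets lowered with both pieces marked inreg, as shown in the
// 'declare void f(i32 inreg %i, float inreg %f)' example above.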

llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  llvm::Type *BPP = CGF.Int8PtrPtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *ArgAddr;
  unsigned Stride;

  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend:
    Stride = 8;
    ArgAddr = Builder
      .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
                          "extend");
    break;

  case ABIArgInfo::Direct:
    Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    ArgAddr = Addr;
    break;

  case ABIArgInfo::Indirect:
    Stride = 8;
    ArgAddr = Builder.CreateBitCast(Addr,
                                    llvm::PointerType::getUnqual(ArgPtrTy),
                                    "indirect");
    ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
    break;

  case ABIArgInfo::Ignore:
    return llvm::UndefValue::get(ArgPtrTy);
  }

  // Update VAList.
  Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
  Builder.CreateStore(Addr, VAListAddrAsBPP);

  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
}

void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace

bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  // 64-71: the 8-byte special registers (y, psr, wim, tbr, pc, npc, fsr, csr)
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}

//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

/// TypeStringCache caches the meta encodings of Types.
///
/// The reason for caching TypeStrings is two fold:
/// 1. To cache a type's encoding for later uses;
/// 2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
/// NonRecursive:   The type encoding is not recursive;
/// Recursive:      The type encoding is recursive;
/// Incomplete:     An incomplete TypeString;
/// IncompleteUsed: An incomplete TypeString that has been used in a
///                 Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
///   If the cache contains a NonRecursive encoding for the member type, the
///   cached encoding is used;
///
///   If the cache contains a Recursive encoding for the member type, the
///   cached encoding is 'Swapped' out, as it may be incorrect, and...
///
///   If the member is a RecordType, an Incomplete encoding is placed into the
///   cache to break potential recursive inclusion of itself as a sub-member;
///
///   Once a member RecordType has been expanded, its temporary incomplete
///   entry is removed from the cache. If a Recursive encoding was swapped out
///   it is swapped back in;
///
///   If an incomplete entry is used to expand a sub-member, the incomplete
///   entry is marked as IncompleteUsed. The cache keeps count of how many
///   IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
///   If a member's encoding is found to be a NonRecursive or Recursive viz:
///   IncompleteUsedCount==0, the member's encoding is added to the cache.
///   Else the member is part of a recursive type and thus the recursion has
///   been exited too soon for the encoding to be correct for the member.
///
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};

/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc.c_str(); }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

} // End anonymous namespace.

llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
                                                       CGF.Int8PtrPtrTy);
  llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *Val;
  uint64_t ArgSize = 0;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = llvm::UndefValue::get(ArgPtrTy);
    ArgSize = 0;
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreatePointerCast(AP, ArgPtrTy);
    ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    if (ArgSize < 4)
      ArgSize = 4;
    break;
  case ABIArgInfo::Indirect:
    llvm::Value *ArgAddr;
    ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
    ArgAddr = Builder.CreateLoad(ArgAddr);
    Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
    ArgSize = 4;
    break;
  }

  // Increment the VAList.
  if (ArgSize) {
    llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
    Builder.CreateStore(APN, VAListAddrAsBPP);
  }
  return Val;
}

/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert( (E.Str.empty() || E.State == Recursive) &&
          "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert( (E.State == Incomplete ||
           E.State == IncompleteUsed) &&
          "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have
    // used this Recursive sub-member entry, but we assumed the worst when
    // we started (viz: IncompleteCount != 0).
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef();   // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef();   // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef();   // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str.c_str();
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into meta data for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// <https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf>
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::SmallVector<llvm::Metadata *, 2> MDVals;
    MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
    MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
    llvm::NamedMDNode *MD =
      CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS.resync();
      OS << Field->getBitWidthValue(CGM.getContext());
      OS.flush();
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.push_back(FieldEncoding(!Field->getName().empty(), Enc));
  }
  return true;
}

/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
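
// For illustration (format per the Tools Development Guide cited above), a
// definition such as
//
//   struct S { int a, b; };
//
// should encode roughly as "s(S){m(a){si},m(b){si}}"; for a union the member
// encodings would additionally be sorted.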
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
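// Worked example (mine): "enum E { B = 2, A = 1 };" encodes as
// "e(E){m(A){1},m(B){2}}" -- the enumerators are emitted in sorted order,
// not declaration order.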
/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
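// Worked example (mine): for "const volatile int", Lookup = (1<<0) + (1<<2)
// = 5, so Table[5] = "cv:" is prepended, giving the full encoding "cv:si".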
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
    case BuiltinType::Void:
      EncType = "0";
      break;
    case BuiltinType::Bool:
      EncType = "b";
      break;
    case BuiltinType::Char_U:
      EncType = "uc";
      break;
    case BuiltinType::UChar:
      EncType = "uc";
      break;
    case BuiltinType::SChar:
      EncType = "sc";
      break;
    case BuiltinType::UShort:
      EncType = "us";
      break;
    case BuiltinType::Short:
      EncType = "ss";
      break;
    case BuiltinType::UInt:
      EncType = "ui";
      break;
    case BuiltinType::Int:
      EncType = "si";
      break;
    case BuiltinType::ULong:
      EncType = "ul";
      break;
    case BuiltinType::Long:
      EncType = "sl";
      break;
    case BuiltinType::ULongLong:
      EncType = "ull";
      break;
    case BuiltinType::LongLong:
      EncType = "sll";
      break;
    case BuiltinType::Float:
      EncType = "ft";
      break;
    case BuiltinType::Double:
      EncType = "d";
      break;
    case BuiltinType::LongDouble:
      EncType = "ld";
      break;
    default:
      return false;
  }
  Enc += EncType;
  return true;
}
/// Appends a pointer encoding to Enc before calling appendType for the
/// pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
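// Illustrative encodings (mine): "int *" -> "p(si)"; "const char *" ->
// "p(c:uc)", since the qualifier sticks to the pointee, not the pointer.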
/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
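// Illustrative encodings (mine): "int A[10]" -> "a(10:si)", while a global
// of unknown size such as "extern int B[];" -> "a(*:si)".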
/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}
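// Illustrative encodings (mine):
//   int f(int, char);   ->  "f{si}(si,uc)"
//   int g(int, ...);    ->  "f{si}(si,va)"
//   int h(void);        ->  "f{si}(0)"
//   int k();            ->  "f{si}()"   (no prototype)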
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}
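// Note (illustrative): only declarations with C language linkage are
// encoded. A C global "extern int getI(void);" yields "f{si}(0)", whereas
// a C++-linkage symbol produces no typestring metadata at all.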

//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
const llvm::Triple &CodeGenModule::getTriple() const {
  return getTarget().getTriple();
}

bool CodeGenModule::supportsCOMDAT() const {
  return !getTriple().isOSBinFormatMachO();
}
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    {
      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
      if (getTarget().getABI() == "apcs-gnu")
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard" ||
               (CodeGenOpts.FloatABI != "soft" &&
                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
        Kind = ARMABIInfo::AAPCS_VFP;

      switch (Triple.getOS()) {
        case llvm::Triple::NaCl:
          return *(TheTargetCodeGenInfo =
                   new NaClARMTargetCodeGenInfo(Types, Kind));
        default:
          return *(TheTargetCodeGenInfo =
                   new ARMTargetCodeGenInfo(Types, Kind));
      }
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;

      return *(TheTargetCodeGenInfo =
               new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
    } else
      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;

    return *(TheTargetCodeGenInfo =
             new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz:
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return *(TheTargetCodeGenInfo =
               new WinX86_32TargetCodeGenInfo(Types,
                                              IsDarwinVectorABI,
                                              IsSmallStructInRegABI,
                                              IsWin32FloatStructABI,
                                              CodeGenOpts.NumRegisterParameters));
    } else {
      return *(TheTargetCodeGenInfo =
               new X86_32TargetCodeGenInfo(Types,
                                           IsDarwinVectorABI,
                                           IsSmallStructInRegABI,
                                           IsWin32FloatStructABI,
                                           CodeGenOpts.NumRegisterParameters));
    }
  }

  case llvm::Triple::x86_64: {
    bool HasAVX = getTarget().getABI() == "avx";

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
               new WinX86_64TargetCodeGenInfo(Types, HasAVX));
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo =
               new NaClX86_64TargetCodeGenInfo(Types, HasAVX));
    default:
      return *(TheTargetCodeGenInfo =
               new X86_64TargetCodeGenInfo(Types, HasAVX));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
  }
}