//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort
using namespace clang;
using namespace CodeGen;
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateStore(Value, Cell);
  }
}
static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}
/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}
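// For example, given:
//
//   typedef union {
//     int *ip;
//     const volatile int *cvip;
//   } IntPtrUnion __attribute__((transparent_union));
//
// an argument of type IntPtrUnion is classified exactly as if it had the
// type of its first field, 'int *'.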
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}
bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  return false;
}
void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}
bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                       const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}
void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
/// isEmptyField - Return true iff the field is "empty", that is, it is an
/// unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}
/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}
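// For example (in C), both of the following count as empty:
//
//   struct E1 { int : 0; };         // only an unnamed bit-field
//   struct E2 { struct E1 a[4]; };  // a constant array of empty records
//
// whereas a struct with a flexible array member never does.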
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
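// For example:
//
//   struct S1 { double d; };           // single element of type double
//   struct S2 { struct S1 s; };        // still double, found recursively
//   struct S3 { double d[1]; };        // single-element arrays are unwrapped
//   struct S4 { double d; int pad; };  // not single-element: two fields
//
// S1-S3 all report 'double' as the single element; S4 yields nullptr.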
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding.  (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}
/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to avoid
/// inhibiting optimizations.
//
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct())
    return false;

  // We try to expand CLike CXXRecordDecl.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!CXXRD->isCLike())
      return false;
  }

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
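// For example, 'struct P { int x; int y; }' can be expanded into two i32
// arguments whose stack layout matches the byval form, whereas
// 'struct Q { char c; }' cannot, since char is not a 32/64-bit basic type.
// The final size check rejects structs with padding holes, e.g.
// 'struct R { int x; double d; }' under an ABI that 8-byte-aligns double.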
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};
llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return nullptr;
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

    return ABIArgInfo::getIndirect(0);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
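// For example, a function returning 'short' gets the Extend treatment and
// lowers to 'signext i16', while one returning 'int' is simply Direct (i32).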
//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI.  Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};
void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return nullptr;
}
/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
         cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
         IRType->getScalarSizeInBits() != 64;
}
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}
/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}
/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
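// Under vectorcall, a homogeneous vector aggregate such as
//
//   struct HVA4 { __m128 a, b, c, d; };
//
// (at most four members, all of one vector type) still qualifies for SSE
// registers, while a five-member variant would be passed in memory instead.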
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
      : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
};
/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     $0 stays $0, $1 becomes $2, and so on.
/// The result will be:
///     an asm string whose input references are renumbered past the newly
///     added outputs.
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}
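// For example, with FirstIn = 1 and NumNewOuts = 1, "mov $1, $0" is rewritten
// to "mov $2, $0": reference $1 (the first input) is shifted past the newly
// added output, while "$$" escapes (an even run of dollar signs) are left
// untouched.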
/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
  }
  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
            llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
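// For example, with the small-struct-in-register ABI, 'struct { float f; }'
// is returned directly as a bare 'float' (ultimately in st(0)), and
// 'struct { short a, b; }' comes back as a single i32 in EAX, while a
// 12-byte struct fails isRegisterSize and is returned indirectly.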
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }

  return Integer;
}
bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogenous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    if (RT) {
      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
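// The register counts in computeInfo mirror the hardware conventions:
// fastcall and vectorcall pass the first two eligible integer arguments in
// ECX and EDX, and vectorcall additionally uses XMM0-XMM5 for vector
// arguments, hence FreeRegs = 2 and FreeSSERegs = 6.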
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment. For x86_32, each argument is 4
  // byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;
    assert(NumBytes);
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}
static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
  case ABIArgInfo::Expand:
    if (Info.getInReg())
      return false;
    return true;
  }
  llvm_unreachable("invalid enum");
}
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  unsigned StackOffset = 0;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
    llvm::Value *Offset =
        llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
      llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
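// The rounding in EmitVAArg is the usual power-of-two alignment trick:
// with Align = 8 and ap.cur = 0x1004, (0x1004 + 7) & ~7 = 0x1008, so the
// argument is read from the next 8-byte-aligned slot.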
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}
void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                                llvm::AttributeSet::FunctionIndex,
                                                B));
    }
  }
}
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers;  the order is different
  //   on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4).  Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9));

    // 11-16 are st(0..5).  Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

/// The AVX ABI level for X86 targets.
enum class X86AVXABILevel {
  None,
  AVX,
  AVX512
};

/// \returns the size in bits of the largest (native) vector for \p AVXLevel.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers.  In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
      ABIInfo(CGT), AVXLevel(AVXLevel),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool has64BitPointers() const {
    return Has64BitPointers;
  }
};
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
                      bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }
};
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior.  However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig;
    if (getABIInfo().has64BitPointers())
      Sig = (0xeb << 0) | // jmp rel8
            (0x0a << 8) | //           .+0x0c
            ('F' << 16) |
            ('T' << 24);
    else
      Sig = (0xeb << 0) | // jmp rel8
            (0x06 << 8) | //           .+0x08
            ('F' << 16) |
            ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
};
class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
public:
  PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "\01";
    Opt += Lib;
  }
};
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix.
  // If the argument contains a space, enclose it in quotes.
  // This matches the behavior of MSVC.
  bool Quote = (Lib.find(" ") != StringRef::npos);
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.endswith_lower(".lib"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             bool d, bool p, bool w, unsigned RegParms)
      : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
static void addStackProbeSizeTargetAttribute(const Decl *D,
                                             llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &CGM) {
  if (isa<FunctionDecl>(D)) {
    if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
      llvm::Function *Fn = cast<llvm::Function>(GV);

      Fn->addFnAttr("stack-probe-size",
                    llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
    }
  }
}

void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                     llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);

  addStackProbeSizeTargetAttribute(D, GV, CGM);
}
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                     llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);

  addStackProbeSizeTargetAttribute(D, GV, CGM);
}
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic.  Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Lo == Memory || Hi == Memory)
    Lo = Hi = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  //     the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  //     class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  //     INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //     MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
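// Worked example: for 'struct { int i; float f; }' both fields land in the
// low eightbyte, so the accumulated class is merge(merge(NoClass, Integer),
// SSE). Rule (b) yields Integer after the first field and rule (d) keeps
// INTEGER for the pair, so the whole struct is passed in one general-purpose
// register.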

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }
  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}, so classify both
        // Lo and Hi now.
        Lo = Hi = Integer;
      } else {
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
        // straddles an eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj) {
          Lo = Hi = Integer;
        } else {
          Current = Integer;
        }
      }
    } else {
      Current = Integer;
    }
    return;
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      // Arguments of 256 bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128 bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256 bits, but since there are no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      //
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
      // registers if they are "named", i.e. not part of the "..." of a
      // variadic function.
      //
      // Similarly, per 3.2.3. of the AVX512 draft, 512-bit ("named") args are
      // split into eight eightbyte chunks, one SSE and seven SSEUP.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad)
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended)
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble)
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
            cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
            OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }
    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}
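
// End-to-end examples for this routine, worked by hand (illustrative only):
//   struct { char c; short s; int i; } -> Lo = Integer, Hi = NoClass
//   struct { double d; float f[2]; }   -> Lo = SSE,     Hi = SSE
//   struct { long double ld; }         -> Lo = X87,     Hi = X87Up
// The first travels in a single GPR, the second in two XMM eightbytes, and
// the third is passed in memory (see classifyArgumentType below).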

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}
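
// For instance (illustrative): <2 x i32> is 64 bits wide and is rejected
// here (such small vectors are passed as integers instead), while
// <8 x float> only becomes legal once AVX raises the native vector size
// reported by getNativeVectorSizeForAVXABI to 256 bits.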

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of
  // the free integer registers. Doing this when there are free integer
  // registers would require more care, as we would have to ensure that the
  // coerced value did not claim the unused register. That would require either
  // reordering the arguments to the function (so that any subsequent inreg
  // values came first), or only doing this optimization when there were no
  // following arguments that might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}

/// The ABI specifies that a value should be passed in a full vector XMM/YMM
/// register. Pick an LLVM IR type that will be passed as a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType) ||
      IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256) && "Invalid type found!");

  // Return a LLVM IR vector type based on the size of 'Ty'.
  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                               Size / 64);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or to be in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried
    // range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
            cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}
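
// Example query (worked by hand; illustrative only): for
// struct { float x, y, z; } the type is 96 bits wide, so
// BitsContainNoUserData(Ty, 96, 128, Ctx) is trivially true (the queried
// range is off the end of the type). This is exactly what later lets the
// second eightbyte be passed as a lone float rather than a double.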

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::DataLayout &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the field at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}

/// GetSSETypeAtOffset - Return a type that will be passed by the backend in
/// the low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are either double, <2 x float>, or float.
  // We pass as float if the last 4 bytes are just padding. This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}
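
// Illustrative outcomes (worked by hand): struct { float a, b, c; } yields
// <2 x float> for its first eightbyte and, via the padding check above, a
// lone float for its second; struct { double d; } simply yields double.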

/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be
/// anything else that the backend will pass in a GPR that works better (e.g.
/// i8, %foo*, etc).
///
/// IRType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
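
// Illustrative picks (worked by hand from the cases above):
//   struct { char c; }       -> i8  (the rest of the eightbyte is off the
//                                    end of the type)
//   struct { int a; int b; } -> i64 (both ints occupy the eightbyte)
// When nothing smaller fits, the fallback above clamps the integer to
// min(size - offset, 8) bytes so it never extends past the structure.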

/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are usually two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
    // NaCl).
    // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    nullptr);
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk of the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming
  // a first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
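
// Return-classification examples, worked by hand from the cases above
// (illustrative only):
//   long double                  -> (X87, X87Up): returned in %st0
//   _Complex long double         -> ComplexX87: {x86_fp80, x86_fp80}
//   struct { long l; double d; } -> (Integer, SSE): {i64, double} in
//                                   %rax and %xmm0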

ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
    bool isNamedArg) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming
  // a first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++freeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
                                    neededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type, freeIntRegs);
    }
  }
}
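
// Register accounting example (illustrative): given
//   void f(int a, int b, int c, int d, int e, int f, int g);
// the first six ints consume %rdi..%r9, dropping freeIntRegs from 6 to 0;
// 'g' then needs a register that is no longer free, so it is re-classified
// through getIndirectResult() and ends up on the stack instead.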

static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
  llvm::Value *overflow_arg_area_p = CGF.Builder.CreateStructGEP(
      nullptr, VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
    llvm::Value *Offset =
        llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
    overflow_arg_area =
        CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                   overflow_arg_area->getType(),
                                   "overflow_arg_area.align");
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
      CGF.Builder.CreateBitCast(overflow_arg_area,
                                llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}

llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = CGF.getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg*/false);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.

  llvm::Value *InRegs = nullptr;
  llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
  llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;
  if (neededInt) {
    gp_offset_p =
        CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p =
        CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr = CGF.Builder.CreateLoad(
      CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3), "reg_save_area");
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
    llvm::Value *V =
        CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));

    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  } else if (neededInt) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));

    // Copy to a temporary if necessary to ensure the appropriate alignment.
    std::pair<CharUnits, CharUnits> SizeAlign =
        CGF.getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = SizeAlign.first.getQuantity();
    unsigned TyAlign = SizeAlign.second.getQuantity();
    if (TyAlign > 8) {
      llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
      RegAddr = Tmp;
    }
  } else if (neededSSE == 1) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, we need to collect the two eightbytes together.
    llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
    llvm::Type *DoubleTy = CGF.DoubleTy;
    llvm::Type *DblPtrTy =
        llvm::PointerType::getUnqual(DoubleTy);
    llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
    llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(ST, Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
                                                 "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}

ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType) const {

  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  unsigned Align = getContext().toCharUnitsFromBits(Info.Align).getQuantity();

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    if (RT->getDecl()->hasFlexibleArrayMember())
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    // FIXME: mingw-w64-gcc emits 128-bit struct as i128
    if (Width == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Width));
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar to
  // other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (FreeSSERegs >= NumElts) {
      FreeSSERegs -= NumElts;
      if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
  }

  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }

  // Bool type is always extended to the ABI, other builtin types are not
  // extended.
  const BuiltinType *BT = Ty->getAs<BuiltinType>();
  if (BT && BT->getKind() == BuiltinType::Bool)
    return ABIArgInfo::getExtend();

  return ABIArgInfo::getDirect();
}
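
// MS x64 examples, worked by hand from the rules above (illustrative only):
//   struct { int a; int b; }        // 8 bytes, power of two -> direct i64
//   struct { int a; int b; int c; } // 12 bytes -> indirect, no byval
//   bool                            // -> Extend, per the bool rule above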

void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  bool IsVectorCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;

  // We can use up to 4 SSE return registers with vectorcall.
  unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);

  // We can use up to 6 SSE register parameters with vectorcall.
  FreeSSERegs = IsVectorCall ? 6 : 0;
  for (auto &I : FI.arguments())
    I.info = classify(I.type, FreeSSERegs, false);
}

llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                         CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
      llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}

// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
public:
  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
}

llvm::Value *PPC32_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return nullptr;
  }

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt =
      Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
  llvm::Type *CharPtr = CGF.Int8PtrTy;
  llvm::Type *CharPtrPtr = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *GPRPtr = Builder.CreateBitCast(VAListAddr, CharPtr, "gprptr");
  llvm::Value *GPRPtrAsInt = Builder.CreatePtrToInt(GPRPtr, CGF.Int32Ty);
  llvm::Value *FPRPtrAsInt =
      Builder.CreateAdd(GPRPtrAsInt, Builder.getInt32(1));
  llvm::Value *FPRPtr = Builder.CreateIntToPtr(FPRPtrAsInt, CharPtr);
  llvm::Value *OverflowAreaPtrAsInt =
      Builder.CreateAdd(FPRPtrAsInt, Builder.getInt32(3));
  llvm::Value *OverflowAreaPtr =
      Builder.CreateIntToPtr(OverflowAreaPtrAsInt, CharPtrPtr);
  llvm::Value *RegsaveAreaPtrAsInt =
      Builder.CreateAdd(OverflowAreaPtrAsInt, Builder.getInt32(4));
  llvm::Value *RegsaveAreaPtr =
      Builder.CreateIntToPtr(RegsaveAreaPtrAsInt, CharPtrPtr);
  llvm::Value *GPR = Builder.CreateLoad(GPRPtr, false, "gpr");
  // Align GPR when TY is i64.
  if (isI64) {
    llvm::Value *GPRAnd = Builder.CreateAnd(GPR, Builder.getInt8(1));
    llvm::Value *CC64 = Builder.CreateICmpEQ(GPRAnd, Builder.getInt8(1));
    llvm::Value *GPRPlusOne = Builder.CreateAdd(GPR, Builder.getInt8(1));
    GPR = Builder.CreateSelect(CC64, GPRPlusOne, GPR);
  }
  llvm::Value *FPR = Builder.CreateLoad(FPRPtr, false, "fpr");
  llvm::Value *OverflowArea =
      Builder.CreateLoad(OverflowAreaPtr, false, "overflow_area");
  llvm::Value *OverflowAreaAsInt =
      Builder.CreatePtrToInt(OverflowArea, CGF.Int32Ty);
  llvm::Value *RegsaveArea =
      Builder.CreateLoad(RegsaveAreaPtr, false, "regsave_area");
  llvm::Value *RegsaveAreaAsInt =
      Builder.CreatePtrToInt(RegsaveArea, CGF.Int32Ty);

  llvm::Value *CC =
      Builder.CreateICmpULT(isInt ? GPR : FPR, Builder.getInt8(8), "cond");

  llvm::Value *RegConstant =
      Builder.CreateMul(isInt ? GPR : FPR, Builder.getInt8(isInt ? 4 : 8));

  llvm::Value *OurReg = Builder.CreateAdd(
      RegsaveAreaAsInt, Builder.CreateSExt(RegConstant, CGF.Int32Ty));

  if (Ty->isFloatingType())
    OurReg = Builder.CreateAdd(OurReg, Builder.getInt32(32));

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  CGF.EmitBlock(UsingRegs);

  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *Result1 = Builder.CreateIntToPtr(OurReg, PTy);
  // Increase the GPR/FPR indexes.
  if (isInt) {
    GPR = Builder.CreateAdd(GPR, Builder.getInt8(isI64 ? 2 : 1));
    Builder.CreateStore(GPR, GPRPtr);
  } else {
    FPR = Builder.CreateAdd(FPR, Builder.getInt8(1));
    Builder.CreateStore(FPR, FPRPtr);
  }
  CGF.EmitBranch(Cont);

  CGF.EmitBlock(UsingOverflow);

  // Increase the overflow area.
  llvm::Value *Result2 = Builder.CreateIntToPtr(OverflowAreaAsInt, PTy);
  OverflowAreaAsInt =
      Builder.CreateAdd(OverflowAreaAsInt, Builder.getInt32(isInt ? 4 : 8));
  Builder.CreateStore(Builder.CreateIntToPtr(OverflowAreaAsInt, CharPtr),
                      OverflowAreaPtr);
  CGF.EmitBranch(Cont);

  CGF.EmitBlock(Cont);

  llvm::PHINode *Result = CGF.Builder.CreatePHI(PTy, 2, "vaarg.addr");
  Result->addIncoming(Result1, UsingRegs);
  Result->addIncoming(Result2, UsingOverflow);

  if (Ty->isAggregateType()) {
    llvm::Value *AGGPtr = Builder.CreateBitCast(Result, CharPtrPtr, "aggrptr");
    return Builder.CreateLoad(AGGPtr, false, "aggr");
  }

  return Result;
}
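
// For reference, the byte offsets hard-coded above correspond to the SVR4
// PPC32 va_list layout (spelled out here as an assumption, not new logic):
//   struct __va_list_tag {
//     unsigned char gpr;       // offset 0: next GPR index (r3..r10)
//     unsigned char fpr;       // offset 1: next FPR index (f1..f8)
//     unsigned short reserved; // offset 2
//     char *overflow_arg_area; // offset 4: next stack argument
//     char *reg_save_area;     // offset 8: 8 saved GPRs, then FPRs at +32
//   };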

bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}

// PowerPC-64

namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
public:
  enum ABIKind {
    ELFv1 = 0,
    ELFv2
  };

private:
  static const unsigned GPRBits = 64;
  ABIKind Kind;
  bool HasQPX;

  // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
  // will be passed in a QPX register.
  bool IsQPXVectorTy(const Type *Ty) const {
    if (!HasQPX)
      return false;

    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      unsigned NumElements = VT->getNumElements();
      if (NumElements == 1)
        return false;

      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
        if (getContext().getTypeSize(Ty) <= 256)
          return true;
      } else if (VT->getElementType()->
                     isSpecificBuiltinType(BuiltinType::Float)) {
        if (getContext().getTypeSize(Ty) <= 128)
          return true;
      }
    }

    return false;
  }

  bool IsQPXVectorTy(QualType Ty) const {
    return IsQPXVectorTy(Ty.getTypePtr());
  }

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX)
      : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}

  bool isPromotableTypeForABI(QualType Ty) const;
  bool isAlignedParamType(QualType Ty, bool &Align32) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry. This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception: an aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if (IsQPXVectorTy(T) ||
            (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
                               PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX)
      : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
}

// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // In addition to the usual promotable integer types, we also need to
  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  return false;
}
3477 /// isAlignedParamType - Determine whether a type requires 16-byte
3478 /// alignment in the parameter area.
3479 bool
3480 PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty, bool &Align32) const {
3481 Align32 = false;
3483 // Complex types are passed just like their elements.
3484 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
3485 Ty = CTy->getElementType();
3487 // Only vector types of size 16 bytes need alignment (larger types are
3488 // passed via reference, smaller types are not aligned).
3489 if (IsQPXVectorTy(Ty)) {
3490 if (getContext().getTypeSize(Ty) > 128)
3491 Align32 = true;
3493 return true;
3494 } else if (Ty->isVectorType()) {
3495 return getContext().getTypeSize(Ty) == 128;
3496 }
3498 // For single-element float/vector structs, we consider the whole type
3499 // to have the same alignment requirements as its single element.
3500 const Type *AlignAsType = nullptr;
3501 const Type *EltType = isSingleElementStruct(Ty, getContext());
3502 if (EltType) {
3503 const BuiltinType *BT = EltType->getAs<BuiltinType>();
3504 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
3505 getContext().getTypeSize(EltType) == 128) ||
3506 (BT && BT->isFloatingPoint()))
3507 AlignAsType = EltType;
3508 }
3510 // Likewise for ELFv2 homogeneous aggregates.
3511 const Type *Base = nullptr;
3512 uint64_t Members = 0;
3513 if (!AlignAsType && Kind == ELFv2 &&
3514 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
3515 AlignAsType = Base;
3517 // With special case aggregates, only vector base types need alignment.
3518 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
3519 if (getContext().getTypeSize(AlignAsType) > 128)
3520 Align32 = true;
3522 return true;
3523 } else if (AlignAsType) {
3524 return AlignAsType->isVectorType();
3525 }
3527 // Otherwise, we only need alignment for any aggregate type that
3528 // has an alignment requirement of >= 16 bytes.
3529 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
3530 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
3531 Align32 = true;
3532 return true;
3533 }
3535 return false;
3536 }
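// Worked examples (editorial annotation): a 16-byte Altivec 'vector int'
// returns true (16-byte alignment); a 32-byte QPX vector sets Align32 and
// asks for 32-byte alignment; a plain 'struct { long x; }' returns false
// and keeps the default 8-byte doubleword slot.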
3538 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
3539 /// aggregate. Base is set to the base element type, and Members is set
3540 /// to the number of base elements.
3541 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
3542 uint64_t &Members) const {
3543 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
3544 uint64_t NElements = AT->getSize().getZExtValue();
3545 if (NElements == 0)
3546 return false;
3547 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
3548 return false;
3549 Members *= NElements;
3550 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3551 const RecordDecl *RD = RT->getDecl();
3552 if (RD->hasFlexibleArrayMember())
3553 return false;
3555 Members = 0;
3557 // If this is a C++ record, check the bases first.
3558 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3559 for (const auto &I : CXXRD->bases()) {
3560 // Ignore empty records.
3561 if (isEmptyRecord(getContext(), I.getType(), true))
3562 continue;
3564 uint64_t FldMembers;
3565 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
3566 return false;
3568 Members += FldMembers;
3569 }
3570 }
3572 for (const auto *FD : RD->fields()) {
3573 // Ignore (non-zero arrays of) empty records.
3574 QualType FT = FD->getType();
3575 while (const ConstantArrayType *AT =
3576 getContext().getAsConstantArrayType(FT)) {
3577 if (AT->getSize().getZExtValue() == 0)
3578 return false;
3579 FT = AT->getElementType();
3580 }
3581 if (isEmptyRecord(getContext(), FT, true))
3582 continue;
3584 // For compatibility with GCC, ignore empty bitfields in C++ mode.
3585 if (getContext().getLangOpts().CPlusPlus &&
3586 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
3587 continue;
3589 uint64_t FldMembers;
3590 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
3591 return false;
3593 Members = (RD->isUnion() ?
3594 std::max(Members, FldMembers) : Members + FldMembers);
3595 }
3597 if (!Base)
3598 return false;
3600 // Ensure there is no padding.
3601 if (getContext().getTypeSize(Base) * Members !=
3602 getContext().getTypeSize(Ty))
3603 return false;
3604 } else {
3605 Members = 1;
3606 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
3607 Members = 2;
3608 Ty = CT->getElementType();
3609 }
3611 // Most ABIs only support float, double, and some vector type widths.
3612 if (!isHomogeneousAggregateBaseType(Ty))
3613 return false;
3615 // The base type must be the same for all members. Types that
3616 // agree in both total size and mode (float vs. vector) are
3617 // treated as being equivalent here.
3618 const Type *TyPtr = Ty.getTypePtr();
3619 if (!Base)
3620 Base = TyPtr;
3622 if (Base->isVectorType() != TyPtr->isVectorType() ||
3623 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
3624 return false;
3625 }
3626 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
3627 }
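// Worked example (editorial annotation): for
//   struct HFA { float a, b; float c[2]; };
// the recursion finds Base = float and Members = 4, and the padding check
// holds (4 * 32 bits == 128 bits), so HFA is homogeneous; whether it may be
// passed that way is then left to isHomogeneousAggregateSmallEnough(),
// which for ELFv2 below caps the aggregate at 8 registers.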
3629 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
3630 // Homogeneous aggregates for ELFv2 must have base types of float,
3631 // double, long double, or 128-bit vectors.
3632 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3633 if (BT->getKind() == BuiltinType::Float ||
3634 BT->getKind() == BuiltinType::Double ||
3635 BT->getKind() == BuiltinType::LongDouble)
3636 return true;
3637 }
3638 if (const VectorType *VT = Ty->getAs<VectorType>()) {
3639 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
3640 return true;
3641 }
3642 return false;
3643 }
3645 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
3646 const Type *Base, uint64_t Members) const {
3647 // Vector types require one register, floating point types require one
3648 // or two registers depending on their size.
3649 uint32_t NumRegs =
3650 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
3652 // Homogeneous Aggregates may occupy at most 8 registers.
3653 return Members * NumRegs <= 8;
3654 }
3656 ABIArgInfo
3657 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
3658 Ty = useFirstFieldIfTransparentUnion(Ty);
3660 if (Ty->isAnyComplexType())
3661 return ABIArgInfo::getDirect();
3663 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
3664 // or via reference (larger than 16 bytes).
3665 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
3666 uint64_t Size = getContext().getTypeSize(Ty);
3667 if (Size > 128)
3668 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
3669 else if (Size < 128) {
3670 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
3671 return ABIArgInfo::getDirect(CoerceTy);
3672 }
3673 }
3675 if (isAggregateTypeForABI(Ty)) {
3676 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
3677 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
3679 bool Align32;
3680 uint64_t ABIAlign = isAlignedParamType(Ty, Align32) ?
3681 (Align32 ? 32 : 16) : 8;
3682 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
3684 // ELFv2 homogeneous aggregates are passed as array types.
3685 const Type *Base = nullptr;
3686 uint64_t Members = 0;
3687 if (Kind == ELFv2 &&
3688 isHomogeneousAggregate(Ty, Base, Members)) {
3689 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
3690 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
3691 return ABIArgInfo::getDirect(CoerceTy);
3692 }
3694 // If an aggregate may end up fully in registers, we do not
3695 // use the ByVal method, but pass the aggregate as array.
3696 // This is usually beneficial since we avoid forcing the
3697 // back-end to store the argument to memory.
3698 uint64_t Bits = getContext().getTypeSize(Ty);
3699 if (Bits > 0 && Bits <= 8 * GPRBits) {
3700 llvm::Type *CoerceTy;
3702 // Types up to 8 bytes are passed as integer type (which will be
3703 // properly aligned in the argument save area doubleword).
3704 if (Bits <= GPRBits)
3705 CoerceTy = llvm::IntegerType::get(getVMContext(),
3706 llvm::RoundUpToAlignment(Bits, 8));
3707 // Larger types are passed as arrays, with the base type selected
3708 // according to the required alignment in the save area.
3709 else {
3710 uint64_t RegBits = ABIAlign * 8;
3711 uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
3712 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
3713 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
3714 }
3716 return ABIArgInfo::getDirect(CoerceTy);
3717 }
3719 // All other aggregates are passed ByVal.
3720 return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
3721 /*Realign=*/TyAlign > ABIAlign);
3722 }
3724 return (isPromotableTypeForABI(Ty) ?
3725 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3726 }
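// Worked example (editorial annotation): 'struct { long a, b, c; }' is
// 192 bits with ABIAlign == 8, inside the 8 * GPRBits window, so it is
// coerced to [3 x i64] rather than passed ByVal; the same data requiring
// 16-byte alignment would be passed in i128 units as [2 x i128].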
3728 ABIArgInfo
3729 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
3730 if (RetTy->isVoidType())
3731 return ABIArgInfo::getIgnore();
3733 if (RetTy->isAnyComplexType())
3734 return ABIArgInfo::getDirect();
3736 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
3737 // or via reference (larger than 16 bytes).
3738 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
3739 uint64_t Size = getContext().getTypeSize(RetTy);
3740 if (Size > 128)
3741 return ABIArgInfo::getIndirect(0);
3742 else if (Size < 128) {
3743 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
3744 return ABIArgInfo::getDirect(CoerceTy);
3745 }
3746 }
3748 if (isAggregateTypeForABI(RetTy)) {
3749 // ELFv2 homogeneous aggregates are returned as array types.
3750 const Type *Base = nullptr;
3751 uint64_t Members = 0;
3752 if (Kind == ELFv2 &&
3753 isHomogeneousAggregate(RetTy, Base, Members)) {
3754 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
3755 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
3756 return ABIArgInfo::getDirect(CoerceTy);
3757 }
3759 // ELFv2 small aggregates are returned in up to two registers.
3760 uint64_t Bits = getContext().getTypeSize(RetTy);
3761 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
3762 if (Bits == 0)
3763 return ABIArgInfo::getIgnore();
3765 llvm::Type *CoerceTy;
3766 if (Bits > GPRBits) {
3767 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
3768 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
3769 } else
3770 CoerceTy = llvm::IntegerType::get(getVMContext(),
3771 llvm::RoundUpToAlignment(Bits, 8));
3772 return ABIArgInfo::getDirect(CoerceTy);
3773 }
3775 // All other aggregates are returned indirectly.
3776 return ABIArgInfo::getIndirect(0);
3777 }
3779 return (isPromotableTypeForABI(RetTy) ?
3780 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
3781 }
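// Worked example (editorial annotation): under ELFv2,
// 'struct { int a, b, c; }' is 96 bits and comes back in two GPRs as
// {i64, i64}, while 'struct { int a; }' rounds up to a single i32; under
// ELFv1 both are returned indirectly through an sret pointer.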
3783 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
3784 llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
3785 QualType Ty,
3786 CodeGenFunction &CGF) const {
3787 llvm::Type *BP = CGF.Int8PtrTy;
3788 llvm::Type *BPP = CGF.Int8PtrPtrTy;
3790 CGBuilderTy &Builder = CGF.Builder;
3791 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3792 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3794 // Handle types that require 16-byte alignment in the parameter save area.
3795 bool Align32;
3796 if (isAlignedParamType(Ty, Align32)) {
3797 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3798 AddrAsInt = Builder.CreateAdd(AddrAsInt,
3799 Builder.getInt64(Align32 ? 31 : 15));
3800 AddrAsInt = Builder.CreateAnd(AddrAsInt,
3801 Builder.getInt64(Align32 ? -32 : -16));
3802 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
3803 }
3805 // Update the va_list pointer. The pointer should be bumped by the
3806 // size of the object. We can trust getTypeSize() except for a complex
3807 // type whose base type is smaller than a doubleword. For these, the
3808 // size of the object is 16 bytes; see below for further explanation.
3809 unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
3810 QualType BaseTy;
3811 unsigned CplxBaseSize = 0;
3813 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3814 BaseTy = CTy->getElementType();
3815 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
3816 if (CplxBaseSize < 8)
3817 SizeInBytes = 16;
3818 }
3820 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
3821 llvm::Value *NextAddr =
3822 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
3823 "ap.next");
3824 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3826 // If we have a complex type and the base type is smaller than 8 bytes,
3827 // the ABI calls for the real and imaginary parts to be right-adjusted
3828 // in separate doublewords. However, Clang expects us to produce a
3829 // pointer to a structure with the two parts packed tightly. So generate
3830 // loads of the real and imaginary parts relative to the va_list pointer,
3831 // and store them to a temporary structure.
3832 if (CplxBaseSize && CplxBaseSize < 8) {
3833 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3834 llvm::Value *ImagAddr = RealAddr;
3835 if (CGF.CGM.getDataLayout().isBigEndian()) {
3836 RealAddr =
3837 Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
3838 ImagAddr =
3839 Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
3840 } else {
3841 ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
3842 }
3843 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
3844 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
3845 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
3846 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
3847 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
3848 llvm::AllocaInst *Ptr =
3849 CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty), "vacplx");
3850 llvm::Value *RealPtr =
3851 Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 0, ".real");
3852 llvm::Value *ImagPtr =
3853 Builder.CreateStructGEP(Ptr->getAllocatedType(), Ptr, 1, ".imag");
3854 Builder.CreateStore(Real, RealPtr, false);
3855 Builder.CreateStore(Imag, ImagPtr, false);
3856 return Ptr;
3857 }
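// Worked example (editorial annotation): for a big-endian '_Complex float'
// the base size is 4 bytes, so the real part is loaded from ap+4 and the
// imaginary part from ap+12 (each right-adjusted in its own doubleword),
// and both are then packed tightly into the 'vacplx' temporary.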
3859 // If the argument is smaller than 8 bytes, it is right-adjusted in
3860 // its doubleword slot. Adjust the pointer to pick it up from the
3861 // correct offset.
3862 if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
3863 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3864 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
3865 Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
3866 }
3868 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
3869 return Builder.CreateBitCast(Addr, PTy);
3870 }
3872 static bool
3873 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3874 llvm::Value *Address) {
3875 // This is calculated from the LLVM and GCC tables and verified
3876 // against gcc output. AFAIK all ABIs use the same encoding.
3878 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3880 llvm::IntegerType *i8 = CGF.Int8Ty;
3881 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3882 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3883 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3885 // 0-31: r0-31, the 8-byte general-purpose registers
3886 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
3888 // 32-63: fp0-31, the 8-byte floating-point registers
3889 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3891 // 64-76 are various 4-byte special-purpose registers:
3892 // 64: mq
3893 // 65: lr
3894 // 66: ctr
3895 // 67: ap
3896 // 68-75 cr0-7
3897 // 76: xer
3898 AssignToArrayRange(Builder, Address, Four8, 64, 76);
3900 // 77-108: v0-31, the 16-byte vector registers
3901 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3903 // 109: vrsave
3904 // 110: vscr
3905 // 111: spe_acc
3906 // 112: spefscr
3907 // 113: sfp
3908 AssignToArrayRange(Builder, Address, Four8, 109, 113);
3910 return false;
3911 }
3913 bool
3914 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3915 CodeGen::CodeGenFunction &CGF,
3916 llvm::Value *Address) const {
3918 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3919 }
3921 bool
3922 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3923 llvm::Value *Address) const {
3925 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
3926 }
3928 //===----------------------------------------------------------------------===//
3929 // AArch64 ABI Implementation
3930 //===----------------------------------------------------------------------===//
3934 class AArch64ABIInfo : public ABIInfo {
3935 public:
3936 enum ABIKind {
3937 AAPCS = 0,
3938 DarwinPCS
3939 };
3941 private:
3942 ABIKind Kind;
3944 public:
3945 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
3948 ABIKind getABIKind() const { return Kind; }
3949 bool isDarwinPCS() const { return Kind == DarwinPCS; }
3951 ABIArgInfo classifyReturnType(QualType RetTy) const;
3952 ABIArgInfo classifyArgumentType(QualType RetTy) const;
3953 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
3954 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
3955 uint64_t Members) const override;
3957 bool isIllegalVectorType(QualType Ty) const;
3959 void computeInfo(CGFunctionInfo &FI) const override {
3960 if (!getCXXABI().classifyReturnType(FI))
3961 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3963 for (auto &it : FI.arguments())
3964 it.info = classifyArgumentType(it.type);
3965 }
3967 llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
3968 CodeGenFunction &CGF) const;
3970 llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3971 CodeGenFunction &CGF) const;
3973 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3974 CodeGenFunction &CGF) const override {
3975 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
3976 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
3977 }
3978 };
3980 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
3981 public:
3982 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
3983 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
3985 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
3986 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
3989 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3990 return 31;
3991 }
3993 bool doesReturnSlotInterfereWithArgs() const override { return false; }
3994 };
3997 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
3998 Ty = useFirstFieldIfTransparentUnion(Ty);
4000 // Handle illegal vector types here.
4001 if (isIllegalVectorType(Ty)) {
4002 uint64_t Size = getContext().getTypeSize(Ty);
4003 if (Size <= 32) {
4004 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4005 return ABIArgInfo::getDirect(ResType);
4006 }
4007 if (Size == 64) {
4008 llvm::Type *ResType =
4009 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4010 return ABIArgInfo::getDirect(ResType);
4011 }
4012 if (Size == 128) {
4013 llvm::Type *ResType =
4014 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
4015 return ABIArgInfo::getDirect(ResType);
4016 }
4017 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
4018 }
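// Worked example (editorial annotation): a three-element vector such as
//   typedef int v3i __attribute__((ext_vector_type(3)));
// fails the power-of-2 element-count test in isIllegalVectorType(), but
// since clang pads its storage out to 128 bits it is coerced to <4 x i32>
// here rather than passed indirectly.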
4020 if (!isAggregateTypeForABI(Ty)) {
4021 // Treat an enum type as its underlying type.
4022 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4023 Ty = EnumTy->getDecl()->getIntegerType();
4025 return (Ty->isPromotableIntegerType() && isDarwinPCS()
4026 ? ABIArgInfo::getExtend()
4027 : ABIArgInfo::getDirect());
4028 }
4030 // Structures with either a non-trivial destructor or a non-trivial
4031 // copy constructor are always indirect.
4032 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4033 return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
4034 CGCXXABI::RAA_DirectInMemory);
4037 // Empty records are always ignored on Darwin, but actually passed in C++ mode
4038 // elsewhere for GNU compatibility.
4039 if (isEmptyRecord(getContext(), Ty, true)) {
4040 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
4041 return ABIArgInfo::getIgnore();
4043 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4044 }
4046 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
4047 const Type *Base = nullptr;
4048 uint64_t Members = 0;
4049 if (isHomogeneousAggregate(Ty, Base, Members)) {
4050 return ABIArgInfo::getDirect(
4051 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
4052 }
4054 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
4055 uint64_t Size = getContext().getTypeSize(Ty);
4056 if (Size <= 128) {
4057 unsigned Alignment = getContext().getTypeAlign(Ty);
4058 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4060 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4061 // For aggregates with 16-byte alignment, we use i128.
4062 if (Alignment < 128 && Size == 128) {
4063 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4064 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4065 }
4066 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4067 }
4069 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
4070 }
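// Worked example (editorial annotation): 'struct { int a, b, c; }' is
// 96 bits with 4-byte alignment; it rounds up to 128 bits and, being
// under-aligned for i128, is passed as [2 x i64]. The same struct with
// __attribute__((aligned(16))) would be passed as a single i128.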
4072 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
4073 if (RetTy->isVoidType())
4074 return ABIArgInfo::getIgnore();
4076 // Large vector types should be returned via memory.
4077 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
4078 return ABIArgInfo::getIndirect(0);
4080 if (!isAggregateTypeForABI(RetTy)) {
4081 // Treat an enum type as its underlying type.
4082 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4083 RetTy = EnumTy->getDecl()->getIntegerType();
4085 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
4086 ? ABIArgInfo::getExtend()
4087 : ABIArgInfo::getDirect());
4088 }
4090 if (isEmptyRecord(getContext(), RetTy, true))
4091 return ABIArgInfo::getIgnore();
4093 const Type *Base = nullptr;
4094 uint64_t Members = 0;
4095 if (isHomogeneousAggregate(RetTy, Base, Members))
4096 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
4097 return ABIArgInfo::getDirect();
4099 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
4100 uint64_t Size = getContext().getTypeSize(RetTy);
4101 if (Size <= 128) {
4102 unsigned Alignment = getContext().getTypeAlign(RetTy);
4103 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4105 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4106 // For aggregates with 16-byte alignment, we use i128.
4107 if (Alignment < 128 && Size == 128) {
4108 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4109 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4110 }
4111 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4112 }
4114 return ABIArgInfo::getIndirect(0);
4115 }
4117 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
4118 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
4119 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4120 // Check whether VT is legal.
4121 unsigned NumElements = VT->getNumElements();
4122 uint64_t Size = getContext().getTypeSize(VT);
4123 // NumElements should be power of 2 between 1 and 16.
4124 if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
4125 return true;
4126 return Size != 64 && (Size != 128 || NumElements == 1);
4127 }
4128 return false;
4129 }
4131 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4132 // Homogeneous aggregates for AAPCS64 must have base types of a floating
4133 // point type or a short-vector type. This is the same as the 32-bit ABI,
4134 // but with the difference that any floating-point type is allowed,
4135 // including __fp16.
4136 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4137 if (BT->isFloatingPoint())
4138 return true;
4139 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4140 unsigned VecSize = getContext().getTypeSize(VT);
4141 if (VecSize == 64 || VecSize == 128)
4142 return true;
4143 }
4144 return false;
4145 }
4147 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
4148 uint64_t Members) const {
4149 return Members <= 4;
4150 }
4152 llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
4153 QualType Ty,
4154 CodeGenFunction &CGF) const {
4155 ABIArgInfo AI = classifyArgumentType(Ty);
4156 bool IsIndirect = AI.isIndirect();
4158 llvm::Type *BaseTy = CGF.ConvertType(Ty);
4159 if (IsIndirect)
4160 BaseTy = llvm::PointerType::getUnqual(BaseTy);
4161 else if (AI.getCoerceToType())
4162 BaseTy = AI.getCoerceToType();
4164 unsigned NumRegs = 1;
4165 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
4166 BaseTy = ArrTy->getElementType();
4167 NumRegs = ArrTy->getNumElements();
4168 }
4169 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
4171 // The AArch64 va_list type and handling is specified in the Procedure Call
4172 // Standard, section B.4:
4173 //
4174 // struct {
4175 // void *__stack;
4176 // void *__gr_top;
4177 // void *__vr_top;
4178 // int __gr_offs;
4179 // int __vr_offs;
4180 // };
4182 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
4183 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4184 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
4185 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4186 auto &Ctx = CGF.getContext();
4188 llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
4189 int reg_top_index;
4190 int RegSize = IsIndirect ? 8 : getContext().getTypeSize(Ty) / 8;
4191 if (!IsFPR) {
4192 // 3 is the field number of __gr_offs
4193 reg_offs_p =
4194 CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "gr_offs_p");
4195 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
4196 reg_top_index = 1; // field number for __gr_top
4197 RegSize = llvm::RoundUpToAlignment(RegSize, 8);
4198 } else {
4199 // 4 is the field number of __vr_offs.
4200 reg_offs_p =
4201 CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 4, "vr_offs_p");
4202 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
4203 reg_top_index = 2; // field number for __vr_top
4204 RegSize = 16 * NumRegs;
4205 }
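// Worked example (editorial annotation): __gr_offs/__vr_offs count upward
// toward zero from the bottom of the register save area; e.g. __gr_offs ==
// -56 means seven unused GPR slots remain. Adding RegSize and checking the
// result against zero (below) decides whether this argument was passed in
// registers or had already spilled to the stack.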
4207 //=======================================
4208 // Find out where argument was passed
4209 //=======================================
4211 // If reg_offs >= 0 we're already using the stack for this type of
4212 // argument. We don't want to keep updating reg_offs (in case it overflows,
4213 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
4214 // whatever they get).
4215 llvm::Value *UsingStack = nullptr;
4216 UsingStack = CGF.Builder.CreateICmpSGE(
4217 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
4219 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
4221 // Otherwise, at least some kind of argument could go in these registers, the
4222 // question is whether this particular type is too big.
4223 CGF.EmitBlock(MaybeRegBlock);
4225 // Integer arguments may need to correct register alignment (for example a
4226 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
4227 // align __gr_offs to calculate the potential address.
4228 if (!IsFPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
4229 int Align = Ctx.getTypeAlign(Ty) / 8;
4231 reg_offs = CGF.Builder.CreateAdd(
4232 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
4233 "align_regoffs");
4234 reg_offs = CGF.Builder.CreateAnd(
4235 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
4236 "aligned_regoffs");
4237 }
4239 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
4240 llvm::Value *NewOffset = nullptr;
4241 NewOffset = CGF.Builder.CreateAdd(
4242 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
4243 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
4245 // Now we're in a position to decide whether this argument really was in
4246 // registers or not.
4247 llvm::Value *InRegs = nullptr;
4248 InRegs = CGF.Builder.CreateICmpSLE(
4249 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
4251 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
4253 //=======================================
4254 // Argument was in registers
4255 //=======================================
4257 // Now we emit the code for if the argument was originally passed in
4258 // registers. First start the appropriate block:
4259 CGF.EmitBlock(InRegBlock);
4261 llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
4262 reg_top_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, reg_top_index,
4263 "reg_top_p");
4264 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
4265 llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
4266 llvm::Value *RegAddr = nullptr;
4267 llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
4269 if (IsIndirect) {
4270 // If it's been passed indirectly (actually a struct), whatever we find from
4271 // stored registers or on the stack will actually be a struct **.
4272 MemTy = llvm::PointerType::getUnqual(MemTy);
4273 }
4275 const Type *Base = nullptr;
4276 uint64_t NumMembers = 0;
4277 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
4278 if (IsHFA && NumMembers > 1) {
4279 // Homogeneous aggregates passed in registers will have their elements split
4280 // and stored 16 bytes apart regardless of size (they're notionally in qN,
4281 // qN+1, ...). We reload and store into a temporary local variable
4282 // contiguously.
4283 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
4284 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
4285 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4286 llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(HFATy);
4288 int Offset = 0;
4289 if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
4290 Offset = 16 - Ctx.getTypeSize(Base) / 8;
4291 for (unsigned i = 0; i < NumMembers; ++i) {
4292 llvm::Value *BaseOffset =
4293 llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
4294 llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
4295 LoadAddr = CGF.Builder.CreateBitCast(
4296 LoadAddr, llvm::PointerType::getUnqual(BaseTy));
4297 llvm::Value *StoreAddr =
4298 CGF.Builder.CreateStructGEP(Tmp->getAllocatedType(), Tmp, i);
4300 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
4301 CGF.Builder.CreateStore(Elem, StoreAddr);
4302 }
4304 RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
4305 } else {
4306 // Otherwise the object is contiguous in memory
4307 unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
4308 if (CGF.CGM.getDataLayout().isBigEndian() &&
4309 (IsHFA || !isAggregateTypeForABI(Ty)) &&
4310 Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
4311 int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
4312 BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);
4314 BaseAddr = CGF.Builder.CreateAdd(
4315 BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
4317 BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
4318 }
4320 RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
4321 }
4323 CGF.EmitBranch(ContBlock);
4325 //=======================================
4326 // Argument was on the stack
4327 //=======================================
4328 CGF.EmitBlock(OnStackBlock);
4330 llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
4331 stack_p = CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 0, "stack_p");
4332 OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");
4334 // Again, stack arguments may need realignment. In this case both integer and
4335 // floating-point ones might be affected.
4336 if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
4337 int Align = Ctx.getTypeAlign(Ty) / 8;
4339 OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4341 OnStackAddr = CGF.Builder.CreateAdd(
4342 OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4344 OnStackAddr = CGF.Builder.CreateAnd(
4345 OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4348 OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4349 }
4351 uint64_t StackSize;
4352 if (IsIndirect)
4353 StackSize = 8;
4354 else
4355 StackSize = Ctx.getTypeSize(Ty) / 8;
4357 // All stack slots are 8 bytes
4358 StackSize = llvm::RoundUpToAlignment(StackSize, 8);
4360 llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
4361 llvm::Value *NewStack =
4362 CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");
4364 // Write the new value of __stack for the next call to va_arg
4365 CGF.Builder.CreateStore(NewStack, stack_p);
4367 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
4368 Ctx.getTypeSize(Ty) < 64) {
4369 int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
4370 OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);
4372 OnStackAddr = CGF.Builder.CreateAdd(
4373 OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");
4375 OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
4376 }
4378 OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);
4380 CGF.EmitBranch(ContBlock);
4382 //=======================================
4383 // Tidy up
4384 //=======================================
4385 CGF.EmitBlock(ContBlock);
4387 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
4388 ResAddr->addIncoming(RegAddr, InRegBlock);
4389 ResAddr->addIncoming(OnStackAddr, OnStackBlock);
4391 if (IsIndirect)
4392 return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");
4394 return ResAddr;
4395 }
4397 llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr,
4398 QualType Ty,
4399 CodeGenFunction &CGF) const {
4400 // We do not support va_arg for aggregates or illegal vector types.
4401 // Lower VAArg here for these cases and use the LLVM va_arg instruction for
4402 // everything else.
4403 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
4404 return nullptr;
4406 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
4407 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
4409 const Type *Base = nullptr;
4410 uint64_t Members = 0;
4411 bool isHA = isHomogeneousAggregate(Ty, Base, Members);
4413 bool isIndirect = false;
4414 // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
4415 // be passed indirectly.
4416 if (Size > 16 && !isHA) {
4417 isIndirect = true;
4418 Size = 8;
4419 Align = 8;
4420 }
4422 llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
4423 llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
4425 CGBuilderTy &Builder = CGF.Builder;
4426 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
4427 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
4429 if (isEmptyRecord(getContext(), Ty, true)) {
4430 // These are ignored for parameter passing purposes.
4431 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4432 return Builder.CreateBitCast(Addr, PTy);
4435 const uint64_t MinABIAlign = 8;
4436 if (Align > MinABIAlign) {
4437 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
4438 Addr = Builder.CreateGEP(Addr, Offset);
4439 llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
4440 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
4441 llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
4442 Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
4445 uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
4446 llvm::Value *NextAddr = Builder.CreateGEP(
4447 Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
4448 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
4450 if (isIndirect)
4451 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
4452 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4453 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
4455 return AddrTyped;
4456 }
4458 //===----------------------------------------------------------------------===//
4459 // ARM ABI Implementation
4460 //===----------------------------------------------------------------------===//
4464 class ARMABIInfo : public ABIInfo {
4465 public:
4466 enum ABIKind {
4467 APCS = 0,
4468 AAPCS = 1,
4469 AAPCS_VFP
4470 };
4472 private:
4473 ABIKind Kind;
4475 public:
4476 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
4477 setCCs();
4478 }
4480 bool isEABI() const {
4481 switch (getTarget().getTriple().getEnvironment()) {
4482 case llvm::Triple::Android:
4483 case llvm::Triple::EABI:
4484 case llvm::Triple::EABIHF:
4485 case llvm::Triple::GNUEABI:
4486 case llvm::Triple::GNUEABIHF:
4487 return true;
4488 default:
4489 return false;
4490 }
4491 }
4493 bool isEABIHF() const {
4494 switch (getTarget().getTriple().getEnvironment()) {
4495 case llvm::Triple::EABIHF:
4496 case llvm::Triple::GNUEABIHF:
4497 return true;
4498 default:
4499 return false;
4500 }
4501 }
4503 ABIKind getABIKind() const { return Kind; }
4505 private:
4506 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
4507 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
4508 bool isIllegalVectorType(QualType Ty) const;
4510 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4511 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4512 uint64_t Members) const override;
4514 void computeInfo(CGFunctionInfo &FI) const override;
4516 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4517 CodeGenFunction &CGF) const override;
4519 llvm::CallingConv::ID getLLVMDefaultCC() const;
4520 llvm::CallingConv::ID getABIDefaultCC() const;
4521 void setCCs();
4522 };
4524 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
4525 public:
4526 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
4527 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
4529 const ARMABIInfo &getABIInfo() const {
4530 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
4533 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4534 return 13;
4535 }
4537 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4538 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
4541 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4542 llvm::Value *Address) const override {
4543 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
4545 // 0-15 are the 16 integer registers.
4546 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
4547 return false;
4548 }
4550 unsigned getSizeOfUnwindException() const override {
4551 if (getABIInfo().isEABI()) return 88;
4552 return TargetCodeGenInfo::getSizeOfUnwindException();
4553 }
4555 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4556 CodeGen::CodeGenModule &CGM) const override {
4557 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
4558 if (!FD)
4559 return;
4561 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
4562 if (!Attr)
4563 return;
4565 const char *Kind;
4566 switch (Attr->getInterrupt()) {
4567 case ARMInterruptAttr::Generic: Kind = ""; break;
4568 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
4569 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
4570 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
4571 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
4572 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
4573 }
4575 llvm::Function *Fn = cast<llvm::Function>(GV);
4577 Fn->addFnAttr("interrupt", Kind);
4579 if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
4580 return;
4582 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
4583 // however this is not necessarily true on taking any interrupt. Instruct
4584 // the backend to perform a realignment as part of the function prologue.
4585 llvm::AttrBuilder B;
4586 B.addStackAlignmentAttr(8);
4587 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
4588 llvm::AttributeSet::get(CGM.getLLVMContext(),
4589 llvm::AttributeSet::FunctionIndex,
4590 B));
4591 }
4592 };
4594 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
4595 void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV,
4596 CodeGen::CodeGenModule &CGM) const;
4599 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
4600 : ARMTargetCodeGenInfo(CGT, K) {}
4602 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4603 CodeGen::CodeGenModule &CGM) const override;
4604 };
4606 void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute(
4607 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
4608 if (!isa<FunctionDecl>(D))
4609 return;
4610 if (CGM.getCodeGenOpts().StackProbeSize == 4096)
4611 return;
4613 llvm::Function *F = cast<llvm::Function>(GV);
4614 F->addFnAttr("stack-probe-size",
4615 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
4616 }
4618 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
4619 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
4620 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
4621 addStackProbeSizeTargetAttribute(D, GV, CGM);
4622 }
4625 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
4626 if (!getCXXABI().classifyReturnType(FI))
4627 FI.getReturnInfo() =
4628 classifyReturnType(FI.getReturnType(), FI.isVariadic());
4630 for (auto &I : FI.arguments())
4631 I.info = classifyArgumentType(I.type, FI.isVariadic());
4633 // Always honor user-specified calling convention.
4634 if (FI.getCallingConvention() != llvm::CallingConv::C)
4635 return;
4637 llvm::CallingConv::ID cc = getRuntimeCC();
4638 if (cc != llvm::CallingConv::C)
4639 FI.setEffectiveCallingConvention(cc);
4640 }
4642 /// Return the default calling convention that LLVM will use.
4643 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
4644 // The default calling convention that LLVM will infer.
4645 if (isEABIHF())
4646 return llvm::CallingConv::ARM_AAPCS_VFP;
4647 else if (isEABI())
4648 return llvm::CallingConv::ARM_AAPCS;
4649 else
4650 return llvm::CallingConv::ARM_APCS;
4651 }
4653 /// Return the calling convention that our ABI would like us to use
4654 /// as the C calling convention.
4655 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
4656 switch (getABIKind()) {
4657 case APCS: return llvm::CallingConv::ARM_APCS;
4658 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
4659 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
4660 }
4661 llvm_unreachable("bad ABI kind");
4662 }
4664 void ARMABIInfo::setCCs() {
4665 assert(getRuntimeCC() == llvm::CallingConv::C);
4667 // Don't muddy up the IR with a ton of explicit annotations if
4668 // they'd just match what LLVM will infer from the triple.
4669 llvm::CallingConv::ID abiCC = getABIDefaultCC();
4670 if (abiCC != getLLVMDefaultCC())
4671 RuntimeCC = abiCC;
4673 BuiltinCC = (getABIKind() == APCS ?
4674 llvm::CallingConv::ARM_APCS : llvm::CallingConv::ARM_AAPCS);
4675 }
4677 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
4678 bool isVariadic) const {
4679 // 6.1.2.1 The following argument types are VFP CPRCs:
4680 // A single-precision floating-point type (including promoted
4681 // half-precision types); A double-precision floating-point type;
4682 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
4683 // with a Base Type of a single- or double-precision floating-point type,
4684 // 64-bit containerized vectors or 128-bit containerized vectors with one
4685 // to four Elements.
4686 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
4688 Ty = useFirstFieldIfTransparentUnion(Ty);
4690 // Handle illegal vector types here.
4691 if (isIllegalVectorType(Ty)) {
4692 uint64_t Size = getContext().getTypeSize(Ty);
4693 if (Size <= 32) {
4694 llvm::Type *ResType =
4695 llvm::Type::getInt32Ty(getVMContext());
4696 return ABIArgInfo::getDirect(ResType);
4697 }
4698 if (Size == 64) {
4699 llvm::Type *ResType = llvm::VectorType::get(
4700 llvm::Type::getInt32Ty(getVMContext()), 2);
4701 return ABIArgInfo::getDirect(ResType);
4702 }
4703 if (Size == 128) {
4704 llvm::Type *ResType = llvm::VectorType::get(
4705 llvm::Type::getInt32Ty(getVMContext()), 4);
4706 return ABIArgInfo::getDirect(ResType);
4707 }
4708 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
4709 }
4711 if (!isAggregateTypeForABI(Ty)) {
4712 // Treat an enum type as its underlying type.
4713 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
4714 Ty = EnumTy->getDecl()->getIntegerType();
4715 }
4717 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
4718 : ABIArgInfo::getDirect());
4719 }
4721 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4722 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
4723 }
4725 // Ignore empty records.
4726 if (isEmptyRecord(getContext(), Ty, true))
4727 return ABIArgInfo::getIgnore();
4729 if (IsEffectivelyAAPCS_VFP) {
4730 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
4731 // into VFP registers.
4732 const Type *Base = nullptr;
4733 uint64_t Members = 0;
4734 if (isHomogeneousAggregate(Ty, Base, Members)) {
4735 assert(Base && "Base class should be set for homogeneous aggregate");
4736 // Base can be a floating-point or a vector.
4737 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
4738 }
4739 }
4741 // Support byval for ARM.
4742 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
4743 // most 8-byte. We realign the indirect argument if type alignment is bigger
4744 // than ABI alignment.
4745 uint64_t ABIAlign = 4;
4746 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
4747 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
4748 getABIKind() == ARMABIInfo::AAPCS)
4749 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
4751 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
4752 return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
4753 /*Realign=*/TyAlign > ABIAlign);
4754 }
4756 // Otherwise, pass by coercing to a structure of the appropriate size.
4757 llvm::Type *ElemTy;
4758 unsigned SizeRegs;
4759 // FIXME: Try to match the types of the arguments more accurately where
4760 // we can.
4761 if (getContext().getTypeAlign(Ty) <= 32) {
4762 ElemTy = llvm::Type::getInt32Ty(getVMContext());
4763 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
4764 } else {
4765 ElemTy = llvm::Type::getInt64Ty(getVMContext());
4766 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
4767 }
4769 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
4770 }
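// Worked example (editorial annotation): a 12-byte 'struct { int a, b, c; }'
// has 4-byte alignment, so ElemTy is i32 and SizeRegs is 3, producing a
// [3 x i32] coercion that occupies three core registers (or stack words).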
4772 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
4773 llvm::LLVMContext &VMContext) {
4774 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
4775 // is called integer-like if its size is less than or equal to one word, and
4776 // the offset of each of its addressable sub-fields is zero.
4778 uint64_t Size = Context.getTypeSize(Ty);
4780 // Check that the type fits in a word.
4781 if (Size > 32)
4782 return false;
4784 // FIXME: Handle vector types!
4785 if (Ty->isVectorType())
4786 return false;
4788 // Float types are never treated as "integer like".
4789 if (Ty->isRealFloatingType())
4790 return false;
4792 // If this is a builtin or pointer type then it is ok.
4793 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
4794 return true;
4796 // Small complex integer types are "integer like".
4797 if (const ComplexType *CT = Ty->getAs<ComplexType>())
4798 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
4800 // Single element and zero sized arrays should be allowed, by the definition
4801 // above, but they are not.
4803 // Otherwise, it must be a record type.
4804 const RecordType *RT = Ty->getAs<RecordType>();
4805 if (!RT) return false;
4807 // Ignore records with flexible arrays.
4808 const RecordDecl *RD = RT->getDecl();
4809 if (RD->hasFlexibleArrayMember())
4812 // Check that all sub-fields are at offset 0, and are themselves "integer
4813 // like".
4814 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
4816 bool HadField = false;
4817 unsigned idx = 0;
4818 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
4819 i != e; ++i, ++idx) {
4820 const FieldDecl *FD = *i;
4822 // Bit-fields are not addressable, we only need to verify they are "integer
4823 // like". We still have to disallow a subsequent non-bitfield, for example:
4824 // struct { int : 0; int x }
4825 // is non-integer like according to gcc.
4826 if (FD->isBitField()) {
4827 if (!RD->isUnion())
4828 HadField = true;
4830 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
4831 return false;
4833 continue;
4834 }
4836 // Check if this field is at offset 0.
4837 if (Layout.getFieldOffset(idx) != 0)
4838 return false;
4840 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
4841 return false;
4843 // Only allow at most one field in a structure. This doesn't match the
4844 // wording above, but follows gcc in situations with a field following an
4845 // empty structure.
4846 if (!RD->isUnion()) {
4847 if (HadField)
4848 return false;
4850 HadField = true;
4851 }
4852 }
4854 return true;
4855 }
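// Worked examples (editorial annotation): 'struct { char c; }' and
// 'union { int i; unsigned u; }' are integer-like (at most one word, all
// sub-fields at offset 0), so APCS returns them in r0; 'struct { float f; }'
// fails the floating-point check and is returned in memory instead.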
4857 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
4858 bool isVariadic) const {
4859 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
4861 if (RetTy->isVoidType())
4862 return ABIArgInfo::getIgnore();
4864 // Large vector types should be returned via memory.
4865 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
4866 return ABIArgInfo::getIndirect(0);
4867 }
4869 if (!isAggregateTypeForABI(RetTy)) {
4870 // Treat an enum type as its underlying type.
4871 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4872 RetTy = EnumTy->getDecl()->getIntegerType();
4874 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
4875 : ABIArgInfo::getDirect();
4876 }
4878 // Are we following APCS?
4879 if (getABIKind() == APCS) {
4880 if (isEmptyRecord(getContext(), RetTy, false))
4881 return ABIArgInfo::getIgnore();
4883 // Complex types are all returned as packed integers.
4885 // FIXME: Consider using 2 x vector types if the back end handles them
4886 // correctly.
4887 if (RetTy->isAnyComplexType())
4888 return ABIArgInfo::getDirect(llvm::IntegerType::get(
4889 getVMContext(), getContext().getTypeSize(RetTy)));
4891 // Integer like structures are returned in r0.
4892 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
4893 // Return in the smallest viable integer type.
4894 uint64_t Size = getContext().getTypeSize(RetTy);
4895 if (Size <= 8)
4896 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4897 if (Size <= 16)
4898 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
4899 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
4900 }
4902 // Otherwise return in memory.
4903 return ABIArgInfo::getIndirect(0);
4904 }
4906 // Otherwise this is an AAPCS variant.
4908 if (isEmptyRecord(getContext(), RetTy, true))
4909 return ABIArgInfo::getIgnore();
4911 // Check for homogeneous aggregates with AAPCS-VFP.
4912 if (IsEffectivelyAAPCS_VFP) {
4913 const Type *Base = nullptr;
4914 uint64_t Members = 0;
4915 if (isHomogeneousAggregate(RetTy, Base, Members)) {
4916 assert(Base && "Base class should be set for homogeneous aggregate");
4917 // Homogeneous Aggregates are returned directly.
4918 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
4919 }
4920 }
4922 // Aggregates <= 4 bytes are returned in r0; other aggregates
4923 // are returned indirectly.
4924 uint64_t Size = getContext().getTypeSize(RetTy);
4925 if (Size <= 32) {
4926 if (getDataLayout().isBigEndian())
4927 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
4928 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
4930 // Return in the smallest viable integer type.
4931 if (Size <= 8)
4932 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4933 if (Size <= 16)
4934 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
4935 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
4936 }
4938 return ABIArgInfo::getIndirect(0);
4939 }
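// Worked example (editorial annotation): under AAPCS, 'struct { char a; }'
// is returned as i8 on little-endian but as a full i32 on big-endian (as if
// loaded by LDR); any aggregate larger than 4 bytes that is not a
// homogeneous aggregate goes back through an sret pointer.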
4941 /// isIllegalVector - check whether Ty is an illegal vector type.
4942 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
4943 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4944 // Check whether VT is legal.
4945 unsigned NumElements = VT->getNumElements();
4946 uint64_t Size = getContext().getTypeSize(VT);
4947 // NumElements should be power of 2.
4948 if ((NumElements & (NumElements - 1)) != 0)
4949 return true;
4950 // Size should be greater than 32 bits.
4951 return Size <= 32;
4952 }
4953 return false;
4954 }
4956 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4957 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
4958 // double, or 64-bit or 128-bit vectors.
4959 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4960 if (BT->getKind() == BuiltinType::Float ||
4961 BT->getKind() == BuiltinType::Double ||
4962 BT->getKind() == BuiltinType::LongDouble)
4963 return true;
4964 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4965 unsigned VecSize = getContext().getTypeSize(VT);
4966 if (VecSize == 64 || VecSize == 128)
4967 return true;
4968 }
4969 return false;
4970 }
4972 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
4973 uint64_t Members) const {
4974 return Members <= 4;
4975 }
4977 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
4978 CodeGenFunction &CGF) const {
4979 llvm::Type *BP = CGF.Int8PtrTy;
4980 llvm::Type *BPP = CGF.Int8PtrPtrTy;
4982 CGBuilderTy &Builder = CGF.Builder;
4983 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
4984 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
4986 if (isEmptyRecord(getContext(), Ty, true)) {
4987 // These are ignored for parameter passing purposes.
4988 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
4989 return Builder.CreateBitCast(Addr, PTy);
4992 uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
4993 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
4994 bool IsIndirect = false;
4996 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
4997 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
4998 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
4999 getABIKind() == ARMABIInfo::AAPCS)
5000 TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
5003 // Use indirect if size of the illegal vector is bigger than 16 bytes.
5004 if (isIllegalVectorType(Ty) && Size > 16) {
5005 IsIndirect = true;
5006 Size = 8;
5007 TyAlign = 8;
5008 }
5010 // Handle address alignment for ABI alignment > 4 bytes.
5011 if (TyAlign > 4) {
5012 assert((TyAlign & (TyAlign - 1)) == 0 &&
5013 "Alignment is not power of 2!");
5014 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
5015 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
5016 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
5017 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
5021 llvm::RoundUpToAlignment(Size, 4);
5022 llvm::Value *NextAddr =
5023 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
5024 "ap.next");
5025 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
5027 if (IsIndirect)
5028 Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
5029 else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
5030 // We can't directly cast ap.cur to pointer to a vector type, since ap.cur
5031 // may not be correctly aligned for the vector type. We create an aligned
5032 // temporary space and copy the content over from ap.cur to the temporary
5033 // space. This is necessary if the natural alignment of the type is greater
5034 // than the ABI alignment.
5035 llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
5036 CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
5037 llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
5038 "var.align");
5039 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
5040 llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
5041 Builder.CreateMemCpy(Dst, Src,
5042 llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
5043 TyAlign, false);
5044 Addr = AlignedTemp; // The content is in the aligned location.
5045 }
5046 llvm::Type *PTy =
5047 llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5048 llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
5050 return AddrTyped;
5051 }
5053 //===----------------------------------------------------------------------===//
5054 // NVPTX ABI Implementation
5055 //===----------------------------------------------------------------------===//
5059 class NVPTXABIInfo : public ABIInfo {
5060 public:
5061 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5063 ABIArgInfo classifyReturnType(QualType RetTy) const;
5064 ABIArgInfo classifyArgumentType(QualType Ty) const;
5066 void computeInfo(CGFunctionInfo &FI) const override;
5067 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5068 CodeGenFunction &CFG) const override;
5069 };
5071 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
5072 public:
5073 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
5074 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
5076 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5077 CodeGen::CodeGenModule &M) const override;
5079 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
5080 // resulting MDNode to the nvvm.annotations MDNode.
5081 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
5082 };
5084 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
5085 if (RetTy->isVoidType())
5086 return ABIArgInfo::getIgnore();
5088 // Note: this is different from the default ABI.
5089 if (!RetTy->isScalarType())
5090 return ABIArgInfo::getDirect();
5092 // Treat an enum type as its underlying type.
5093 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5094 RetTy = EnumTy->getDecl()->getIntegerType();
5096 return (RetTy->isPromotableIntegerType() ?
5097 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5098 }
5100 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
5101 // Treat an enum type as its underlying type.
5102 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5103 Ty = EnumTy->getDecl()->getIntegerType();
5105 // Return aggregates type as indirect by value
5106 if (isAggregateTypeForABI(Ty))
5107 return ABIArgInfo::getIndirect(0, /* byval */ true);
5109 return (Ty->isPromotableIntegerType() ?
5110 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5111 }
5113 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
5114 if (!getCXXABI().classifyReturnType(FI))
5115 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5116 for (auto &I : FI.arguments())
5117 I.info = classifyArgumentType(I.type);
5119 // Always honor user-specified calling convention.
5120 if (FI.getCallingConvention() != llvm::CallingConv::C)
5121 return;
5123 FI.setEffectiveCallingConvention(getRuntimeCC());
5124 }
5126 llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5127 CodeGenFunction &CFG) const {
5128 llvm_unreachable("NVPTX does not support varargs");
5131 void NVPTXTargetCodeGenInfo::
5132 setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5133 CodeGen::CodeGenModule &M) const{
5134 const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
5135 if (!FD) return;
5137 llvm::Function *F = cast<llvm::Function>(GV);
5139 // Perform special handling in OpenCL mode
5140 if (M.getLangOpts().OpenCL) {
5141 // Use OpenCL function attributes to check for kernel functions
5142 // By default, all functions are device functions
5143 if (FD->hasAttr<OpenCLKernelAttr>()) {
5144 // OpenCL __kernel functions get kernel metadata
5145 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5146 addNVVMMetadata(F, "kernel", 1);
5147 // And kernel functions are not subject to inlining
5148 F->addFnAttr(llvm::Attribute::NoInline);
5149 }
5150 }
5152 // Perform special handling in CUDA mode.
5153 if (M.getLangOpts().CUDA) {
5154 // CUDA __global__ functions get a kernel metadata entry. Since
5155 // __global__ functions cannot be called from the device, we do not
5156 // need to set the noinline attribute.
5157 if (FD->hasAttr<CUDAGlobalAttr>()) {
5158 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5159 addNVVMMetadata(F, "kernel", 1);
5161 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
5162 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
5163 llvm::APSInt MaxThreads(32);
5164 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
5166 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
5168 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
5169 // not specified in __launch_bounds__ or if the user specified a 0 value,
5170 // we don't have to add a PTX directive.
5171 if (Attr->getMinBlocks()) {
5172 llvm::APSInt MinBlocks(32);
5173 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
5175 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
5176 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
5182 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
5183 int Operand) {
5184 llvm::Module *M = F->getParent();
5185 llvm::LLVMContext &Ctx = M->getContext();
5187 // Get "nvvm.annotations" metadata node
5188 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
5190 llvm::Metadata *MDVals[] = {
5191 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
5192 llvm::ConstantAsMetadata::get(
5193 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
5194 // Append metadata to nvvm.annotations
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
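// Illustrative sketch (assumed example, not taken from this file): for an
// OpenCL __kernel or CUDA __global__ function @foo, the call
//   addNVVMMetadata(F, "kernel", 1);
// produces a module-level annotation roughly of the form
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @foo, !"kernel", i32 1}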
5199 //===----------------------------------------------------------------------===//
5200 // SystemZ ABI Implementation
5201 //===----------------------------------------------------------------------===//
namespace {
class SystemZABIInfo : public ABIInfo {
  bool HasVector;

public:
  SystemZABIInfo(CodeGenTypes &CGT, bool HV)
5210 : ABIInfo(CGT), HasVector(HV) {}
5212 bool isPromotableIntegerType(QualType Ty) const;
5213 bool isCompoundType(QualType Ty) const;
5214 bool isVectorArgumentType(QualType Ty) const;
5215 bool isFPArgumentType(QualType Ty) const;
5216 QualType GetSingleElementType(QualType Ty) const;
5218 ABIArgInfo classifyReturnType(QualType RetTy) const;
5219 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
5221 void computeInfo(CGFunctionInfo &FI) const override {
5222 if (!getCXXABI().classifyReturnType(FI))
5223 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5224 for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }
5228 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5229 CodeGenFunction &CGF) const override;
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
      : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};
}
5240 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
5241 // Treat an enum type as its underlying type.
5242 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5243 Ty = EnumTy->getDecl()->getIntegerType();
5245 // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;
5249 // 32-bit values must also be promoted.
5250 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5251 switch (BT->getKind()) {
5252 case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }

  return false;
}
5261 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
5262 return (Ty->isAnyComplexType() ||
5263 Ty->isVectorType() ||
5264 isAggregateTypeForABI(Ty));
5267 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
5268 return (HasVector &&
5269 Ty->isVectorType() &&
5270 getContext().getTypeSize(Ty) <= 128);
5273 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
5274 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5275 switch (BT->getKind()) {
5276 case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }

  return false;
}
5286 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
5287 if (const RecordType *RT = Ty->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    QualType Found;
5291 // If this is a C++ record, check the bases first.
5292 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
5293 for (const auto &I : CXXRD->bases()) {
5294 QualType Base = I.getType();
5296 // Empty bases don't affect things either way.
      // Empty bases don't affect things either way.
      if (isEmptyRecord(getContext(), Base, true))
        continue;

      if (!Found.isNull())
        return Ty;
      Found = GetSingleElementType(Base);
    }
5305 // Check the fields.
5306 for (const auto *FD : RD->fields()) {
5307 // For compatibility with GCC, ignore empty bitfields in C++ mode.
5308 // Unlike isSingleElementStruct(), empty structure and array fields
5309 // do count. So do anonymous bitfields that aren't zero-sized.
5310 if (getContext().getLangOpts().CPlusPlus &&
          FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        continue;
5314 // Unlike isSingleElementStruct(), arrays do not count.
5315 // Nested structures still do though.
      if (!Found.isNull())
        return Ty;
      Found = GetSingleElementType(FD->getType());
    }
5321 // Unlike isSingleElementStruct(), trailing padding is allowed.
5322 // An 8-byte aligned struct s { float f; } is passed as a double.
    if (!Found.isNull())
      return Found;
  }
  return Ty;
}
5330 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5331 CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };
5340 // Every non-vector argument occupies 8 bytes and is passed by preference
5341 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
5342 // always passed on the stack.
5343 Ty = CGF.getContext().getCanonicalType(Ty);
5344 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
5345 llvm::Type *APTy = llvm::PointerType::getUnqual(ArgTy);
5346 ABIArgInfo AI = classifyArgumentType(Ty);
5347 bool IsIndirect = AI.isIndirect();
5348 bool InFPRs = false;
5349 bool IsVector = false;
  unsigned UnpaddedBitSize;
  if (IsIndirect) {
    APTy = llvm::PointerType::getUnqual(APTy);
    UnpaddedBitSize = 64;
  } else {
    if (AI.getCoerceToType())
      ArgTy = AI.getCoerceToType();
    InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
    IsVector = ArgTy->isVectorTy();
    UnpaddedBitSize = getContext().getTypeSize(Ty);
  }
5361 unsigned PaddedBitSize = (IsVector && UnpaddedBitSize > 64) ? 128 : 64;
5362 assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
5364 unsigned PaddedSize = PaddedBitSize / 8;
5365 unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
5367 llvm::Type *IndexTy = CGF.Int64Ty;
5368 llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
  if (IsVector) {
    // Work out the address of a vector argument on the stack.
5372 // Vector arguments are always passed in the high bits of a
5373 // single (8 byte) or double (16 byte) stack slot.
5374 llvm::Value *OverflowArgAreaPtr =
5375 CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 2,
5376 "overflow_arg_area_ptr");
5377 llvm::Value *OverflowArgArea =
5378 CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
5379 llvm::Value *MemAddr =
5380 CGF.Builder.CreateBitCast(OverflowArgArea, APTy, "mem_addr");
5382 // Update overflow_arg_area_ptr pointer
5383 llvm::Value *NewOverflowArgArea =
5384 CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
    CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);

    return MemAddr;
  }
  unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
  if (InFPRs) {
    MaxRegs = 4; // Maximum of 4 FPR arguments
    RegCountField = 1; // __fpr
    RegSaveIndex = 16; // save offset for f0
    RegPadding = 0; // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5; // Maximum of 5 GPR arguments
    RegCountField = 0; // __gpr
    RegSaveIndex = 2; // save offset for r2
    RegPadding = Padding; // values are passed in the low bits of a GPR
  }
5403 llvm::Value *RegCountPtr = CGF.Builder.CreateStructGEP(
5404 nullptr, VAListAddr, RegCountField, "reg_count_ptr");
5405 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
5406 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                  "fits_in_regs");
5410 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5411 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
5412 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5413 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
5415 // Emit code to load the value if it was passed in registers.
5416 CGF.EmitBlock(InRegBlock);
5418 // Work out the address of an argument register.
5419 llvm::Value *ScaledRegCount =
5420 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
5421 llvm::Value *RegBase =
5422 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
5423 llvm::Value *RegOffset =
5424 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
5425 llvm::Value *RegSaveAreaPtr =
5426 CGF.Builder.CreateStructGEP(nullptr, VAListAddr, 3, "reg_save_area_ptr");
5427 llvm::Value *RegSaveArea =
5428 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
5429 llvm::Value *RawRegAddr =
5430 CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
5431 llvm::Value *RegAddr =
5432 CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
5434 // Update the register count
5435 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
5436 llvm::Value *NewRegCount =
5437 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
5438 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
5439 CGF.EmitBranch(ContBlock);
5441 // Emit code to load the value if it was passed in memory.
5442 CGF.EmitBlock(InMemBlock);
5444 // Work out the address of a stack argument.
5445 llvm::Value *OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
5446 nullptr, VAListAddr, 2, "overflow_arg_area_ptr");
5447 llvm::Value *OverflowArgArea =
5448 CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
5449 llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
5450 llvm::Value *RawMemAddr =
5451 CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
5452 llvm::Value *MemAddr =
5453 CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
5455 // Update overflow_arg_area_ptr pointer
5456 llvm::Value *NewOverflowArgArea =
5457 CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
5458 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
5459 CGF.EmitBranch(ContBlock);
5461 // Return the appropriate result.
5462 CGF.EmitBlock(ContBlock);
5463 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
5464 ResAddr->addIncoming(RegAddr, InRegBlock);
5465 ResAddr->addIncoming(MemAddr, InMemBlock);
  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");

  return ResAddr;
}
5473 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
5474 if (RetTy->isVoidType())
5475 return ABIArgInfo::getIgnore();
5476 if (isVectorArgumentType(RetTy))
5477 return ABIArgInfo::getDirect();
5478 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
5479 return ABIArgInfo::getIndirect(0);
5480 return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
5484 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
5485 // Handle the generic C++ ABI.
5486 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5487 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5489 // Integers and enums are extended to full register width.
5490 if (isPromotableIntegerType(Ty))
5491 return ABIArgInfo::getExtend();
5493 // Handle vector types and vector-like structure types. Note that
5494 // as opposed to float-like structure types, we do not allow any
5495 // padding for vector-like structures, so verify the sizes match.
5496 uint64_t Size = getContext().getTypeSize(Ty);
5497 QualType SingleElementTy = GetSingleElementType(Ty);
5498 if (isVectorArgumentType(SingleElementTy) &&
5499 getContext().getTypeSize(SingleElementTy) == Size)
5500 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
5502 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
5503 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
5504 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5506 // Handle small structures.
5507 if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so they really
    // fail the size test above.
5510 const RecordDecl *RD = RT->getDecl();
5511 if (RD->hasFlexibleArrayMember())
5512 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
    // The structure is passed as an unextended integer, a float, or a double.
    llvm::Type *PassTy;
    if (isFPArgumentType(SingleElementTy)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }
5527 // Non-structure compounds are passed indirectly.
5528 if (isCompoundType(Ty))
5529 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  return ABIArgInfo::getDirect(nullptr);
}
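// Illustrative examples (assumed, not taken from this file) of the rules
// above:
//   struct { float f; }    is passed directly as a float in an FPR;
//   struct { int i; }      is passed directly as an i32;
//   struct { char c[3]; }  is 3 bytes, not a power-of-2 size, so it is
//                          passed indirectly.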
5534 //===----------------------------------------------------------------------===//
5535 // MSP430 ABI Implementation
5536 //===----------------------------------------------------------------------===//
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
5542 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
5543 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
5544 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
5550 void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
5551 llvm::GlobalValue *GV,
5552 CodeGen::CodeGenModule &M) const {
5553 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
5554 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
5555 // Handle 'interrupt' attribute:
5556 llvm::Function *F = cast<llvm::Function>(GV);
5558 // Step 1: Set ISR calling convention.
5559 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
5561 // Step 2: Add attributes goodness.
5562 F->addFnAttr(llvm::Attribute::NoInline);
5564 // Step 3: Emit ISR vector alias.
5565 unsigned Num = attr->getNumber() / 2;
5566 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
5567 "__isr_" + Twine(Num), F);
5572 //===----------------------------------------------------------------------===//
5573 // MIPS ABI Implementation. This works for both little-endian and
5574 // big-endian variants.
5575 //===----------------------------------------------------------------------===//
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
5581 void CoerceToIntArgs(uint64_t TySize,
5582 SmallVectorImpl<llvm::Type *> &ArgList) const;
5583 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
5584 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;

public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
5588 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
5589 StackAlignInBytes(IsO32 ? 8 : 16) {}
5591 ABIArgInfo classifyReturnType(QualType RetTy) const;
5592 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
5593 void computeInfo(CGFunctionInfo &FI) const override;
5594 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5595 CodeGenFunction &CGF) const override;
5596 bool shouldSignExtUnsignedType(QualType Ty) const override;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
5603 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
5604 SizeOfUnwindException(IsO32 ? 24 : 32) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }
5610 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5611 CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD) return;
5614 llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
  }
5623 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5624 llvm::Value *Address) const override;
5626 unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
5632 void MipsABIInfo::CoerceToIntArgs(
5633 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
5634 llvm::IntegerType *IntTy =
5635 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
5637 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
5638 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
5639 ArgList.push_back(IntTy);
5641 // If necessary, add one more integer type to ArgList.
5642 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}
// In N32/64, an aligned double precision floating point field is passed in
// a register.
5650 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }
5658 if (Ty->isComplexType())
5659 return CGT.ConvertType(Ty);
5661 const RecordType *RT = Ty->getAs<RecordType>();
5663 // Unions/vectors are passed in integer registers.
5664 if (!RT || !RT->isStructureOrClassType()) {
5665 CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }
5669 const RecordDecl *RD = RT->getDecl();
5670 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5671 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
5673 uint64_t LastOffset = 0;
5675 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
  // Iterate over fields in the struct/class and check if there are any aligned
  // double fields.
  unsigned idx = 0;
5679 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
5680 i != e; ++i, ++idx) {
5681 const QualType Ty = i->getType();
5682 const BuiltinType *BT = Ty->getAs<BuiltinType>();
    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;
5687 uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;
5691 // Add ((Offset - LastOffset) / 64) args of type i64.
5692 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
5693 ArgList.push_back(I64);
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }
5700 CoerceToIntArgs(TySize - LastOffset, IntArgList);
5701 ArgList.append(IntArgList.begin(), IntArgList.end());
  return llvm::StructType::get(getVMContext(), ArgList);
}
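// Illustrative example (assumed, not taken from this file): on N64 a
//   struct { double d; int i; };
// is coerced to the LLVM type { double, i64 }: the 64-bit-aligned double is
// exposed as a first-level element so it travels in an FPR, and the trailing
// int is widened into an integer register word.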
5706 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
5707 uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}
ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
5716 Ty = useFirstFieldIfTransparentUnion(Ty);
5718 uint64_t OrigOffset = Offset;
5719 uint64_t TySize = getContext().getTypeSize(Ty);
5720 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
5722 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
5723 (uint64_t)StackAlignInBytes);
5724 unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
5725 Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
5727 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();
5732 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5733 Offset = OrigOffset + MinABIStackAlignInBytes;
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }
5737 // If we have reached here, aggregates are passed directly by coercing to
5738 // another structure type. Padding is inserted if the offset of the
5739 // aggregate is unaligned.
5740 ABIArgInfo ArgInfo =
5741 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
5742 getPaddingType(OrigOffset, CurrOffset));
    ArgInfo.setInReg(true);
    return ArgInfo;
  }
5747 // Treat an enum type as its underlying type.
5748 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5749 Ty = EnumTy->getDecl()->getIntegerType();
5751 // All integral types are promoted to the GPR width.
5752 if (Ty->isIntegralOrEnumerationType())
5753 return ABIArgInfo::getExtend();
5755 return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}
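// Illustrative example (assumed, not taken from this file): on N64, for the
// argument list (int, long double) the int ends at offset 8 but the 16-byte
// aligned long double must start at offset 16, so an i64 padding type is
// attached to keep the argument registers in step with the stack layout.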
llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
5761 const RecordType *RT = RetTy->getAs<RecordType>();
5762 SmallVector<llvm::Type*, 8> RTList;
5764 if (RT && RT->isStructureOrClassType()) {
5765 const RecordDecl *RD = RT->getDecl();
5766 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5767 unsigned FieldCnt = Layout.getFieldCount();
5769 // N32/64 returns struct/classes in floating point registers if the
5770 // following conditions are met:
5771 // 1. The size of the struct/class is no larger than 128-bit.
  // 2. The struct/class has one or two fields all of which are floating
  //    point types.
5774 // 3. The offset of the first field is zero (this follows what gcc does).
5776 // Any other composite results are returned in integer registers.
5778 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
5779 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
5780 for (; b != e; ++b) {
5781 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());
      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}
5801 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
5802 uint64_t Size = getContext().getTypeSize(RetTy);
5804 if (RetTy->isVoidType())
5805 return ABIArgInfo::getIgnore();
5807 // O32 doesn't treat zero-sized structs differently from other structs.
5808 // However, N32/N64 ignores zero sized return values.
5809 if (!IsO32 && Size == 0)
5810 return ABIArgInfo::getIgnore();
  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
5815 return ABIArgInfo::getDirect();
5817 // O32 returns integer vectors in registers and N32/N64 returns all small
      // aggregates in registers.
      if (!IsO32 ||
          (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
5821 ABIArgInfo ArgInfo =
5822 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
        ArgInfo.setInReg(true);
        return ArgInfo;
      }
    }
    return ABIArgInfo::getIndirect(0);
  }
5831 // Treat an enum type as its underlying type.
5832 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5833 RetTy = EnumTy->getDecl()->getIntegerType();
5835 return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
5839 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
5840 ABIArgInfo &RetInfo = FI.getReturnInfo();
5841 if (!getCXXABI().classifyReturnType(FI))
5842 RetInfo = classifyReturnType(FI.getReturnType());
5844 // Check if a pointer to an aggregate is passed as a hidden argument.
5845 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
5847 for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, Offset);
}
5851 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5852 CodeGenFunction &CGF) const {
5853 llvm::Type *BP = CGF.Int8PtrTy;
5854 llvm::Type *BPP = CGF.Int8PtrPtrTy;
5856 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
5857 // Pointers are also promoted in the same way but this only matters for N32.
5858 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
5859 unsigned PtrWidth = getTarget().getPointerWidth(0);
5860 if ((Ty->isIntegerType() &&
5861 CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) ||
5862 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
5863 Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                                Ty->isSignedIntegerType());
  }
5867 CGBuilderTy &Builder = CGF.Builder;
5868 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  uint64_t TypeAlign =
      std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
5872 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5873 llvm::Value *AddrTyped;
5874 llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
5876 if (TypeAlign > MinABIStackAlignInBytes) {
5877 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
5878 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
5879 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
5880 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
5881 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
    AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  }
  else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);
5887 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
5888 TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
5889 unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
5890 uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
5891 llvm::Value *NextAddr =
      Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
5899 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
5900 int TySize = getContext().getTypeSize(Ty);
5902 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
  if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return true;

  return false;
}
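// Illustrative note (assumed, not taken from this file): this means an
// unsigned int argument such as 0x80000000U is materialized on MIPS64 as the
// sign-extended 64-bit value 0xFFFFFFFF80000000, matching what the hardware
// produces for 32-bit operations.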
bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5911 llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to
  // be as canonical as it gets.
5915 // Everything on MIPS is 4 bytes. Double-precision FP registers
5916 // are aliased to pairs of single-precision FP registers.
5917 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5919 // 0-31 are the general purpose registers, $0 - $31.
5920 // 32-63 are the floating-point registers, $f0 - $f31.
5921 // 64 and 65 are the multiply/divide registers, $hi and $lo.
5922 // 66 is the (notional, I think) register for signal-handler return.
5923 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
5925 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5926 // They are one bit wide and ignored here.
5928 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5929 // (coprocessor 1 is the FP unit)
5930 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5931 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5932 // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);

  return false;
}
5937 //===----------------------------------------------------------------------===//
5938 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
5941 //===----------------------------------------------------------------------===//
class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
5947 TCETargetCodeGenInfo(CodeGenTypes &CGT)
5948 : DefaultTargetCodeGenInfo(CGT) {}
5950 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
5954 void TCETargetCodeGenInfo::setTargetAttributes(
5955 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD)
    return;
5959 llvm::Function *F = cast<llvm::Function>(GV);
5961 if (M.getLangOpts().OpenCL) {
5962 if (FD->hasAttr<OpenCLKernelAttr>()) {
5963 // OpenCL C Kernel functions are not subject to inlining
5964 F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
5967 // Convert the reqd_work_group_size() attributes to metadata.
5968 llvm::LLVMContext &Context = F->getContext();
5969 llvm::NamedMDNode *OpenCLMetadata =
5970 M.getModule().getOrInsertNamedMetadata(
5971 "opencl.kernel_wg_size_info");
5973 SmallVector<llvm::Metadata *, 5> Operands;
        Operands.push_back(llvm::ConstantAsMetadata::get(F));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}
5999 //===----------------------------------------------------------------------===//
6000 // Hexagon ABI Implementation
6001 //===----------------------------------------------------------------------===//
class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
6013 ABIArgInfo classifyReturnType(QualType RetTy) const;
6014 ABIArgInfo classifyArgumentType(QualType RetTy) const;
6016 void computeInfo(CGFunctionInfo &FI) const override;
6018 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6019 CodeGenFunction &CGF) const override;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
6024 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
6025 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }
};
6034 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
6035 if (!getCXXABI().classifyReturnType(FI))
6036 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6037 for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}
6041 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
6042 if (!isAggregateTypeForABI(Ty)) {
6043 // Treat an enum type as its underlying type.
6044 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6045 Ty = EnumTy->getDecl()->getIntegerType();
6047 return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
6051 // Ignore empty records.
6052 if (isEmptyRecord(getContext(), Ty, true))
6053 return ABIArgInfo::getIgnore();
6055 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6056 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);

  // Pass in the smallest viable integer type.
  if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
6072 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
6073 if (RetTy->isVoidType())
6074 return ABIArgInfo::getIgnore();
6076 // Large vector types should be returned via memory.
6077 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
6078 return ABIArgInfo::getIndirect(0);
6080 if (!isAggregateTypeForABI(RetTy)) {
6081 // Treat an enum type as its underlying type.
6082 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6083 RetTy = EnumTy->getDecl()->getIntegerType();
6085 return (RetTy->isPromotableIntegerType() ?
6086 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6089 if (isEmptyRecord(getContext(), RetTy, true))
6090 return ABIArgInfo::getIgnore();
6092 // Aggregates <= 8 bytes are returned in r0; other aggregates
6093 // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
}
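// Illustrative examples (assumed, not taken from this file) of the return
// rules:
//   struct { char a, b; }  (16 bits)  is returned directly as an i16;
//   struct { int a, b; }   (64 bits)  is returned directly as an i64;
//   struct { int a[3]; }   (96 bits)  is returned indirectly via sret.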
6109 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6110 CodeGenFunction &CGF) const {
6111 // FIXME: Need to handle alignment
6112 llvm::Type *BPP = CGF.Int8PtrPtrTy;
6114 CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
      llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
6132 //===----------------------------------------------------------------------===//
6133 // AMDGPU ABI Implementation
6134 //===----------------------------------------------------------------------===//
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
6140 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
6141 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6142 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                          CodeGen::CodeGenModule &M) const override;
};
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D,
    llvm::GlobalValue *GV,
    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD)
    return;
6156 if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
6157 llvm::Function *F = cast<llvm::Function>(GV);
6158 uint32_t NumVGPR = Attr->getNumVGPR();
6160 F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
6163 if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
6164 llvm::Function *F = cast<llvm::Function>(GV);
6165 unsigned NumSGPR = Attr->getNumSGPR();
6167 F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
6172 //===----------------------------------------------------------------------===//
6173 // SPARC v9 ABI Implementation.
6174 // Based on the SPARC Compliance Definition version 2.4.1.
// Function arguments are mapped to a nominal "parameter array" and promoted
// to registers depending on their type. Each argument occupies 8 or 16 bytes
// in the array; structs larger than 16 bytes are passed indirectly.
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
6187 // When a struct mixed is passed by value, it only occupies 8 bytes in the
6188 // parameter array, but the int is passed in an integer register, and the float
6189 // is passed in a floating point register. This is represented as two arguments
6190 // with the LLVM IR inreg attribute:
6192 // declare void f(i32 inreg %i, float inreg %f)
6194 // The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
6204 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
6205 void computeInfo(CGFunctionInfo &FI) const override;
6206 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6207 CodeGenFunction &CGF) const override;
6209 // Coercion type builder for structs passed in registers. The coercion type
6210 // serves two purposes:
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
6214 // 2. Expose aligned floating point elements as first-level elements, so the
6215 // code generator knows to pass them in floating point registers.
6217 // We also compute the InReg flag which indicates that the struct contains
6218 // aligned 32-bit floats.
6220 struct CoerceBuilder {
6221 llvm::LLVMContext &Context;
6222 const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;
6227 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
6228 : Context(c), DL(dl), Size(0), InReg(false) {}
6230 // Pad Elems with integers until Size is ToSize.
6231 void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;
6236 // Finish the current 64-bit word.
6237 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
6238 if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }
6243 // Add whole 64-bit words.
6244 while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }
6249 // Final in-word padding.
6250 if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }
6256 // Add a floating point element at Offset.
6257 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }
6269 // Add a struct type to the coercion type, starting at Offset (in bits).
6270 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
6271 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
6272 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
6273 llvm::Type *ElemTy = StrTy->getElementType(i);
6274 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size = ElemOffset + 64;
          }
          break;
        default:
          break;
        }
      }
    }
6301 // Check if Ty is a usable substitute for the coercion type.
6302 bool isUsableType(llvm::StructType *Ty) const {
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }
6306 // Get the coercion type as a literal struct type.
6307 llvm::Type *getType() const {
6308 if (Elems.size() == 1)
6309 return Elems.front();
      return llvm::StructType::get(Context, Elems);
    }
  };
};
6315 } // end anonymous namespace
ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
6319 if (Ty->isVoidType())
6320 return ABIArgInfo::getIgnore();
6322 uint64_t Size = getContext().getTypeSize(Ty);
6324 // Anything too big to fit in registers is passed with an explicit indirect
6325 // pointer / sret pointer.
6326 if (Size > SizeLimit)
6327 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
6329 // Treat an enum type as its underlying type.
6330 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6331 Ty = EnumTy->getDecl()->getIntegerType();
6333 // Integer types smaller than a register are extended.
6334 if (Size < 64 && Ty->isIntegerType())
6335 return ABIArgInfo::getExtend();
6337 // Other non-aggregates go in registers.
6338 if (!isAggregateTypeForABI(Ty))
6339 return ABIArgInfo::getDirect();
6341 // If a C++ object has either a non-trivial copy constructor or a non-trivial
6342 // destructor, it is passed with an explicit indirect pointer / sret pointer.
6343 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6344 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
6346 // This is a small aggregate type that should be passed in registers.
6347 // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();
6352 CoerceBuilder CB(getVMContext(), getDataLayout());
6353 CB.addStruct(0, StrTy);
6354 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
6356 // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}
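// Illustrative example (assumed, not taken from this file): a
//   struct { int i; float f; };
// converts to the LLVM type { i32, float }. The CoerceBuilder keeps the
// aligned float as a first-level element and sets InReg, yielding the
//   declare void @f(i32 inreg %i, float inreg %f)
// style of lowering described in the introductory comment above.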
6365 llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6366 CodeGenFunction &CGF) const {
6367 ABIArgInfo AI = classifyType(Ty, 16 * 8);
6368 llvm::Type *ArgTy = CGT.ConvertType(Ty);
6369 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6370 AI.setCoerceToType(ArgTy);
6372 llvm::Type *BPP = CGF.Int8PtrPtrTy;
6373 CGBuilderTy &Builder = CGF.Builder;
6374 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
6375 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
6376 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *ArgAddr;
  unsigned Stride;
6380 switch (AI.getKind()) {
6381 case ABIArgInfo::Expand:
6382 case ABIArgInfo::InAlloca:
6383 llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Extend:
    Stride = 8;
    ArgAddr = Builder
      .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
                          "extend");
    break;

  case ABIArgInfo::Direct:
    Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    ArgAddr = Addr;
    break;
  case ABIArgInfo::Indirect:
    Stride = 8;
6399 ArgAddr = Builder.CreateBitCast(Addr,
6400 llvm::PointerType::getUnqual(ArgPtrTy),
6402 ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
6405 case ABIArgInfo::Ignore:
6406 return llvm::UndefValue::get(ArgPtrTy);
6410 Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
6411 Builder.CreateStore(Addr, VAListAddrAsBPP);
  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
}
6416 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
6417 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
6418 for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}
namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
6426 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }
6432 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
6435 } // end anonymous namespace
bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6439 llvm::Value *Address) const {
6440 // This is calculated from the LLVM and GCC tables and verified
6441 // against gcc output. AFAIK all ABIs use the same encoding.
6443 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6445 llvm::IntegerType *i8 = CGF.Int8Ty;
6446 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
6447 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
6449 // 0-31: the 8-byte general-purpose registers
6450 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
6452 // 32-63: f0-31, the 4-byte floating-point registers
6453 AssignToArrayRange(Builder, Address, Four8, 32, 63);
  // 64-71 are the eight 8-byte special registers Y, PSR, WIM, TBR, PC, NPC,
  // FSR and CSR.
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);
6465 // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}
6472 //===----------------------------------------------------------------------===//
6473 // XCore ABI Implementation
6474 //===----------------------------------------------------------------------===//
namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
6479 /// it by reference between functions that append to it.
6480 typedef llvm::SmallString<128> SmallStringEnc;
6482 /// TypeStringCache caches the meta encodings of Types.
6484 /// The reason for caching TypeStrings is two fold:
6485 /// 1. To cache a type's encoding for later uses;
6486 /// 2. As a means to break recursive member type inclusion.
6488 /// A cache Entry can have a Status of:
6489 /// NonRecursive: The type encoding is not recursive;
6490 /// Recursive: The type encoding is recursive;
6491 /// Incomplete: An incomplete TypeString;
6492 /// IncompleteUsed: An incomplete TypeString that has been used in a
6493 /// Recursive type encoding.
6495 /// A NonRecursive entry will have all of its sub-members expanded as fully
6496 /// as possible. Whilst it may contain types which are recursive, the type
6497 /// itself is not recursive and thus its encoding may be safely used whenever
6498 /// the type is encountered.
6500 /// A Recursive entry will have all of its sub-members expanded as fully as
6501 /// possible. The type itself is recursive and it may contain other types which
6502 /// are recursive. The Recursive encoding must not be used during the expansion
6503 /// of a recursive type's recursive branch. For simplicity the code uses
6504 /// IncompleteCount to reject all usage of Recursive encodings for member types.
6506 /// An Incomplete entry is always a RecordType and only encodes its
6507 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
6508 /// are placed into the cache during type expansion as a means to identify and
6509 /// handle recursive inclusion of types as sub-members. If there is recursion
6510 /// the entry becomes IncompleteUsed.
6512 /// During the expansion of a RecordType's members:
6514 /// If the cache contains a NonRecursive encoding for the member type, the
6515 /// cached encoding is used;
6517 /// If the cache contains a Recursive encoding for the member type, the
6518 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
6520 /// If the member is a RecordType, an Incomplete encoding is placed into the
6521 /// cache to break potential recursive inclusion of itself as a sub-member;
6523 /// Once a member RecordType has been expanded, its temporary incomplete
6524 /// entry is removed from the cache. If a Recursive encoding was swapped out
6525 /// it is swapped back in;
6527 /// If an incomplete entry is used to expand a sub-member, the incomplete
6528 /// entry is marked as IncompleteUsed. The cache keeps count of how many
6529 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
6531 /// If a member's encoding is found to be a NonRecursive or Recursive viz:
6532 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
6533 /// Else the member is part of a recursive type and thus the recursion has
6534 /// been exited too soon for the encoding to be correct for the member.
6536 class TypeStringCache {
6537 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
6544 std::map<const IdentifierInfo *, struct Entry> Map;
6545 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
6546 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
6549 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
6550 bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};
/// TypeString encodings for enum & union fields must be ordered.
6557 /// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc.c_str(); }
6564 bool operator<(const FieldEncoding &rhs) const {
6565 if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};
class XCoreABIInfo : public DefaultABIInfo {
public:
6572 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6573 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
6580 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
6581 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
6582 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};
6586 } // End anonymous namespace.
6588 llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6589 CodeGenFunction &CGF) const {
6590 CGBuilderTy &Builder = CGF.Builder;
  // Get the VAList.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
                                                       CGF.Int8PtrPtrTy);
6595 llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
6597 // Handle the argument.
6598 ABIArgInfo AI = classifyArgumentType(Ty);
6599 llvm::Type *ArgTy = CGT.ConvertType(Ty);
6600 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6601 AI.setCoerceToType(ArgTy);
6602 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *Val;
  uint64_t ArgSize = 0;
6605 switch (AI.getKind()) {
6606 case ABIArgInfo::Expand:
6607 case ABIArgInfo::InAlloca:
6608 llvm_unreachable("Unsupported ABI kind for va_arg");
6609 case ABIArgInfo::Ignore:
    Val = llvm::UndefValue::get(ArgPtrTy);
    ArgSize = 0;
    break;
6613 case ABIArgInfo::Extend:
6614 case ABIArgInfo::Direct:
6615 Val = Builder.CreatePointerCast(AP, ArgPtrTy);
    ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    if (ArgSize < 4)
      ArgSize = 4;
    break;
6620 case ABIArgInfo::Indirect:
6621 llvm::Value *ArgAddr;
6622 ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
6623 ArgAddr = Builder.CreateLoad(ArgAddr);
    Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
    ArgSize = 4;
    break;
  }
  // Increment the VAList.
  if (ArgSize) {
    llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
    Builder.CreateStore(APN, VAListAddrAsBPP);
  }

  return Val;
}
6637 /// During the expansion of a RecordType, an incomplete TypeString is placed
6638 /// into the cache as a means to identify and break recursion.
6639 /// If there is a Recursive encoding in the cache, it is swapped out and will
6640 /// be reinserted by removeIncomplete().
6641 /// All other types of encoding should have been used rather than arriving here.
6642 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert( (E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
6649 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
6650 E.Swapped.swap(E.Str); // swap out the Recursive
6651 E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}
6656 /// Once the RecordType has been expanded, the temporary incomplete TypeString
6657 /// must be removed from the cache.
6658 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
6659 /// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
6663 auto I = Map.find(ID);
6664 assert(I != Map.end() && "Entry not present");
6665 Entry &E = I->second;
6666 assert( (E.State == Incomplete ||
6667 E.State == IncompleteUsed) &&
6668 "Entry must be an incomplete type");
6669 bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}
6687 /// Add the encoded TypeString to the cache only if it is NonRecursive or
6688 /// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
6691 if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
6694 if (IsRecursive && !E.Str.empty()) {
6695 assert(E.State==Recursive && E.Str.size() == Str.size() &&
6696 "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount!=0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive? Recursive : NonRecursive;
}
6707 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
6708 /// are recursively expanding a type (IncompleteCount != 0) and the cached
6709 /// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
6716 Entry &E = I->second;
6717 if (E.State == Recursive && IncompleteCount)
6718 return StringRef(); // We don't use Recursive encodings for member types.
6720 if (E.State == Incomplete) {
6721 // The incomplete type is being used to break out of recursion.
6722 E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str.c_str();
}
6728 /// The XCore ABI includes a type information section that communicates symbol
6729 /// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
6731 /// The ABI only requires C (and XC) language modules to emit TypeStrings.
6732 /// This type information (TypeString) is emitted into meta data for all global
6733 /// symbols: definitions, declarations, functions & variables.
6735 /// The TypeString carries type, qualifier, name, size & value details.
6736 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
6737 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
6738 /// The output is tested by test/CodeGen/xcore-stringtype.c.
6740 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
6741 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
6743 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
6744 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
6747 if (getTypeString(Enc, D, CGM, TSC)) {
6748 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
6749 llvm::SmallVector<llvm::Metadata *, 2> MDVals;
6750 MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
6751 MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
6752 llvm::NamedMDNode *MD =
6753 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
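// Illustrative sketch (assumed C source, not taken from this file): for
//   int f(int x) { return x; }
// the metadata emitted is roughly
//   !xcore.typestrings = !{!0}
//   !0 = !{i32 (i32)* @f, !"f{si}(si)"}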
6758 static bool appendType(SmallStringEnc &Enc, QualType QType,
6759 const CodeGen::CodeGenModule &CGM,
6760 TypeStringCache &TSC);
6762 /// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
6765 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
6766 const RecordDecl *RD,
6767 const CodeGen::CodeGenModule &CGM,
6768 TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS.resync();
      OS << Field->getBitWidthValue(CGM.getContext());
      OS.flush();
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}
6792 /// Appends structure and union types to Enc and adds encoding to cache.
6793 /// Recursively calls appendType (via extractFieldType) for each field.
6794 /// Union types have their fields ordered according to the ABI.
6795 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
6796 const CodeGen::CodeGenModule &CGM,
6797 TypeStringCache &TSC, const IdentifierInfo *ID) {
6798 // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }
6805 // Start to emit an incomplete TypeString.
6806 size_t Start = Enc.size();
  Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";
6813 // We collect all encoded fields and order as necessary.
6814 bool IsRecursive = false;
6815 const RecordDecl *RD = RT->getDecl()->getDefinition();
6816 if (RD && !RD->field_empty()) {
6817 // An incomplete TypeString stub is placed in the cache for this RecordType
6818 // so that recursive calls to this RecordType will use it whilst building a
6819 // complete TypeString for this RecordType.
6820 SmallVector<FieldEncoding, 16> FE;
6821 std::string StubEnc(Enc.substr(Start).str());
6822 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
6823 TSC.addIncomplete(ID, std::move(StubEnc));
6824 if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
6828 IsRecursive = TSC.removeIncomplete(ID);
6829 // The ABI requires unions to be sorted but not structures.
6830 // See FieldEncoding::operator< for sort algorithm.
6831 if (RT->isUnionType())
6832 std::sort(FE.begin(), FE.end());
6833 // We can now complete the TypeString.
6834 unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
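// Illustrative encodings (assumed, not taken from this file):
//   struct S { int x; };                 ->  "s(S){m(x){si}}"
//   union U { int a; unsigned char b; }; ->  "u(U){m(a){si},m(b){uc}}"
// Union members are sorted via FieldEncoding::operator<; struct members keep
// declaration order.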
6846 /// Appends enum types to Enc and adds the encoding to the cache.
6847 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
6848 TypeStringCache &TSC,
6849 const IdentifierInfo *ID) {
6850 // Append the cached TypeString if we have one.
6851 StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }
  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";
6863 // We collect all encoded enumerations and order them alphanumerically.
6864 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
6865 SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
6876 std::sort(FE.begin(), FE.end());
6877 unsigned E = FE.size();
6878 for (unsigned I = 0; I != E; ++I) {
6885 TSC.addIfComplete(ID, Enc.substr(Start), false);
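// For example, `enum E { B = 5, A };` encodes as "e(E){m(A){6},m(B){5}}";
// note the enumerators are ordered alphanumerically, not by value.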

/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
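// For example, `const volatile int` yields "cv:si": const and volatile set
// bits 0 and 2, so Lookup == 5 selects "cv:" ahead of the builtin encoding.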

/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
    case BuiltinType::Void:       EncType = "0";   break;
    case BuiltinType::Bool:       EncType = "b";   break;
    case BuiltinType::Char_U:     EncType = "uc";  break;
    case BuiltinType::UChar:      EncType = "uc";  break;
    case BuiltinType::SChar:      EncType = "sc";  break;
    case BuiltinType::UShort:     EncType = "us";  break;
    case BuiltinType::Short:      EncType = "ss";  break;
    case BuiltinType::UInt:       EncType = "ui";  break;
    case BuiltinType::Int:        EncType = "si";  break;
    case BuiltinType::ULong:      EncType = "ul";  break;
    case BuiltinType::Long:       EncType = "sl";  break;
    case BuiltinType::ULongLong:  EncType = "ull"; break;
    case BuiltinType::LongLong:   EncType = "sll"; break;
    case BuiltinType::Float:      EncType = "ft";  break;
    case BuiltinType::Double:     EncType = "d";   break;
    case BuiltinType::LongDouble: EncType = "ld";  break;
    default:
      return false;
  }
  Enc += EncType;
  return true;
}

/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
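// For example, `int *` encodes as "p(si)" and `const int *` as "p(c:si)";
// the pointee's qualifier is emitted by the recursive appendType() call.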

/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
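// For example, `int a[5]` encodes as "a(5:si)", while a global array of
// unknown size is encoded as "a(*:si)" (NoSizeEnc == "*").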

/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        if (++I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else if (FPT->isVariadic()) {
      Enc += "va";
    } else {
      Enc += "0";
    }
  }
  Enc += ')';
  return true;
}
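// For example, `int f(float)` encodes as "f{si}(ft)", `int f(void)` as
// "f{si}(0)", and the variadic `int f(int, ...)` as "f{si}(si,va)".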

/// Handles the type's qualifier before dispatching a call to handle specific
/// type(s).
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
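// Putting the pieces together: `const int *p[3]` dispatches through
// appendArrayType() and encodes as "a(3:p(c:si))".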

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }

  return false;
}
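// Note that only declarations with C language linkage receive typestrings;
// e.g. `extern "C" double g[];` yields "a(*:d)", whereas a C++-linkage
// symbol produces no typestring at all.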

//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

const llvm::Triple &CodeGenModule::getTriple() const {
  return getTarget().getTriple();
}

bool CodeGenModule::supportsCOMDAT() const {
  return !getTriple().isOSBinFormatMachO();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    {
      if (Triple.getOS() == llvm::Triple::Win32) {
        TheTargetCodeGenInfo =
            new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP);
        return *TheTargetCodeGenInfo;
      }

      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
      if (getTarget().getABI() == "apcs-gnu")
        Kind = ARMABIInfo::APCS;
      else if (CodeGenOpts.FloatABI == "hard" ||
               (CodeGenOpts.FloatABI != "soft" &&
                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
        Kind = ARMABIInfo::AAPCS_VFP;

      return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
    }

  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";

      return *(TheTargetCodeGenInfo =
                   new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
    }
    return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";

    return *(TheTargetCodeGenInfo =
                 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types,
                                                                 HasVector));
  }

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool IsSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo(
                   Types, IsDarwinVectorABI, IsSmallStructInRegABI,
                   IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo(
                   Types, IsDarwinVectorABI, IsSmallStructInRegABI,
                   IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 :
                               ABI == "avx" ? X86AVXABILevel::AVX :
                               X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
                   new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return *(TheTargetCodeGenInfo =
                   new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return *(TheTargetCodeGenInfo =
                   new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }

  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
  }
}