//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

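// Illustrative example (not part of the original source): for a type such as
//   struct S { char c; int i; };   // Size = 64 bits, Alignment = 32 bits
// the helper above produces the IR type [2 x i32], matching the qualified
// type's total size (including the padding after 'c') rather than expanding
// the struct members individually.
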
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

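// Worked example (illustrative): on a target with 64-bit pointers, a value
// expanded to { i8*, i64, double, double, double } counts 2 integer and 3 FP
// registers, so occupiesMoreThan(..., /*maxAllRegisters=*/4) returns true and
// the Swift lowering would pass or return such a value indirectly.
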
bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                               const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

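// Illustrative C example (hypothetical type, not from this file): given
//   typedef union {
//     int *ip;
//     volatile int *vip;
//   } IntPtrUnion __attribute__((transparent_union));
// an IntPtrUnion argument is classified exactly as its first field, 'int *'.
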
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

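// Worked example (illustrative): for Ptr == 0x1003 and Align == 8,
//   (0x1003 + 7) & -8 == 0x1008,
// i.e. the next 8-byte boundary at or above the original pointer.
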
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding. If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  llvm::Value *NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
                                             "argp.next");
  CGF.Builder.CreateStore(NextPtr, VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

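// Worked example (illustrative): with 4-byte slots, a 1-byte argument still
// advances argp.cur by alignTo(1, 4) == 4 bytes; on a big-endian target the
// value is right-adjusted within its slot, so the returned address is the
// slot address plus 3.
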
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list. This enables a feasible implementation of clSetKernelArg() with
  // aggregates etc. If we used the default C calling convention here,
  // clSetKernelArg() might break, depending on the target-specific
  // conventions; different targets might split structs passed as values
  // into multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DestTy);
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const {
  return C.getOrInsertSyncScopeID(""); /* default sync scope */
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

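// Illustrative examples (hypothetical types): each of the following counts
// as an empty record:
//   struct A {};                  // no fields
//   struct B { int : 0; };        // only an unnamed bit-field
//   struct C { A a[4]; };         // (array of) empty records, with AllowArrays
// while any struct with a flexible array member is never considered empty.
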
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

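// Illustrative examples (hypothetical types):
//   struct S1 { float f; };      // single-element struct, element 'float'
//   struct S2 { S1 inner; };     // also single-element, element 'float'
//   struct S3 { float a[1]; };   // one-element array: element 'float'
//   struct S4 { float a, b; };   // two non-empty fields: not single-element
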
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)
  //
  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                        : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public SwiftABIInfo {
  DefaultABIInfo defaultInfo;

public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : SwiftABIInfo(CGT), defaultInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-module", Attr->getImportModule());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-name", Attr->getImportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
    }

    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
        Fn->addFnAttr("no-prototype");
    }
  }
};

/// Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/ true);
}

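// Illustrative example: with 4-byte slots and AllowHigherAlign, a 'double'
// (8-byte size and alignment) first rounds argp up to an 8-byte boundary and
// then advances it by 8, while an 'int' simply consumes one 4-byte slot.
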
//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCL ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                        : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

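// Illustrative examples: <2 x i32>, <4 x i16>, and <8 x i8> all qualify;
// <1 x i64> is excluded because its scalar size is 64 bits, and <2 x float>
// because its elements are not integers.
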
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                     .Cases("y", "&y", "^Ym", true)
                     .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;

  /// Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                             bool &UsedInAlloca) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
    : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers. Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('v' << 16) |
                   ('2' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}

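// Illustrative examples (hypothetical types):
//   struct P { int x; int y; };   // two 32-bit fields, sizeof == 8: expandable
//   struct Q { char c; int i; };  // 'char' is not a 32/64-bit basic type: no
//   struct R { int x : 5; };      // contains a bit-field: no
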
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

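// Illustrative examples of the rules above (Darwin x86-32, hypothetical
// types): 'struct F { float f; };' is returned directly in a floating-point
// register as 'float'; 'struct P { int a, b; };' is register sized (64 bits)
// and comes back as i64 in EAX:EDX; a 12-byte struct is returned indirectly
// via a hidden sret pointer.
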
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}

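// Worked example (illustrative): with State.FreeRegs == 3 (e.g. regparm(3)),
// an 8-byte struct needs SizeInRegs == 2 and leaves one register free; a
// second 8-byte struct then fails the check, zeroes FreeRegs, and is passed
// on the stack.
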
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogenous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return false;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
            Ty->isReferenceType());
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // Regcall uses the concept of a homogeneous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_RegCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (getContext().getTypeSize(Ty) <= 4 * 32 &&
        (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall ||
              State.CC == llvm::CallingConv::X86_RegCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg(Ty);
    return ABIArgInfo::getExtend(Ty);
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

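// Illustrative examples of the rules above: on non-Windows x86-32, a
// struct { int x; int y; } satisfies canExpandIndirectArgument and is
// expanded into two separate i32 arguments, while a struct containing a
// flexible array member is always passed indirectly.
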
void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                                          bool &UsedInAlloca) const {
  // Vectorcall on x86 works subtly differently than on x64, so the format is
  // a bit different than the x64 version. First, all vector types (not HVAs)
  // are assigned, with the first 6 ending up in the YMM0-5 or XMM0-5 registers.
  // This differs from the x64 version, where the first 6 by INDEX get
  // registers.
  // After that, integers AND HVAs are assigned Left to Right in the same pass.
  // Integers are passed as ECX/EDX if one is available (in order). HVAs will
  // first take up the remaining YMM/XMM registers. If insufficient registers
  // remain but an integer register (ECX/EDX) is available, it will be passed
  // in that, else, on the stack.
  for (auto &I : FI.arguments()) {
    // First pass: do all the vector types.
    const Type *Base = nullptr;
    uint64_t NumElts = 0;
    const QualType &Ty = I.type;
    if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
        isHomogeneousAggregate(Ty, Base, NumElts)) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        I.info = ABIArgInfo::getDirect();
      } else {
        I.info = classifyArgumentType(Ty, State);
      }
      UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
    }
  }

  for (auto &I : FI.arguments()) {
    // Second pass: do the rest!
    const Type *Base = nullptr;
    uint64_t NumElts = 0;
    const QualType &Ty = I.type;
    bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts);

    if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) {
      // Assign true HVAs (non vector/native FP types).
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        I.info = getDirectX86Hva();
      } else {
        I.info = getIndirectResult(Ty, /*ByVal=*/false, State);
      }
    } else if (!IsHva) {
      // Assign all Non-HVAs, so this will exclude Vector/FP args.
      I.info = classifyArgumentType(Ty, State);
      UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
    }
  }
}

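// For illustration, under the two-pass scheme above an __m128 argument is
// assigned an XMM register in the first pass, while an HVA such as
// struct { __m128 a, b; } waits for the second pass and takes two of the
// remaining XMM/YMM registers (or is passed indirectly if too few remain).
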
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeRegs = 5;
    State.FreeSSERegs = 8;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  if (State.CC == llvm::CallingConv::X86_VectorCall) {
    computeVectorCallArgs(FI, State, UsedInAlloca);
  } else {
    // If not vectorcall, revert to normal behavior.
    for (auto &I : FI.arguments()) {
      I.info = classifyArgumentType(I.type, State);
      UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
    }
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

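// Note on the register budgets above: fastcall and vectorcall pass integers
// in ECX/EDX (2 registers), regcall allows 5 integer and 8 SSE registers,
// and the IAMCU psABI allows 3 integer registers; vectorcall additionally
// reserves 6 XMM/YMM registers for vectors and HVAs.
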
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   CharUnits &StackOffset, ABIArgInfo &Info,
                                   QualType Type) const {
  // Arguments are always 4-byte-aligned.
  CharUnits FieldAlign = CharUnits::fromQuantity(4);

  assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type);

  // Insert padding bytes to respect alignment.
  CharUnits FieldEnd = StackOffset;
  StackOffset = FieldEnd.alignTo(FieldAlign);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}

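// For example, adding an i8 field at StackOffset 0 advances StackOffset to
// 1 and then pads it back up to 4 with a [3 x i8] filler field, so the next
// argument again starts on a 4-byte boundary.
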
static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    if (Info.getInReg())
      return false;
    return true;
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
    // These are aggregate types which are never passed in registers when
    // inalloca is involved.
    return true;
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // The stack alignment is always 4.
  CharUnits StackAlign = CharUnits::fromQuantity(4);

  CharUnits StackOffset;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}

Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
                                 Address VAListAddr, QualType Ty) const {

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  // x86-32 changes the alignment of certain arguments on the stack.
  //
  // Just messing with TypeInfo like this works because we never pass
  // anything indirectly.
  TypeInfo.second = CharUnits::fromQuantity(
      getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
                          TypeInfo, CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}

bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

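// For example, -freg-struct-return forces small-struct returns into
// registers and -fpcc-struct-return forces them onto the stack; with the
// default convention, Darwin, IAMCU, and the BSD/Win32 targets above return
// small structs in registers, while other targets (e.g. Linux) do not.
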
void X86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }
    if (FD->hasAttr<AnyX86InterruptAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
/// The AVX ABI level for X86 targets.
enum class X86AVXABILevel {
  None,
  AVX,
  AVX512
};

/// Returns the size in bits of the largest (native) vector for \p AVXLevel.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}

/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
                                  unsigned &neededInt, unsigned &neededSSE,
                                  bool isNamedArg) const;

  ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
                                       unsigned &NeededSSE) const;

  ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                           unsigned &NeededSSE) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
  /// classify it as INTEGER (for compatibility with older clang compilers).
  bool classifyIntegerMMXAsSSE() const {
    // Clang <= 3.8 did not do this.
    if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver3_8)
      return false;

    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
      return true;
    if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
      return true;
    return false;
  }

  X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
      SwiftABIInfo(CGT), AVXLevel(AVXLevel),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool has64BitPointers() const {
    return Has64BitPointers;
  }

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public SwiftABIInfo {
public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
      : SwiftABIInfo(CGT),
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return true;
  }

private:
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
                      bool IsVectorCall, bool IsRegCall) const;
  ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
                                  const ABIArgInfo &current) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
                             bool IsVectorCall, bool IsRegCall) const;

  bool IsMingw64;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('v' << 16) |
                   ('2' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->addFnAttr("stackrealign");
      }
      if (FD->hasAttr<AnyX86InterruptAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->setCallingConv(llvm::CallingConv::X86_INTR);
      }
    }
  }
};

class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
public:
  PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "\01";
    // If the argument contains a space, enclose it in quotes.
    if (Lib.find(" ") != StringRef::npos)
      Opt += "\"" + Lib.str() + "\"";
    else
      Opt += Lib;
  }
};

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix.
  // If the argument contains a space, enclose it in quotes.
  // This matches the behavior of MSVC.
  bool Quote = (Lib.find(" ") != StringRef::npos);
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
        bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
        unsigned NumRegisterParameters)
    : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
        Win32StructABI, NumRegisterParameters, false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) {
  if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
    if (CGM.getCodeGenOpts().StackProbeSize != 4096)
      Fn->addFnAttr("stack-probe-size",
                    llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
    if (CGM.getCodeGenOpts().NoStackArgProbe)
      Fn->addFnAttr("no-stack-arg-probe");
  }
}

void WinX86_32TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WinX86_64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->addFnAttr("stackrealign");
    }
    if (FD->hasAttr<AnyX86InterruptAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);
    }
  }

  addStackProbeTargetAttributes(D, GV, CGM);
}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}

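// For example, union { long double ld; int i; } merges to (Integer, X87Up);
// clause (b) then forces the whole union into memory on targets honoring
// revision 0.98.
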
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}

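// For example, merge(NoClass, SSE) == SSE by rule (b), and a union of float
// and int classifies its single eightbyte as merge(SSE, Integer) == Integer
// by rule (d).
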
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}, so classify both
        // Lo and Hi now.
        Lo = Hi = Integer;
      } else {
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
        // straddles an eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj) {
          Lo = Hi = Integer;
        } else {
          Current = Integer;
        }
      }
    } else {
      Current = Integer;
    }
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
      // gcc passes the following as integer:
      // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
      // 2 bytes - <2 x char>, <1 x short>
      // 1 byte  - <1 x char>
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Lo = (OffsetBase) / 64;
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
      if (EB_Lo != EB_Hi)
        Hi = Lo;
    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();

      // gcc passes <1 x double> in memory. :(
      if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as SSE but clang used to unconditionally
      // pass them as integer. For platforms where clang is the de facto
      // platform compiler, we must continue to use integer.
      if (!classifyIntegerMMXAsSSE() &&
          (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there are no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      //
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
      // registers if they are "named", i.e. not part of the "..." of a
      // variadic function.
      //
      // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
      // split into eight eightbyte chunks, one SSE and seven SSEUP.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 &&
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
      return;

    for (uint64_t i = 0, Offset = OffsetBase; i < ArraySize;
         ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // Ignore padding bit-fields.
      if (BitField && i->isUnnamedBitfield())
        continue;

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't extended
      // to work for sizes wider than 128, early check and fallback to memory.
      if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
                         Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        assert(!i->isUnnamedBitfield());
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}

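// A few sample classifications produced by the algorithm above:
//   struct { double d; long l; }  -> (SSE, Integer)
//   struct { float f[4]; }        -> (SSE, SSE)
//   __int128                      -> (Integer, Integer)
//   x87 long double               -> (X87, X87Up)
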
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                          : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of the
  // free integer registers. Doing this when there are free integer registers
  // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
  // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}

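// For example, once freeIntRegs reaches 0, an 8-byte, 8-aligned aggregate
// such as struct { void *p; } is coerced to a plain i64 stack argument
// instead of being passed byval.
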
/// The ABI specifies that a value should be passed in a full vector XMM/YMM
/// register. Pick an LLVM IR type that will be passed as a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType) ||
      IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");

  // Return a LLVM IR vector type based on the size of 'Ty'.
  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                               Size / 64);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes,
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::DataLayout &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the field at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}

/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are either double, <2 x float>, or float. We
  // pass as float if the last 4 bytes are just padding. This happens for
  // structs that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}

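// For example, struct { float x, y, z; } yields <2 x float> for the first
// eightbyte (floats at offsets 0 and 4) and plain float for the second,
// since bits [96, 128) are tail padding.
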
/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
    (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}

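// For example, for struct { double d; int i; } the second eightbyte is
// passed as i32 (the remaining 4 bytes are tail padding), whereas for
// struct { double d; int i, j; } it must be i64 to cover both ints.
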
/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are usually two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
    // NaCl).
    // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend(RetTy);
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk of the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}

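// For example, returning struct { double d; long n; } classifies as
// (SSE, Integer) and is lowered to {double, i64}: the double comes back in
// %xmm0 and the i64 in %rax.
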
3380 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3381 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3385 Ty = useFirstFieldIfTransparentUnion(Ty);
3387 X86_64ABIInfo::Class Lo, Hi;
3388 classify(Ty, 0, Lo, Hi, isNamedArg);
3390 // Check some invariants.
3391 // FIXME: Enforce these by construction.
3392 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3393 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3397 llvm::Type *ResType = nullptr;
3398 switch (Lo) {
3399 case NoClass:
3400 if (Hi == NoClass)
3401 return ABIArgInfo::getIgnore();
3402 // If the low part is just padding, it takes no register, leave ResType
3403 // null.
3404 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3405 "Unknown missing lo part");
3406 break;
3408 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3409 // on the stack.
3410 case Memory:
3412 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3413 // COMPLEX_X87, it is passed in memory.
3414 case X87:
3415 case ComplexX87:
3416 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3417 ++neededInt;
3418 return getIndirectResult(Ty, freeIntRegs);
3420 case SSEUp:
3421 case X87Up:
3422 llvm_unreachable("Invalid classification for lo word.");
3424 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3425 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3426 // and %r9 is used.
3427 case Integer:
3428 ++neededInt;
3430 // Pick an 8-byte type based on the preferred type.
3431 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3433 // If we have a sign or zero extended integer, make sure to return Extend
3434 // so that the parameter gets the right LLVM IR attributes.
3435 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3436 // Treat an enum type as its underlying type.
3437 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3438 Ty = EnumTy->getDecl()->getIntegerType();
3440 if (Ty->isIntegralOrEnumerationType() &&
3441 Ty->isPromotableIntegerType())
3442 return ABIArgInfo::getExtend(Ty);
3447 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3448 // available SSE register is used, the registers are taken in the
3449 // order from %xmm0 to %xmm7.
3450 case SSE: {
3451 llvm::Type *IRType = CGT.ConvertType(Ty);
3452 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3453 ++neededSSE;
3454 break;
3455 }
3456 }
3458 llvm::Type *HighPart = nullptr;
3459 switch (Hi) {
3460 // Memory was handled previously, ComplexX87 and X87 should
3461 // never occur as hi classes, and X87Up must be preceded by X87,
3462 // which is passed in memory.
3463 case Memory:
3464 case X87:
3465 case ComplexX87:
3466 llvm_unreachable("Invalid classification for hi word.");
3468 case NoClass: break;
3470 case Integer:
3471 ++neededInt;
3472 // Pick an 8-byte type based on the preferred type.
3473 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3475 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3476 return ABIArgInfo::getDirect(HighPart, 8);
3477 break;
3479 // X87Up generally doesn't occur here (long double is passed in
3480 // memory), except in situations involving unions.
3481 case X87Up:
3482 case SSE:
3483 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3484 ++neededSSE;
3485 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3486 return ABIArgInfo::getDirect(HighPart, 8);
3491 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3492 // eightbyte is passed in the upper half of the last used SSE
3493 // register. This only happens when 128-bit vectors are passed.
3494 case SSEUp:
3495 assert(Lo == SSE && "Unexpected SSEUp classification");
3496 ResType = GetByteVectorType(Ty);
3497 break;
3498 }
3500 // If a high part was specified, merge it together with the low part. It is
3501 // known to pass in the high eightbyte of the result. We do this by forming a
3502 // first class struct aggregate with the high and low part: {low, high}
3503 if (HighPart)
3504 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3506 return ABIArgInfo::getDirect(ResType);
3507 }
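// Editorial example (illustrative only): passing
//   struct P { double x, y; };
// classifies both eightbytes as SSE, so the argument is lowered to
// { double, double } with neededInt == 0 and neededSSE == 2, consuming
// %xmm0 and %xmm1 when enough SSE registers are free.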
3509 ABIArgInfo
3510 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3511 unsigned &NeededSSE) const {
3512 auto RT = Ty->getAs<RecordType>();
3513 assert(RT && "classifyRegCallStructType only valid with struct types");
3515 if (RT->getDecl()->hasFlexibleArrayMember())
3516 return getIndirectReturnResult(Ty);
3519 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3520 if (CXXRD->isDynamicClass()) {
3521 NeededInt = NeededSSE = 0;
3522 return getIndirectReturnResult(Ty);
3525 for (const auto &I : CXXRD->bases())
3526 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3527 .isIndirect()) {
3528 NeededInt = NeededSSE = 0;
3529 return getIndirectReturnResult(Ty);
3534 for (const auto *FD : RT->getDecl()->fields()) {
3535 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3536 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3537 .isIndirect()) {
3538 NeededInt = NeededSSE = 0;
3539 return getIndirectReturnResult(Ty);
3542 unsigned LocalNeededInt, LocalNeededSSE;
3543 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3544 LocalNeededSSE, true)
3545 .isIndirect()) {
3546 NeededInt = NeededSSE = 0;
3547 return getIndirectReturnResult(Ty);
3549 NeededInt += LocalNeededInt;
3550 NeededSSE += LocalNeededSSE;
3554 return ABIArgInfo::getDirect();
3555 }
3557 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3558 unsigned &NeededInt,
3559 unsigned &NeededSSE) const {
3561 NeededInt = 0;
3562 NeededSSE = 0;
3564 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3565 }
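// Editorial example (illustrative only): under __regcall, a struct such as
//   struct RC { double d; long n; };
// is walked field by field by the helper above, accumulating NeededSSE == 1
// for 'd' and NeededInt == 1 for 'n'; computeInfo below then falls back to
// an indirect pass if either total exceeds the remaining free registers.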
3567 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3569 const unsigned CallingConv = FI.getCallingConvention();
3570 // It is possible to force Win64 calling convention on any x86_64 target by
3571 // using __attribute__((ms_abi)). In such case to correctly emit Win64
3572 // compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
3573 if (CallingConv == llvm::CallingConv::Win64) {
3574 WinX86_64ABIInfo Win64ABIInfo(CGT);
3575 Win64ABIInfo.computeInfo(FI);
3576 return;
3577 }
3579 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3581 // Keep track of the number of assigned registers.
3582 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3583 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3584 unsigned NeededInt, NeededSSE;
3586 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
3587 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3588 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3589 FI.getReturnInfo() =
3590 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3591 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3592 FreeIntRegs -= NeededInt;
3593 FreeSSERegs -= NeededSSE;
3594 } else {
3595 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3596 }
3597 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) {
3598 // A complex long double is returned in memory when the regcall
3599 // calling convention is used.
3600 const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>();
3601 if (getContext().getCanonicalType(CT->getElementType()) ==
3602 getContext().LongDoubleTy)
3603 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3604 } else
3605 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3606 }
3608 // If the return value is indirect, then the hidden argument is consuming one
3609 // integer register.
3610 if (FI.getReturnInfo().isIndirect())
3611 --FreeIntRegs;
3613 // The chain argument effectively gives us another free register.
3614 if (FI.isChainCall())
3615 ++FreeIntRegs;
3617 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3618 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3619 // get assigned (in left-to-right order) for passing as follows...
3620 unsigned ArgNo = 0;
3621 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3622 it != ie; ++it, ++ArgNo) {
3623 bool IsNamedArg = ArgNo < NumRequiredArgs;
3625 if (IsRegCall && it->type->isStructureOrClassType())
3626 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3627 else
3628 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
3629 NeededSSE, IsNamedArg);
3631 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3632 // eightbyte of an argument, the whole argument is passed on the
3633 // stack. If registers have already been assigned for some
3634 // eightbytes of such an argument, the assignments get reverted.
3635 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3636 FreeIntRegs -= NeededInt;
3637 FreeSSERegs -= NeededSSE;
3638 } else {
3639 it->info = getIndirectResult(it->type, FreeIntRegs);
3640 }
3641 }
3642 }
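// Editorial example (illustrative only): in a call such as
//   void h(void *a, void *b, void *c, void *d, void *e, void *f, long g);
// the six pointers consume %rdi-%r9, leaving FreeIntRegs == 0, so 'g'
// (NeededInt == 1) fails the check above and is passed entirely on the
// stack; no partial register assignment has to be reverted.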
3644 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3645 Address VAListAddr, QualType Ty) {
3646 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
3647 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
3648 llvm::Value *overflow_arg_area =
3649 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3651 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3652 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3653 // It isn't stated explicitly in the standard, but in practice we use
3654 // alignment greater than 16 where necessary.
3655 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3656 if (Align > CharUnits::fromQuantity(8)) {
3657 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3658 Align);
3659 }
3661 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3662 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3663 llvm::Value *Res =
3664 CGF.Builder.CreateBitCast(overflow_arg_area,
3665 llvm::PointerType::getUnqual(LTy));
3667 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3668 // l->overflow_arg_area + sizeof(type).
3669 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3670 // an 8 byte boundary.
3672 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3673 llvm::Value *Offset =
3674 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3675 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3676 "overflow_arg_area.next");
3677 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3679 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3680 return Address(Res, Align);
3681 }
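// Editorial example (illustrative only): for a 12-byte argument type,
// Step 10 above computes (12 + 7) & ~7 == 16, so overflow_arg_area
// advances by 16 bytes and remains 8-byte aligned for the next fetch.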
3683 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3684 QualType Ty) const {
3685 // Assume that va_list type is correct; should be pointer to LLVM type:
3686 // struct {
3687 //   i32 gp_offset;
3688 //   i32 fp_offset;
3689 //   i8* overflow_arg_area;
3690 //   i8* reg_save_area;
3691 // };
3692 unsigned neededInt, neededSSE;
3694 Ty = getContext().getCanonicalType(Ty);
3695 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3696 /*isNamedArg*/false);
3698 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3699 // in the registers. If not go to step 7.
3700 if (!neededInt && !neededSSE)
3701 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3703 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3704 // general purpose registers needed to pass type and num_fp to hold
3705 // the number of floating point registers needed.
3707 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3708 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3709 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3711 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
3712 // register save space.
3714 llvm::Value *InRegs = nullptr;
3715 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3716 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3717 if (neededInt) {
3718 gp_offset_p =
3719 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
3720 "gp_offset_p");
3721 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3722 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3723 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3724 }
3726 if (neededSSE) {
3727 fp_offset_p =
3728 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
3729 "fp_offset_p");
3730 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3731 llvm::Value *FitsInFP =
3732 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3733 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3734 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3735 }
3737 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3738 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3739 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3740 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3742 // Emit code to load the value if it was passed in registers.
3744 CGF.EmitBlock(InRegBlock);
3746 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3747 // an offset of l->gp_offset and/or l->fp_offset. This may require
3748 // copying to a temporary location in case the parameter is passed
3749 // in different register classes or requires an alignment greater
3750 // than 8 for general purpose registers and 16 for XMM registers.
3752 // FIXME: This really results in shameful code when we end up needing to
3753 // collect arguments from different places; often what should result in a
3754 // simple assembling of a structure from scattered addresses has many more
3755 // loads than necessary. Can we clean this up?
3756 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3757 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3758 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
3759 "reg_save_area");
3761 Address RegAddr = Address::invalid();
3762 if (neededInt && neededSSE) {
3764 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3765 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3766 Address Tmp = CGF.CreateMemTemp(Ty);
3767 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3768 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3769 llvm::Type *TyLo = ST->getElementType(0);
3770 llvm::Type *TyHi = ST->getElementType(1);
3771 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3772 "Unexpected ABI info for mixed regs");
3773 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3774 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3775 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
3776 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
3777 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3778 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3780 // Copy the first element.
3781 // FIXME: Our choice of alignment here and below is probably pessimistic.
3782 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
3783 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
3784 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
3785 CGF.Builder.CreateStore(V,
3786 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3788 // Copy the second element.
3789 V = CGF.Builder.CreateAlignedLoad(
3790 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
3791 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
3792 CharUnits Offset = CharUnits::fromQuantity(
3793 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3794 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
3796 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3797 } else if (neededInt) {
3798 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
3799 CharUnits::fromQuantity(8));
3800 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3802 // Copy to a temporary if necessary to ensure the appropriate alignment.
3803 std::pair<CharUnits, CharUnits> SizeAlign =
3804 getContext().getTypeInfoInChars(Ty);
3805 uint64_t TySize = SizeAlign.first.getQuantity();
3806 CharUnits TyAlign = SizeAlign.second;
3808 // Copy into a temporary if the type is more aligned than the
3809 // register save area.
3810 if (TyAlign.getQuantity() > 8) {
3811 Address Tmp = CGF.CreateMemTemp(Ty);
3812 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3813 RegAddr = Tmp;
3814 }
3816 } else if (neededSSE == 1) {
3817 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3818 CharUnits::fromQuantity(16));
3819 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3821 assert(neededSSE == 2 && "Invalid number of needed registers!");
3822 // SSE registers are spaced 16 bytes apart in the register save
3823 // area; we need to collect the two eightbytes together.
3824 // The ABI isn't explicit about this, but it seems reasonable
3825 // to assume that the slots are 16-byte aligned, since the stack is
3826 // naturally 16-byte aligned and the prologue is expected to store
3827 // all the SSE registers to the RSA.
3828 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3829 CharUnits::fromQuantity(16));
3830 Address RegAddrHi =
3831 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3832 CharUnits::fromQuantity(16));
3833 llvm::Type *ST = AI.canHaveCoerceToType()
3834 ? AI.getCoerceToType()
3835 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
3836 llvm::Value *V;
3837 Address Tmp = CGF.CreateMemTemp(Ty);
3838 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3839 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
3840 RegAddrLo, ST->getStructElementType(0)));
3841 CGF.Builder.CreateStore(V,
3842 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3843 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
3844 RegAddrHi, ST->getStructElementType(1)));
3845 CGF.Builder.CreateStore(V,
3846 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
3848 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3851 // AMD64-ABI 3.5.7p5: Step 5. Set:
3852 // l->gp_offset = l->gp_offset + num_gp * 8
3853 // l->fp_offset = l->fp_offset + num_fp * 16.
3854 if (neededInt) {
3855 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3856 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3857 gp_offset_p);
3858 }
3859 if (neededSSE) {
3860 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3861 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3862 fp_offset_p);
3863 }
3864 CGF.EmitBranch(ContBlock);
3866 // Emit code to load the value if it was passed in memory.
3868 CGF.EmitBlock(InMemBlock);
3869 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3871 // Return the appropriate result.
3873 CGF.EmitBlock(ContBlock);
3874 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3875 "vaarg.addr");
3877 return ResAddr;
3878 }
3879 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3880 QualType Ty) const {
3881 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3882 CGF.getContext().getTypeInfoInChars(Ty),
3883 CharUnits::fromQuantity(8),
3884 /*allowHigherAlign*/ false);
3885 }
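// Editorial note (sketch of the call above): for x86-64 functions using
// __attribute__((ms_abi)), every variadic slot is 8 bytes and no higher
// alignment is honored, so va_arg(ap, double) simply reads the next
// 8-byte slot.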
3887 ABIArgInfo
3888 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
3889 const ABIArgInfo &current) const {
3890 // Assumes vectorCall calling convention.
3891 const Type *Base = nullptr;
3892 uint64_t NumElts = 0;
3894 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3895 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3896 FreeSSERegs -= NumElts;
3897 return getDirectX86Hva();
3898 }
3900 return current;
3901 }
3902 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3903 bool IsReturnType, bool IsVectorCall,
3904 bool IsRegCall) const {
3906 if (Ty->isVoidType())
3907 return ABIArgInfo::getIgnore();
3909 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3910 Ty = EnumTy->getDecl()->getIntegerType();
3912 TypeInfo Info = getContext().getTypeInfo(Ty);
3913 uint64_t Width = Info.Width;
3914 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3916 const RecordType *RT = Ty->getAs<RecordType>();
3918 if (!IsReturnType) {
3919 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3920 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3923 if (RT->getDecl()->hasFlexibleArrayMember())
3924 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3928 const Type *Base = nullptr;
3929 uint64_t NumElts = 0;
3930 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3931 // other targets.
3932 if ((IsVectorCall || IsRegCall) &&
3933 isHomogeneousAggregate(Ty, Base, NumElts)) {
3934 if (IsRegCall) {
3935 if (FreeSSERegs >= NumElts) {
3936 FreeSSERegs -= NumElts;
3937 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3938 return ABIArgInfo::getDirect();
3939 return ABIArgInfo::getExpand();
3941 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3942 } else if (IsVectorCall) {
3943 if (FreeSSERegs >= NumElts &&
3944 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
3945 FreeSSERegs -= NumElts;
3946 return ABIArgInfo::getDirect();
3947 } else if (IsReturnType) {
3948 return ABIArgInfo::getExpand();
3949 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
3950 // HVAs are delayed and reclassified in the 2nd step.
3951 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3952 }
3953 }
3956 if (Ty->isMemberPointerType()) {
3957 // If the member pointer is represented by an LLVM int or ptr, pass it
3959 llvm::Type *LLTy = CGT.ConvertType(Ty);
3960 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3961 return ABIArgInfo::getDirect();
3964 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3965 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3966 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3967 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3968 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3970 // Otherwise, coerce it to a small integer.
3971 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3974 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3975 switch (BT->getKind()) {
3976 case BuiltinType::Bool:
3977 // Bool type is always extended to the ABI, other builtin types are not
3978 // extended.
3979 return ABIArgInfo::getExtend(Ty);
3981 case BuiltinType::LongDouble:
3982 // Mingw64 GCC uses the old 80-bit extended precision floating point
3983 // unit. It passes long doubles indirectly through memory.
3984 if (IsMingw64) {
3985 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3986 if (LDF == &llvm::APFloat::x87DoubleExtended())
3987 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3988 }
3989 break;
3991 case BuiltinType::Int128:
3992 case BuiltinType::UInt128:
3993 // If it's a parameter type, the normal ABI rule is that arguments larger
3994 // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
3995 // even though it isn't particularly efficient.
3996 if (!IsReturnType)
3997 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3999 // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
4000 // Clang matches them for compatibility.
4001 return ABIArgInfo::getDirect(
4002 llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2));
4004 default:
4005 break;
4006 }
4007 }
4009 return ABIArgInfo::getDirect();
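// Editorial examples (illustrative only) of the MS x64 rules above:
//   struct S3 { char c[3]; };  // 3 bytes, not a power of 2 -> by reference
//   struct S8 { int a, b; };   // 8 bytes -> coerced to i64, passed directly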
4012 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
4013 unsigned FreeSSERegs,
4015 bool IsRegCall) const {
4016 unsigned Count = 0;
4017 for (auto &I : FI.arguments()) {
4018 // Vectorcall in x64 only permits the first 6 arguments to be passed
4019 // as XMM/YMM registers.
4020 if (Count < VectorcallMaxParamNumAsReg)
4021 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4022 else {
4023 // Since these cannot be passed in registers, pretend no registers
4024 // are available.
4025 unsigned ZeroSSERegsAvail = 0;
4026 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
4027 IsVectorCall, IsRegCall);
4028 }
4029 ++Count;
4030 }
4032 for (auto &I : FI.arguments()) {
4033 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
4034 }
4035 }
4037 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4038 bool IsVectorCall =
4039 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
4040 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
4042 unsigned FreeSSERegs = 0;
4043 if (IsVectorCall) {
4044 // We can use up to 4 SSE return registers with vectorcall.
4045 FreeSSERegs = 4;
4046 } else if (IsRegCall) {
4047 // RegCall gives us 16 SSE registers.
4048 FreeSSERegs = 16;
4049 }
4051 if (!getCXXABI().classifyReturnType(FI))
4052 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
4053 IsVectorCall, IsRegCall);
4055 if (IsVectorCall) {
4056 // We can use up to 6 SSE register parameters with vectorcall.
4057 FreeSSERegs = 6;
4058 } else if (IsRegCall) {
4059 // RegCall gives us 16 SSE registers; we can reuse the return registers.
4060 FreeSSERegs = 16;
4061 }
4063 if (IsVectorCall) {
4064 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4065 } else {
4066 for (auto &I : FI.arguments())
4067 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4068 }
4069 }
4072 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4073 QualType Ty) const {
4075 bool IsIndirect = false;
4077 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4078 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4079 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
4080 uint64_t Width = getContext().getTypeSize(Ty);
4081 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4082 }
4084 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4085 CGF.getContext().getTypeInfoInChars(Ty),
4086 CharUnits::fromQuantity(8),
4087 /*allowHigherAlign*/ false);
4088 }
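// Editorial example (illustrative only): with the rule above,
// va_arg(ap, __int64) loads directly from the current 8-byte slot, while a
// 16-byte struct sets IsIndirect, so the slot holds a pointer that is then
// dereferenced to reach the actual argument.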
4092 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4093 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4094 bool IsSoftFloatABI;
4096 CharUnits getParamTypeAlignment(QualType Ty) const;
4099 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
4100 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4102 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4103 QualType Ty) const override;
4106 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4108 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
4109 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
4111 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4112 // This is recovered from gcc output.
4113 return 1; // r1 is the dedicated stack pointer
4116 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4117 llvm::Value *Address) const override;
4121 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4122 // Complex types are passed just like their elements
4123 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4124 Ty = CTy->getElementType();
4126 if (Ty->isVectorType())
4127 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
4128 : 4);
4130 // For single-element float/vector structs, we consider the whole type
4131 // to have the same alignment requirements as its single element.
4132 const Type *AlignTy = nullptr;
4133 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
4134 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4135 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
4136 (BT && BT->isFloatingPoint()))
4137 AlignTy = EltType;
4138 }
4140 if (AlignTy)
4141 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
4142 return CharUnits::fromQuantity(4);
4145 // TODO: this implementation is now likely redundant with
4146 // DefaultABIInfo::EmitVAArg.
4147 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4148 QualType Ty) const {
4149 if (getTarget().getTriple().isOSDarwin()) {
4150 auto TI = getContext().getTypeInfoInChars(Ty);
4151 TI.second = getParamTypeAlignment(Ty);
4153 CharUnits SlotSize = CharUnits::fromQuantity(4);
4154 return emitVoidPtrVAArg(CGF, VAList, Ty,
4155 classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
4156 /*AllowHigherAlign=*/true);
4159 const unsigned OverflowLimit = 8;
4160 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4161 // TODO: Implement this. For now ignore.
4163 return Address::invalid(); // FIXME?
4166 // struct __va_list_tag {
4167 // unsigned char gpr;
4168 // unsigned char fpr;
4169 // unsigned short reserved;
4170 // void *overflow_arg_area;
4171 // void *reg_save_area;
4174 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4175 bool isInt =
4176 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
4177 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4179 // All aggregates are passed indirectly? That doesn't seem consistent
4180 // with the argument-lowering code.
4181 bool isIndirect = Ty->isAggregateType();
4183 CGBuilderTy &Builder = CGF.Builder;
4185 // The calling convention either uses 1-2 GPRs or 1 FPR.
4186 Address NumRegsAddr = Address::invalid();
4187 if (isInt || IsSoftFloatABI) {
4188 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
4190 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
4193 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4195 // "Align" the register count when TY is i64.
4196 if (isI64 || (isF64 && IsSoftFloatABI)) {
4197 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4198 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4202 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4204 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4205 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4206 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4208 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4210 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4211 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4213 // Case 1: consume registers.
4214 Address RegAddr = Address::invalid();
4216 CGF.EmitBlock(UsingRegs);
4218 Address RegSaveAreaPtr =
4219 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
4220 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4221 CharUnits::fromQuantity(8));
4222 assert(RegAddr.getElementType() == CGF.Int8Ty);
4224 // Floating-point registers start after the general-purpose registers.
4225 if (!(isInt || IsSoftFloatABI)) {
4226 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4227 CharUnits::fromQuantity(32));
4228 }
4230 // Get the address of the saved value by scaling the number of
4231 // registers we've used by the size of each register.
4232 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4233 llvm::Value *RegOffset =
4234 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4235 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4236 RegAddr.getPointer(), RegOffset),
4237 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4238 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4240 // Increase the used-register count.
4241 NumRegs =
4242 Builder.CreateAdd(NumRegs,
4243 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4244 Builder.CreateStore(NumRegs, NumRegsAddr);
4246 CGF.EmitBranch(Cont);
4249 // Case 2: consume space in the overflow area.
4250 Address MemAddr = Address::invalid();
4252 CGF.EmitBlock(UsingOverflow);
4254 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4256 // Everything in the overflow area is rounded up to a size of at least 4.
4257 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4259 CharUnits Size;
4260 if (!isIndirect) {
4261 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4262 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
4263 } else {
4264 Size = CGF.getPointerSize();
4265 }
4267 Address OverflowAreaAddr =
4268 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
4269 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4271 // Round up address of argument to alignment
4272 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4273 if (Align > OverflowAreaAlign) {
4274 llvm::Value *Ptr = OverflowArea.getPointer();
4275 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4276 Align);
4277 }
4279 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4281 // Increase the overflow area.
4282 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4283 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4284 CGF.EmitBranch(Cont);
4287 CGF.EmitBlock(Cont);
4289 // Merge the cases with a phi.
4290 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4291 "vaarg.addr");
4293 // Load the pointer if the argument was passed indirectly.
4295 Result = Address(Builder.CreateLoad(Result, "aggr"),
4296 getContext().getTypeAlignInChars(Ty));
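// Editorial example (illustrative only): for va_arg(ap, long long) on
// 32-bit PPC, the code above first rounds the GPR count up to an even
// number, then either consumes two 4-byte GPR slots (if fewer than 8 are
// used) and bumps the count by 2, or falls through to the overflow area.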
4303 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4304 llvm::Value *Address) const {
4305 // This is calculated from the LLVM and GCC tables and verified
4306 // against gcc output. AFAIK all ABIs use the same encoding.
4308 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4310 llvm::IntegerType *i8 = CGF.Int8Ty;
4311 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4312 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4313 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4315 // 0-31: r0-31, the 4-byte general-purpose registers
4316 AssignToArrayRange(Builder, Address, Four8, 0, 31);
4318 // 32-63: fp0-31, the 8-byte floating-point registers
4319 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4321 // 64-76 are various 4-byte special-purpose registers:
4322 // 64: mq
4323 // 65: lr
4324 // 66: ctr
4325 // 67: ap
4326 // 68-75 cr0-7
4327 // 76: xer
4328 AssignToArrayRange(Builder, Address, Four8, 64, 76);
4330 // 77-108: v0-31, the 16-byte vector registers
4331 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4333 // 109: vrsave
4334 // 110: vscr
4335 // 111: spe_acc
4336 // 112: spefscr
4337 // 113: sfp
4338 AssignToArrayRange(Builder, Address, Four8, 109, 113);
4340 return false;
4341 }
4346 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4347 class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
4348 public:
4349 enum ABIKind {
4350 ELFv1 = 0,
4351 ELFv2
4352 };
4354 private:
4355 static const unsigned GPRBits = 64;
4356 ABIKind Kind;
4357 bool HasQPX;
4358 bool IsSoftFloatABI;
4360 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
4361 // will be passed in a QPX register.
4362 bool IsQPXVectorTy(const Type *Ty) const {
4363 if (!HasQPX)
4364 return false;
4366 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4367 unsigned NumElements = VT->getNumElements();
4368 if (NumElements == 1)
4369 return false;
4371 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4372 if (getContext().getTypeSize(Ty) <= 256)
4373 return true;
4374 } else if (VT->getElementType()->
4375 isSpecificBuiltinType(BuiltinType::Float)) {
4376 if (getContext().getTypeSize(Ty) <= 128)
4377 return true;
4378 }
4379 }
4381 return false;
4382 }
4384 bool IsQPXVectorTy(QualType Ty) const {
4385 return IsQPXVectorTy(Ty.getTypePtr());
4389 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
4390 bool SoftFloatABI)
4391 : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
4392 IsSoftFloatABI(SoftFloatABI) {}
4394 bool isPromotableTypeForABI(QualType Ty) const;
4395 CharUnits getParamTypeAlignment(QualType Ty) const;
4397 ABIArgInfo classifyReturnType(QualType RetTy) const;
4398 ABIArgInfo classifyArgumentType(QualType Ty) const;
4400 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4401 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4402 uint64_t Members) const override;
4404 // TODO: We can add more logic to computeInfo to improve performance.
4405 // Example: For aggregate arguments that fit in a register, we could
4406 // use getDirectInReg (as is done below for structs containing a single
4407 // floating-point value) to avoid pushing them to memory on function
4408 // entry. This would require changing the logic in PPCISelLowering
4409 // when lowering the parameters in the caller and args in the callee.
4410 void computeInfo(CGFunctionInfo &FI) const override {
4411 if (!getCXXABI().classifyReturnType(FI))
4412 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4413 for (auto &I : FI.arguments()) {
4414 // We rely on the default argument classification for the most part.
4415 // One exception: An aggregate containing a single floating-point
4416 // or vector item must be passed in a register if one is available.
4417 const Type *T = isSingleElementStruct(I.type, getContext());
4418 if (T) {
4419 const BuiltinType *BT = T->getAs<BuiltinType>();
4420 if (IsQPXVectorTy(T) ||
4421 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4422 (BT && BT->isFloatingPoint())) {
4423 QualType QT(T, 0);
4424 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4425 continue;
4426 }
4427 }
4428 I.info = classifyArgumentType(I.type);
4432 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4433 QualType Ty) const override;
4435 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
4436 bool asReturnValue) const override {
4437 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4440 bool isSwiftErrorInRegister() const override {
4441 return false;
4442 }
4443 };
4445 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4448 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4449 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
4450 bool SoftFloatABI)
4451 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
4452 SoftFloatABI)) {}
4454 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4455 // This is recovered from gcc output.
4456 return 1; // r1 is the dedicated stack pointer
4459 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4460 llvm::Value *Address) const override;
4463 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4465 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4467 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4468 // This is recovered from gcc output.
4469 return 1; // r1 is the dedicated stack pointer
4472 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4473 llvm::Value *Address) const override;
4478 // Return true if the ABI requires Ty to be passed sign- or zero-
4479 // extended to 64 bits.
4480 bool
4481 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4482 // Treat an enum type as its underlying type.
4483 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4484 Ty = EnumTy->getDecl()->getIntegerType();
4486 // Promotable integer types are required to be promoted by the ABI.
4487 if (Ty->isPromotableIntegerType())
4488 return true;
4490 // In addition to the usual promotable integer types, we also need to
4491 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4492 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4493 switch (BT->getKind()) {
4494 case BuiltinType::Int:
4495 case BuiltinType::UInt:
4496 return true;
4497 default:
4498 break;
4499 }
4501 return false;
4502 }
4504 /// getParamTypeAlignment - Determine whether a type requires 16-byte or
4505 /// higher alignment in the parameter area. Always returns at least 8.
4506 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4507 // Complex types are passed just like their elements.
4508 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4509 Ty = CTy->getElementType();
4511 // Only vector types of size 16 bytes need alignment (larger types are
4512 // passed via reference, smaller types are not aligned).
4513 if (IsQPXVectorTy(Ty)) {
4514 if (getContext().getTypeSize(Ty) > 128)
4515 return CharUnits::fromQuantity(32);
4517 return CharUnits::fromQuantity(16);
4518 } else if (Ty->isVectorType()) {
4519 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
4522 // For single-element float/vector structs, we consider the whole type
4523 // to have the same alignment requirements as its single element.
4524 const Type *AlignAsType = nullptr;
4525 const Type *EltType = isSingleElementStruct(Ty, getContext());
4526 if (EltType) {
4527 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4528 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
4529 getContext().getTypeSize(EltType) == 128) ||
4530 (BT && BT->isFloatingPoint()))
4531 AlignAsType = EltType;
4532 }
4534 // Likewise for ELFv2 homogeneous aggregates.
4535 const Type *Base = nullptr;
4536 uint64_t Members = 0;
4537 if (!AlignAsType && Kind == ELFv2 &&
4538 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
4539 AlignAsType = Base;
4541 // With special case aggregates, only vector base types need alignment.
4542 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4543 if (getContext().getTypeSize(AlignAsType) > 128)
4544 return CharUnits::fromQuantity(32);
4546 return CharUnits::fromQuantity(16);
4547 } else if (AlignAsType) {
4548 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
4551 // Otherwise, we only need alignment for any aggregate type that
4552 // has an alignment requirement of >= 16 bytes.
4553 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
4554 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
4555 return CharUnits::fromQuantity(32);
4556 return CharUnits::fromQuantity(16);
4559 return CharUnits::fromQuantity(8);
4562 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
4563 /// aggregate. Base is set to the base element type, and Members is set
4564 /// to the number of base elements.
4565 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
4566 uint64_t &Members) const {
4567 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
4568 uint64_t NElements = AT->getSize().getZExtValue();
4569 if (NElements == 0)
4570 return false;
4571 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
4572 return false;
4573 Members *= NElements;
4574 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
4575 const RecordDecl *RD = RT->getDecl();
4576 if (RD->hasFlexibleArrayMember())
4577 return false;
4579 Members = 0;
4581 // If this is a C++ record, check the bases first.
4582 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4583 for (const auto &I : CXXRD->bases()) {
4584 // Ignore empty records.
4585 if (isEmptyRecord(getContext(), I.getType(), true))
4586 continue;
4588 uint64_t FldMembers;
4589 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
4590 return false;
4592 Members += FldMembers;
4593 }
4594 }
4596 for (const auto *FD : RD->fields()) {
4597 // Ignore (non-zero arrays of) empty records.
4598 QualType FT = FD->getType();
4599 while (const ConstantArrayType *AT =
4600 getContext().getAsConstantArrayType(FT)) {
4601 if (AT->getSize().getZExtValue() == 0)
4602 return false;
4603 FT = AT->getElementType();
4604 }
4605 if (isEmptyRecord(getContext(), FT, true))
4606 continue;
4608 // For compatibility with GCC, ignore empty bitfields in C++ mode.
4609 if (getContext().getLangOpts().CPlusPlus &&
4610 FD->isZeroLengthBitField(getContext()))
4611 continue;
4613 uint64_t FldMembers;
4614 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
4615 return false;
4617 Members = (RD->isUnion() ?
4618 std::max(Members, FldMembers) : Members + FldMembers);
4619 }
4621 if (!Base)
4622 return false;
4624 // Ensure there is no padding.
4625 if (getContext().getTypeSize(Base) * Members !=
4626 getContext().getTypeSize(Ty))
4627 return false;
4628 } else {
4629 Members = 1;
4630 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4631 Members = 2;
4632 Ty = CT->getElementType();
4633 }
4634 }
4635 // Most ABIs only support float, double, and some vector type widths.
4636 if (!isHomogeneousAggregateBaseType(Ty))
4637 return false;
4639 // The base type must be the same for all members. Types that
4640 // agree in both total size and mode (float vs. vector) are
4641 // treated as being equivalent here.
4642 const Type *TyPtr = Ty.getTypePtr();
4643 if (!Base) {
4644 Base = TyPtr;
4645 // If the vector has a non-power-of-2 element count, its total size has
4646 // already been rounded up to a power of 2, so widen the base type to match.
4647 if (const VectorType *VT = Base->getAs<VectorType>()) {
4648 QualType EltTy = VT->getElementType();
4649 unsigned NumElements =
4650 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
4651 Base = getContext()
4652 .getVectorType(EltTy, NumElements, VT->getVectorKind())
4653 .getTypePtr();
4654 }
4655 }
4657 if (Base->isVectorType() != TyPtr->isVectorType() ||
4658 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
4659 return false;
4661 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
4662 }
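// Editorial examples (illustrative only) for the predicate above:
//   struct HFA { float a, b, c; };   // Base = float, Members = 3: accepted
//   struct Mix { float f; int i; };  // rejected: int is not a valid base
// The padding check also rejects any type whose total size differs from
// Members * sizeof(Base).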
4664 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4665 // Homogeneous aggregates for ELFv2 must have base types of float,
4666 // double, long double, or 128-bit vectors.
4667 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4668 if (BT->getKind() == BuiltinType::Float ||
4669 BT->getKind() == BuiltinType::Double ||
4670 BT->getKind() == BuiltinType::LongDouble ||
4671 (getContext().getTargetInfo().hasFloat128Type() &&
4672 (BT->getKind() == BuiltinType::Float128))) {
4673 if (IsSoftFloatABI)
4674 return false;
4675 return true;
4676 }
4677 }
4678 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4679 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4680 return true;
4681 }
4683 return false;
4684 }
4685 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4686 const Type *Base, uint64_t Members) const {
4687 // Vector and fp128 types require one register; other floating point types
4688 // require one or two registers depending on their size.
4689 uint64_t NumRegs =
4690 ((getContext().getTargetInfo().hasFloat128Type() &&
4691 Base->isFloat128Type()) ||
4692 Base->isVectorType()) ? 1
4693 : (getContext().getTypeSize(Base) + 63) / 64;
4695 // Homogeneous Aggregates may occupy at most 8 registers.
4696 return Members * NumRegs <= 8;
4697 }
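// Editorial example (illustrative only): a 128-bit long double counts as
// (128 + 63) / 64 == 2 registers here, so an aggregate of five long
// doubles (5 * 2 > 8) is rejected, while eight floats (8 * 1 <= 8) still
// qualify as a homogeneous aggregate.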
4699 ABIArgInfo
4700 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
4701 Ty = useFirstFieldIfTransparentUnion(Ty);
4703 if (Ty->isAnyComplexType())
4704 return ABIArgInfo::getDirect();
4706 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
4707 // or via reference (larger than 16 bytes).
4708 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
4709 uint64_t Size = getContext().getTypeSize(Ty);
4710 if (Size > 128)
4711 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4712 else if (Size < 128) {
4713 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4714 return ABIArgInfo::getDirect(CoerceTy);
4715 }
4716 }
4718 if (isAggregateTypeForABI(Ty)) {
4719 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4720 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4722 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4723 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
4725 // ELFv2 homogeneous aggregates are passed as array types.
4726 const Type *Base = nullptr;
4727 uint64_t Members = 0;
4728 if (Kind == ELFv2 &&
4729 isHomogeneousAggregate(Ty, Base, Members)) {
4730 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4731 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4732 return ABIArgInfo::getDirect(CoerceTy);
4735 // If an aggregate may end up fully in registers, we do not
4736 // use the ByVal method, but pass the aggregate as array.
4737 // This is usually beneficial since we avoid forcing the
4738 // back-end to store the argument to memory.
4739 uint64_t Bits = getContext().getTypeSize(Ty);
4740 if (Bits > 0 && Bits <= 8 * GPRBits) {
4741 llvm::Type *CoerceTy;
4743 // Types up to 8 bytes are passed as integer type (which will be
4744 // properly aligned in the argument save area doubleword).
4745 if (Bits <= GPRBits)
4747 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4748 // Larger types are passed as arrays, with the base type selected
4749 // according to the required alignment in the save area.
4751 uint64_t RegBits = ABIAlign * 8;
4752 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4753 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4754 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4755 }
4757 return ABIArgInfo::getDirect(CoerceTy);
4758 }
4760 // All other aggregates are passed ByVal.
4761 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
4762 /*ByVal=*/true,
4763 /*Realign=*/TyAlign > ABIAlign);
4764 }
4766 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
4767 : ABIArgInfo::getDirect());
4768 }
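// Editorial examples (illustrative only) of the ELFv2 rules above:
//   struct V2D { double x, y; };   // homogeneous -> passed as [2 x double]
//   struct B24 { long a, b, c; };  // 24 bytes, 8-byte align -> [3 x i64]
// both are passed as direct array types instead of ByVal memory.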
4770 ABIArgInfo
4771 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4772 if (RetTy->isVoidType())
4773 return ABIArgInfo::getIgnore();
4775 if (RetTy->isAnyComplexType())
4776 return ABIArgInfo::getDirect();
4778 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
4779 // or via reference (larger than 16 bytes).
4780 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
4781 uint64_t Size = getContext().getTypeSize(RetTy);
4782 if (Size > 128)
4783 return getNaturalAlignIndirect(RetTy);
4784 else if (Size < 128) {
4785 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4786 return ABIArgInfo::getDirect(CoerceTy);
4787 }
4788 }
4790 if (isAggregateTypeForABI(RetTy)) {
4791 // ELFv2 homogeneous aggregates are returned as array types.
4792 const Type *Base = nullptr;
4793 uint64_t Members = 0;
4794 if (Kind == ELFv2 &&
4795 isHomogeneousAggregate(RetTy, Base, Members)) {
4796 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4797 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4798 return ABIArgInfo::getDirect(CoerceTy);
4801 // ELFv2 small aggregates are returned in up to two registers.
4802 uint64_t Bits = getContext().getTypeSize(RetTy);
4803 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
4804 if (Bits == 0)
4805 return ABIArgInfo::getIgnore();
4807 llvm::Type *CoerceTy;
4808 if (Bits > GPRBits) {
4809 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4810 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4811 } else
4812 CoerceTy =
4813 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4814 return ABIArgInfo::getDirect(CoerceTy);
4815 }
4817 // All other aggregates are returned indirectly.
4818 return getNaturalAlignIndirect(RetTy);
4821 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
4822 : ABIArgInfo::getDirect());
4823 }
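// Editorial example (illustrative only): under ELFv2 a 12-byte
//   struct R { int a, b, c; };
// has Bits == 96 <= 2 * GPRBits, and since 96 > 64 it is returned as the
// pair { i64, i64 } in r3/r4 rather than indirectly.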
4825 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
4826 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4827 QualType Ty) const {
4828 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4829 TypeInfo.second = getParamTypeAlignment(Ty);
4831 CharUnits SlotSize = CharUnits::fromQuantity(8);
4833 // If we have a complex type and the base type is smaller than 8 bytes,
4834 // the ABI calls for the real and imaginary parts to be right-adjusted
4835 // in separate doublewords. However, Clang expects us to produce a
4836 // pointer to a structure with the two parts packed tightly. So generate
4837 // loads of the real and imaginary parts relative to the va_list pointer,
4838 // and store them to a temporary structure.
4839 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4840 CharUnits EltSize = TypeInfo.first / 2;
4841 if (EltSize < SlotSize) {
4842 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
4843 SlotSize * 2, SlotSize,
4844 SlotSize, /*AllowHigher*/ true);
4846 Address RealAddr = Addr;
4847 Address ImagAddr = RealAddr;
4848 if (CGF.CGM.getDataLayout().isBigEndian()) {
4849 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
4850 SlotSize - EltSize);
4851 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
4852 2 * SlotSize - EltSize);
4853 } else {
4854 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
4855 }
4857 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
4858 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
4859 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
4860 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
4861 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
4863 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
4864 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
4865 /*init*/ true);
4866 return Temp;
4867 }
4868 }
4870 // Otherwise, just use the general rule.
4871 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
4872 TypeInfo, SlotSize, /*AllowHigher*/ true);
4873 }
4875 static bool
4876 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4877 llvm::Value *Address) {
4878 // This is calculated from the LLVM and GCC tables and verified
4879 // against gcc output. AFAIK all ABIs use the same encoding.
4881 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4883 llvm::IntegerType *i8 = CGF.Int8Ty;
4884 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4885 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4886 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4888 // 0-31: r0-31, the 8-byte general-purpose registers
4889 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
4891 // 32-63: fp0-31, the 8-byte floating-point registers
4892 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4894 // 64-67 are various 8-byte special-purpose registers:
4895 // 64: mq
4896 // 65: lr
4897 // 66: ctr
4898 // 67: ap
4899 AssignToArrayRange(Builder, Address, Eight8, 64, 67);
4901 // 68-76 are various 4-byte special-purpose registers:
4902 // 68-75 cr0-7
4903 // 76: xer
4904 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4906 // 77-108: v0-31, the 16-byte vector registers
4907 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4909 // 109: vrsave
4910 // 110: vscr
4911 // 111: spe_acc
4912 // 112: spefscr
4913 // 113: sfp
4914 // 114: tfhar
4915 // 115: tfiar
4916 // 116: texasr
4917 AssignToArrayRange(Builder, Address, Eight8, 109, 116);
4919 return false;
4920 }
4923 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4924 CodeGen::CodeGenFunction &CGF,
4925 llvm::Value *Address) const {
4927 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4931 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4932 llvm::Value *Address) const {
4934 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4937 //===----------------------------------------------------------------------===//
4938 // AArch64 ABI Implementation
4939 //===----------------------------------------------------------------------===//
4943 class AArch64ABIInfo : public SwiftABIInfo {
4944 public:
4945 enum ABIKind {
4946 AAPCS = 0,
4947 DarwinPCS,
4948 Win64
4949 };
4951 private:
4952 ABIKind Kind;
4954 public:
4955 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
4956 : SwiftABIInfo(CGT), Kind(Kind) {}
4959 ABIKind getABIKind() const { return Kind; }
4960 bool isDarwinPCS() const { return Kind == DarwinPCS; }
4962 ABIArgInfo classifyReturnType(QualType RetTy) const;
4963 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4964 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4965 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4966 uint64_t Members) const override;
4968 bool isIllegalVectorType(QualType Ty) const;
4970 void computeInfo(CGFunctionInfo &FI) const override {
4971 if (!::classifyReturnType(getCXXABI(), FI, *this))
4972 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4974 for (auto &it : FI.arguments())
4975 it.info = classifyArgumentType(it.type);
4978 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4979 CodeGenFunction &CGF) const;
4981 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
4982 CodeGenFunction &CGF) const;
4984 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4985 QualType Ty) const override {
4986 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
4987 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4988 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4991 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4992 QualType Ty) const override;
4994 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
4995 bool asReturnValue) const override {
4996 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4998 bool isSwiftErrorInRegister() const override {
4999 return true;
5000 }
5002 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
5003 unsigned elts) const override;
5004 };
5006 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
5008 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
5009 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
5011 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5012 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5015 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5016 return 31;
5017 }
5019 bool doesReturnSlotInterfereWithArgs() const override { return false; }
5021 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5022 CodeGen::CodeGenModule &CGM) const override {
5023 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5024 if (!FD)
5025 return;
5026 llvm::Function *Fn = cast<llvm::Function>(GV);
5028 auto Kind = CGM.getCodeGenOpts().getSignReturnAddress();
5029 if (Kind != CodeGenOptions::SignReturnAddressScope::None) {
5030 Fn->addFnAttr("sign-return-address",
5031 Kind == CodeGenOptions::SignReturnAddressScope::All
5035 auto Key = CGM.getCodeGenOpts().getSignReturnAddressKey();
5036 Fn->addFnAttr("sign-return-address-key",
5037 Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
5042 if (CGM.getCodeGenOpts().BranchTargetEnforcement)
5043 Fn->addFnAttr("branch-target-enforcement");
5047 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
5049 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
5050 : AArch64TargetCodeGenInfo(CGT, K) {}
5052 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5053 CodeGen::CodeGenModule &CGM) const override;
5055 void getDependentLibraryOption(llvm::StringRef Lib,
5056 llvm::SmallString<24> &Opt) const override {
5057 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5060 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5061 llvm::SmallString<32> &Opt) const override {
5062 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5066 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5067 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5068 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5069 if (GV->isDeclaration())
5070 return;
5071 addStackProbeTargetAttributes(D, GV, CGM);
5072 }
5075 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
5076 Ty = useFirstFieldIfTransparentUnion(Ty);
5078 // Handle illegal vector types here.
5079 if (isIllegalVectorType(Ty)) {
5080 uint64_t Size = getContext().getTypeSize(Ty);
5081 // Android promotes <2 x i8> to i16, not i32
5082 if (isAndroid() && (Size <= 16)) {
5083 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
5084 return ABIArgInfo::getDirect(ResType);
5085 }
5086 if (Size <= 32) {
5087 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
5088 return ABIArgInfo::getDirect(ResType);
5089 }
5090 if (Size == 64) {
5091 llvm::Type *ResType =
5092 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
5093 return ABIArgInfo::getDirect(ResType);
5094 }
5095 if (Size == 128) {
5096 llvm::Type *ResType =
5097 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
5098 return ABIArgInfo::getDirect(ResType);
5099 }
5100 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5101 }
5103 if (!isAggregateTypeForABI(Ty)) {
5104 // Treat an enum type as its underlying type.
5105 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5106 Ty = EnumTy->getDecl()->getIntegerType();
5108 return (Ty->isPromotableIntegerType() && isDarwinPCS()
5109 ? ABIArgInfo::getExtend(Ty)
5110 : ABIArgInfo::getDirect());
5113 // Structures with either a non-trivial destructor or a non-trivial
5114 // copy constructor are always indirect.
5115 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5116 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
5117 CGCXXABI::RAA_DirectInMemory);
5120 // Empty records are always ignored on Darwin, but actually passed in C++ mode
5121 // elsewhere for GNU compatibility.
5122 uint64_t Size = getContext().getTypeSize(Ty);
5123 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
5124 if (IsEmpty || Size == 0) {
5125 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
5126 return ABIArgInfo::getIgnore();
5128 // GNU C mode. The only argument that gets ignored is an empty one with size
5130 if (IsEmpty && Size == 0)
5131 return ABIArgInfo::getIgnore();
5132 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5135 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
5136 const Type *Base = nullptr;
5137 uint64_t Members = 0;
5138 if (isHomogeneousAggregate(Ty, Base, Members)) {
5139 return ABIArgInfo::getDirect(
5140 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
5143 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
5145 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
5146 // same size and alignment.
5147 if (getTarget().isRenderScriptTarget()) {
5148 return coerceToIntArray(Ty, getContext(), getVMContext());
5151 if (Kind == AArch64ABIInfo::AAPCS) {
5152 Alignment = getContext().getTypeUnadjustedAlign(Ty);
5153 Alignment = Alignment < 128 ? 64 : 128;
5155 Alignment = getContext().getTypeAlign(Ty);
5157 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5159 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5160 // For aggregates with 16-byte alignment, we use i128.
5161 if (Alignment < 128 && Size == 128) {
5162 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5163 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5165 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5168 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
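
// Illustrative examples (editorial, not from the AAPCS text): under the rules
// above, and assuming a typical non-Darwin AAPCS target,
//   struct S1 { char c; };        // 8 bits  -> rounded to 64  -> passed as i64
//   struct S2 { int a, b, c; };   // 96 bits -> rounded to 128 -> [2 x i64]
//   struct S3 { char buf[17]; };  // > 16 bytes -> passed indirectly
// S2 stays a [2 x i64] pair rather than a single i128 because its unadjusted
// alignment is below 16 bytes.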
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregates with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}
/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;
    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}

bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
                                               llvm::Type *eltTy,
                                               unsigned elts) const {
  if (!llvm::isPowerOf2_32(elts))
    return false;
  if (totalSize.getQuantity() != 8 &&
      (totalSize.getQuantity() != 16 || elts == 1))
    return false;
  return true;
}

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}
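
// Illustrative example (editorial): with the two predicates above,
//   struct V4 { float x, y, z, w; };   // HFA: Base = float, Members = 4
// qualifies and is passed as [4 x float] in VFP registers, whereas
//   struct V5 { float v[5]; };         // Members = 5 > 4
// fails isHomogeneousAggregateSmallEnough and falls back to the ordinary
// aggregate rules (here: 20 bytes > 16, so indirect).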
Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
                                       QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  CharUnits TyAlign = TyInfo.second;

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  CharUnits reg_top_offset;
  int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs.
    reg_offs_p =
        CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
                                    "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    reg_top_offset = CharUnits::fromQuantity(8);
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p =
        CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
                                    "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    reg_top_offset = CharUnits::fromQuantity(16);
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this
  // va_list. The fact that this is done unconditionally reflects the fact that
  // allocating an argument to the stack also uses up all the remaining
  // registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
                                                  reg_top_offset, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
                   CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find
    // from stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements
    // split and stored 16-bytes apart regardless of size (they're notionally
    // in qN, qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.second));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.first.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.first.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
        CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);

      Address StoreAddr =
        CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TyInfo.first < SlotSize) {
      CharUnits Offset = SlotSize - TyInfo.first;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
                                                CharUnits::Zero(), "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer
  // and floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr(OnStackPtr,
                      std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TyInfo.first.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TyInfo.first < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TyInfo.first;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 OnStackAddr, OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
                   TyInfo.second);

  return ResAddr;
}
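
// Editorial sketch of the control flow emitted above for a GPR-class
// argument, written as rough C against the B.4 va_list layout (names are
// illustrative only, not emitted verbatim):
//
//   int offs = ap->__gr_offs;
//   if (offs >= 0)                      // registers already exhausted
//     goto on_stack;
//   ap->__gr_offs = offs + RegSize;     // claim the slot(s) unconditionally
//   if (ap->__gr_offs <= 0)             // argument fit in registers
//     return (char *)ap->__gr_top + offs;
// on_stack:;
//   char *p = ap->__stack;
//   ap->__stack = p + alignTo(size, 8); // stack slots are 8-byte multiples
//   return p;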
Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types. Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.first.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}
Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//
namespace {

class ARMABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP = 2,
    AAPCS16_VFP = 3,
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
      : SwiftABIInfo(CGT), Kind(_Kind) {
    setCCs();
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ABIKind getABIKind() const { return Kind; }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }
  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;
};
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
    if (ABI == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
  }
};
class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() =
        classifyReturnType(FI.getReturnType(), FI.isVariadic());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case APCS: return llvm::CallingConv::ARM_APCS;
  case AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}
ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType =
        llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    llvm::Type *ResType = llvm::VectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
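
// Illustrative examples (editorial): a <2 x i8> vector (16 bits) is widened
// to a plain i32, a 64-bit illegal vector becomes <2 x i32>, a 128-bit one
// becomes <4 x i32>, and anything larger is passed indirectly.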
ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors.
    if (!getTarget().hasLegalHalfType() &&
        (VT->getElementType()->isFloat16Type() ||
         VT->getElementType()->isHalfType())) {
      uint64_t Size = getContext().getTypeSize(VT);
      llvm::Type *NewVecTy = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
                                            bool isVariadic) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  // _Float16 and __fp16 get passed as if they were an int or float, but with
  // the top 16 bits unspecified. This is not done for OpenCL as it handles the
  // half type natively, and does not need to interwork with AAPCS code.
  if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
      !getContext().getLangOpts().NativeHalfArgsAndReturns) {
    llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
      llvm::Type::getFloatTy(getVMContext()) :
      llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsEffectivelyAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128-bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
  // same size and alignment.
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
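
// Illustrative examples (editorial) of the coercion above:
//   struct { char buf[6]; };     // align 1 -> [2 x i32] ((48 + 31) / 32 == 2)
//   struct { double d; int i; }; // align 8 -> [2 x i64] ((128 + 63) / 64 == 2)
// so a non-HFA aggregate always travels as an array of i32 or i64 "registers".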
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A
  // structure is called integer-like if its size is less than or equal to one
  // word, and the offset of each of its addressable sub-fields is zero.
  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (FD->isZeroLengthBitField(Context))
        continue;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      HadField = true;
      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
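
// Illustrative examples (editorial): under the definition above,
//   union U { char c; short s; };    // every member at offset 0 -> integer-like
// is returned in r0, while
//   struct S { short a; short b; };  // b sits at offset 16 bits
// is not integer-like even though it fits in a word, so APCS returns it in
// memory.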
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  bool IsEffectivelyAAPCS_VFP =
      (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // FP16 vectors should be converted to integer vectors.
    if (!getTarget().hasLegalHalfType() &&
        (VT->getElementType()->isFloat16Type() ||
         VT->getElementType()->isHalfType()))
      return coerceIllegalVector(RetTy);
  }

  // _Float16 and __fp16 get returned as if they were an int or float, but with
  // the top 16 bits unspecified. This is not done for OpenCL as it handles the
  // half type natively, and does not need to interwork with AAPCS code.
  if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
      !getContext().getLangOpts().NativeHalfArgsAndReturns) {
    llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
      llvm::Type::getFloatTy(getVMContext()) :
      llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                            : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}
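
// Editorial note with an example: byte order matters above. For
//   struct { char c; };   // 8 bits
// a little-endian AAPCS target returns it as i8, but a big-endian one returns
// i32 so the value lands where an LDR of the whole register expects it.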
/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType> ()) {
    // On targets that don't support FP16, FP16 is expanded into float, and we
    // don't want the ABI to depend on whether or not FP16 is supported in
    // hardware. Thus return true to coerce FP16 vectors into integer vectors.
    if (!getTarget().hasLegalHalfType() &&
        (VT->getElementType()->isFloat16Type() ||
         VT->getElementType()->isHalfType()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be a power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be a power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}
bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                           llvm::Type *eltTy,
                                           unsigned numElts) const {
  if (!llvm::isPowerOf2_32(numElts))
    return false;
  unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
  if (size > 64)
    return false;
  if (vectorSize.getQuantity() != 8 &&
      (vectorSize.getQuantity() != 16 || numElts == 1))
    return false;
  return true;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}
Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  CharUnits TyAlignForABI = TyInfo.second;

  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

  // ARMv7k passes structs bigger than 16 bytes indirectly, in space
  // allocated by the caller.
  } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIInfo::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;

  // Otherwise, bound the type's ABI alignment.
  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  // Our callers should be prepared to handle an under-aligned address.
  } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
             getABIKind() == ARMABIInfo::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // ARMv7k allows type alignment up to 16 bytes.
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }
  TyInfo.second = TyAlignForABI;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
                          SlotSize, /*AllowHigherAlign*/ true);
}
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  bool shouldEmitStaticExternCAliases() const override;

private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // note: this is different from default ABI
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Return aggregate types as indirect by value.
  if (isAggregateTypeForABI(Ty))
    return getNaturalAlignIndirect(Ty, /* byval */ true);

  return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                        : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  llvm_unreachable("NVPTX does not support varargs");
}
void NVPTXTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata.
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
    }
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
      llvm::APSInt MaxThreads(32);
      MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
      if (MaxThreads > 0)
        addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());

      // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
      // not specified in __launch_bounds__ or if the user specified a 0 value,
      // we don't have to add a PTX directive.
      if (Attr->getMinBlocks()) {
        llvm::APSInt MinBlocks(32);
        MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
        if (MinBlocks > 0)
          // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node.
          addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
      }
    }
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
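
// Editorial example: for a kernel function F, the call
// addNVVMMetadata(F, "kernel", 1) appends roughly the following IR:
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @F, !"kernel", i32 1}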
bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}
}
//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public SwiftABIInfo {
  bool HasVector;

public:
  SystemZABIInfo(CodeGenTypes &CGT, bool HV)
      : SwiftABIInfo(CGT), HasVector(HV) {}

  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isVectorArgumentType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;
  QualType GetSingleElementType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
      : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};

}
bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // 32-bit values must also be promoted.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  return (Ty->isAnyComplexType() ||
          Ty->isVectorType() ||
          isAggregateTypeForABI(Ty));
}

bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
  return (HasVector &&
          Ty->isVectorType() &&
          getContext().getTypeSize(Ty) <= 128);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }
  return false;
}
QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
  if (const RecordType *RT = Ty->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    QualType Found;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();

        // Empty bases don't affect things either way.
        if (isEmptyRecord(getContext(), Base, true))
          continue;

        if (!Found.isNull())
          return Ty;
        Found = GetSingleElementType(Base);
      }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      // For compatibility with GCC, ignore empty bitfields in C++ mode.
      // Unlike isSingleElementStruct(), empty structure and array fields
      // do count. So do anonymous bitfields that aren't zero-sized.
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isZeroLengthBitField(getContext()))
        continue;

      // Unlike isSingleElementStruct(), arrays do not count.
      // Nested structures still do though.
      if (!Found.isNull())
        return Ty;
      Found = GetSingleElementType(FD->getType());
    }

    // Unlike isSingleElementStruct(), trailing padding is allowed.
    // An 8-byte aligned struct s { float f; } is passed as a double.
    if (!Found.isNull())
      return Found;
  }

  return Ty;
}
Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };

  // Every non-vector argument occupies 8 bytes and is passed by preference
  // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
  // always passed on the stack.
  Ty = getContext().getCanonicalType(Ty);
  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *DirectTy = ArgTy;
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();
  bool InFPRs = false;
  bool IsVector = false;
  CharUnits UnpaddedSize;
  CharUnits DirectAlign;
  if (IsIndirect) {
    DirectTy = llvm::PointerType::getUnqual(DirectTy);
    UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
  } else {
    if (AI.getCoerceToType())
      ArgTy = AI.getCoerceToType();
    InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
    IsVector = ArgTy->isVectorTy();
    UnpaddedSize = TyInfo.first;
    DirectAlign = TyInfo.second;
  }
  CharUnits PaddedSize = CharUnits::fromQuantity(8);
  if (IsVector && UnpaddedSize > PaddedSize)
    PaddedSize = CharUnits::fromQuantity(16);
  assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");

  CharUnits Padding = (PaddedSize - UnpaddedSize);

  llvm::Type *IndexTy = CGF.Int64Ty;
  llvm::Value *PaddedSizeV =
      llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());

  if (IsVector) {
    // Work out the address of a vector argument on the stack.
    // Vector arguments are always passed in the high bits of a
    // single (8 byte) or double (16 byte) stack slot.
    Address OverflowArgAreaPtr =
        CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
                                    "overflow_arg_area_ptr");
    Address OverflowArgArea =
        Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
                TyInfo.second);
    Address MemAddr =
        CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");

    // Update overflow_arg_area_ptr pointer.
    llvm::Value *NewOverflowArgArea =
        CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
                              "overflow_arg_area");
    CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);

    return MemAddr;
  }

  assert(PaddedSize.getQuantity() == 8);

  unsigned MaxRegs, RegCountField, RegSaveIndex;
  CharUnits RegPadding;
  if (InFPRs) {
    MaxRegs = 4; // Maximum of 4 FPR arguments
    RegCountField = 1; // __fpr
    RegSaveIndex = 16; // save offset for f0
    RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5; // Maximum of 5 GPR arguments
    RegCountField = 0; // __gpr
    RegSaveIndex = 2; // save offset for r2
    RegPadding = Padding; // values are passed in the low bits of a GPR
  }

  Address RegCountPtr = CGF.Builder.CreateStructGEP(
      VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
      "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                  "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // Work out the address of an argument register.
  llvm::Value *ScaledRegCount =
      CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
      llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
                                        + RegPadding.getQuantity());
  llvm::Value *RegOffset =
      CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  Address RegSaveAreaPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
                                  "reg_save_area_ptr");
  llvm::Value *RegSaveArea =
      CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
                                           "raw_reg_addr"),
                     PaddedSize);
  Address RegAddr =
      CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");

  // Update the register count.
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
      CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);

  // Work out the address of a stack argument.
  Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
      VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
  Address OverflowArgArea =
      Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
              PaddedSize);
  Address RawMemAddr =
      CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
  Address MemAddr =
      CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");

  // Update overflow_arg_area_ptr pointer.
  llvm::Value *NewOverflowArgArea =
      CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
                            "overflow_arg_area");
  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
  CGF.EmitBranch(ContBlock);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 MemAddr, InMemBlock, "va_arg.addr");

  if (IsIndirect)
    ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
                      TyInfo.second);

  return ResAddr;
}
ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isVectorArgumentType(RetTy))
    return ABIArgInfo::getDirect();
  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);
  return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                         : ABIArgInfo::getDirect());
}
ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
  // Handle the generic C++ ABI.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Integers and enums are extended to full register width.
  if (isPromotableIntegerType(Ty))
    return ABIArgInfo::getExtend(Ty);

  // Handle vector types and vector-like structure types. Note that
  // as opposed to float-like structure types, we do not allow any
  // padding for vector-like structures, so verify the sizes match.
  uint64_t Size = getContext().getTypeSize(Ty);
  QualType SingleElementTy = GetSingleElementType(Ty);
  if (isVectorArgumentType(SingleElementTy) &&
      getContext().getTypeSize(SingleElementTy) == Size)
    return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));

  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Handle small structures.
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so really
    // fail the size test above.
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // The structure is passed as an unextended integer, a float, or a double.
    llvm::Type *PassTy;
    if (isFPArgumentType(SingleElementTy)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }

  // Non-structure compounds are passed indirectly.
  if (isCompoundType(Ty))
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  return ABIArgInfo::getDirect(nullptr);
}
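
// Illustrative examples (editorial) of the classification above:
//   struct { float f; };          // single FP element, 32 bits -> float
//   struct { char a; short b; };  // 32 bits, not single-element -> i32
//   struct { char buf[3]; };      // 24 bits: not 1/2/4/8 bytes -> indirect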
6772 //===----------------------------------------------------------------------===//
6773 // MSP430 ABI Implementation
6774 //===----------------------------------------------------------------------===//
6776 namespace {
6778 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
6779 public:
6780 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
6781 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6782 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6783 CodeGen::CodeGenModule &M) const override;
6784 };
6786 }
6788 void MSP430TargetCodeGenInfo::setTargetAttributes(
6789 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
6790 if (GV->isDeclaration())
6791 return;
6792 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6793 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
6794 if (!InterruptAttr)
6795 return;
6797 // Handle 'interrupt' attribute:
6798 llvm::Function *F = cast<llvm::Function>(GV);
6800 // Step 1: Set ISR calling convention.
6801 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6803 // Step 2: Add the required function attributes.
6804 F->addFnAttr(llvm::Attribute::NoInline);
6805 F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
6806 }
6807 }
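// Illustrative example (editor's note): a handler written as
//   __attribute__((interrupt(2))) void isr(void);
// therefore gets the MSP430_INTR calling convention, 'noinline', and the
// string attribute "interrupt"="2" on the emitted function.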
6809 //===----------------------------------------------------------------------===//
6810 // MIPS ABI Implementation. This works for both little-endian and
6811 // big-endian variants.
6812 //===----------------------------------------------------------------------===//
6815 class MipsABIInfo : public ABIInfo {
6816 bool IsO32;
6817 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6818 void CoerceToIntArgs(uint64_t TySize,
6819 SmallVectorImpl<llvm::Type *> &ArgList) const;
6820 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
6821 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
6822 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
6823 public:
6824 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
6825 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6826 StackAlignInBytes(IsO32 ? 8 : 16) {}
6828 ABIArgInfo classifyReturnType(QualType RetTy) const;
6829 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
6830 void computeInfo(CGFunctionInfo &FI) const override;
6831 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6832 QualType Ty) const override;
6833 ABIArgInfo extendType(QualType Ty) const;
6834 };
6836 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
6837 unsigned SizeOfUnwindException;
6838 public:
6839 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
6840 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
6841 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6843 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
6844 return 29;
6845 }
6847 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6848 CodeGen::CodeGenModule &CGM) const override {
6849 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6850 if (!FD) return;
6851 llvm::Function *Fn = cast<llvm::Function>(GV);
6853 if (FD->hasAttr<MipsLongCallAttr>())
6854 Fn->addFnAttr("long-call");
6855 else if (FD->hasAttr<MipsShortCallAttr>())
6856 Fn->addFnAttr("short-call");
6858 // Other attributes do not have a meaning for declarations.
6859 if (GV->isDeclaration())
6860 return;
6862 if (FD->hasAttr<Mips16Attr>()) {
6863 Fn->addFnAttr("mips16");
6864 }
6865 else if (FD->hasAttr<NoMips16Attr>()) {
6866 Fn->addFnAttr("nomips16");
6867 }
6869 if (FD->hasAttr<MicroMipsAttr>())
6870 Fn->addFnAttr("micromips");
6871 else if (FD->hasAttr<NoMicroMipsAttr>())
6872 Fn->addFnAttr("nomicromips");
6874 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
6875 if (!Attr)
6876 return;
6878 const char *Kind;
6879 switch (Attr->getInterrupt()) {
6880 case MipsInterruptAttr::eic: Kind = "eic"; break;
6881 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
6882 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
6883 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
6884 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
6885 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
6886 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
6887 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
6888 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
6889 }
6891 Fn->addFnAttr("interrupt", Kind);
6892 }
6895 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6896 llvm::Value *Address) const override;
6898 unsigned getSizeOfUnwindException() const override {
6899 return SizeOfUnwindException;
6900 }
6901 };
6902 }
6904 void MipsABIInfo::CoerceToIntArgs(
6905 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
6906 llvm::IntegerType *IntTy =
6907 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
6909 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
6910 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6911 ArgList.push_back(IntTy);
6913 // If necessary, add one more integer type to ArgList.
6914 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6916 if (R)
6917 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
6918 }
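// Worked example (editor's note): on O32, MinABIStackAlignInBytes is 4, so a
// 9-byte (72-bit) aggregate produces two i32 slots plus a trailing i8:
// ArgList = { i32, i32, i8 }.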
6920 // In N32/64, an aligned double precision floating point field is passed in
6921 // a register.
6922 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
6923 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
6925 if (IsO32) {
6926 CoerceToIntArgs(TySize, ArgList);
6927 return llvm::StructType::get(getVMContext(), ArgList);
6928 }
6930 if (Ty->isComplexType())
6931 return CGT.ConvertType(Ty);
6933 const RecordType *RT = Ty->getAs<RecordType>();
6935 // Unions/vectors are passed in integer registers.
6936 if (!RT || !RT->isStructureOrClassType()) {
6937 CoerceToIntArgs(TySize, ArgList);
6938 return llvm::StructType::get(getVMContext(), ArgList);
6941 const RecordDecl *RD = RT->getDecl();
6942 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6943 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
6945 uint64_t LastOffset = 0;
6946 unsigned idx = 0;
6947 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
6949 // Iterate over fields in the struct/class and check if there are any aligned
6950 // double fields.
6951 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6952 i != e; ++i, ++idx) {
6953 const QualType Ty = i->getType();
6954 const BuiltinType *BT = Ty->getAs<BuiltinType>();
6956 if (!BT || BT->getKind() != BuiltinType::Double)
6957 continue;
6959 uint64_t Offset = Layout.getFieldOffset(idx);
6960 if (Offset % 64) // Ignore doubles that are not aligned.
6961 continue;
6963 // Add ((Offset - LastOffset) / 64) args of type i64.
6964 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6965 ArgList.push_back(I64);
6967 // Add double type.
6968 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
6969 LastOffset = Offset + 64;
6970 }
6972 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6973 ArgList.append(IntArgList.begin(), IntArgList.end());
6975 return llvm::StructType::get(getVMContext(), ArgList);
6976 }
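// Worked example (editor's note): on N64,
//   struct D { double d; int i; };   // 128 bits including tail padding
// has an aligned double at offset 0, so the code above builds the coercion
// type { double, i64 }, keeping the double eligible for an FP register.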
6978 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6979 uint64_t Offset) const {
6980 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6981 return nullptr;
6983 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
6984 }
6986 ABIArgInfo
6987 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
6988 Ty = useFirstFieldIfTransparentUnion(Ty);
6990 uint64_t OrigOffset = Offset;
6991 uint64_t TySize = getContext().getTypeSize(Ty);
6992 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6994 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
6995 (uint64_t)StackAlignInBytes);
6996 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6997 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6999 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
7000 // Ignore empty aggregates.
7001 if (TySize == 0)
7002 return ABIArgInfo::getIgnore();
7004 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
7005 Offset = OrigOffset + MinABIStackAlignInBytes;
7006 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7007 }
7009 // If we have reached here, aggregates are passed directly by coercing to
7010 // another structure type. Padding is inserted if the offset of the
7011 // aggregate is unaligned.
7012 ABIArgInfo ArgInfo =
7013 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
7014 getPaddingType(OrigOffset, CurrOffset));
7015 ArgInfo.setInReg(true);
7016 return ArgInfo;
7017 }
7019 // Treat an enum type as its underlying type.
7020 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7021 Ty = EnumTy->getDecl()->getIntegerType();
7023 // All integral types are promoted to the GPR width.
7024 if (Ty->isIntegralOrEnumerationType())
7025 return extendType(Ty);
7027 return ABIArgInfo::getDirect(
7028 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
7029 }
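// Illustrative example (editor's note): on N64, a hypothetical 16-byte
// aggregate with 16-byte alignment that starts at stack offset 8 is realigned
// to offset 16, and getPaddingType(8, 16) above yields an i64 pad that is
// inserted before the coerced value.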
7031 llvm::Type*
7032 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
7033 const RecordType *RT = RetTy->getAs<RecordType>();
7034 SmallVector<llvm::Type*, 8> RTList;
7036 if (RT && RT->isStructureOrClassType()) {
7037 const RecordDecl *RD = RT->getDecl();
7038 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7039 unsigned FieldCnt = Layout.getFieldCount();
7041 // N32/64 returns struct/classes in floating point registers if the
7042 // following conditions are met:
7043 // 1. The size of the struct/class is no larger than 128-bit.
7044 // 2. The struct/class has one or two fields all of which are floating
7046 // 3. The offset of the first field is zero (this follows what gcc does).
7048 // Any other composite results are returned in integer registers.
7050 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
7051 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
7052 for (; b != e; ++b) {
7053 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
7055 if (!BT || !BT->isFloatingPoint())
7056 break;
7058 RTList.push_back(CGT.ConvertType(b->getType()));
7059 }
7061 if (b == e)
7062 return llvm::StructType::get(getVMContext(), RTList,
7063 RD->hasAttr<PackedAttr>());
7065 RTList.clear();
7066 }
7067 }
7069 CoerceToIntArgs(Size, RTList);
7070 return llvm::StructType::get(getVMContext(), RTList);
7071 }
7073 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
7074 uint64_t Size = getContext().getTypeSize(RetTy);
7076 if (RetTy->isVoidType())
7077 return ABIArgInfo::getIgnore();
7079 // O32 doesn't treat zero-sized structs differently from other structs.
7080 // However, N32/N64 ignores zero sized return values.
7081 if (!IsO32 && Size == 0)
7082 return ABIArgInfo::getIgnore();
7084 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
7085 if (Size <= 128) {
7086 if (RetTy->isAnyComplexType())
7087 return ABIArgInfo::getDirect();
7089 // O32 returns integer vectors in registers and N32/N64 returns all small
7090 // aggregates in registers.
7091 if (!IsO32 ||
7092 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
7093 ABIArgInfo ArgInfo =
7094 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
7095 ArgInfo.setInReg(true);
7096 return ArgInfo;
7097 }
7098 }
7100 return getNaturalAlignIndirect(RetTy);
7101 }
7103 // Treat an enum type as its underlying type.
7104 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7105 RetTy = EnumTy->getDecl()->getIntegerType();
7107 if (RetTy->isPromotableIntegerType())
7108 return ABIArgInfo::getExtend(RetTy);
7110 if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
7111 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
7112 return ABIArgInfo::getSignExtend(RetTy);
7114 return ABIArgInfo::getDirect();
7115 }
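// Illustrative example (editor's note): on N64,
//   struct FF { float a; float b; };   // two FP fields, first at offset 0
// is returned in FP registers as { float, float }, while
//   struct II { int a; int b; };
// falls through to the integer coercion and comes back as { i64 }.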
7117 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
7118 ABIArgInfo &RetInfo = FI.getReturnInfo();
7119 if (!getCXXABI().classifyReturnType(FI))
7120 RetInfo = classifyReturnType(FI.getReturnType());
7122 // Check if a pointer to an aggregate is passed as a hidden argument.
7123 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
7125 for (auto &I : FI.arguments())
7126 I.info = classifyArgumentType(I.type, Offset);
7127 }
7129 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7130 QualType OrigTy) const {
7131 QualType Ty = OrigTy;
7133 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
7134 // Pointers are also promoted in the same way but this only matters for N32.
7135 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7136 unsigned PtrWidth = getTarget().getPointerWidth(0);
7137 bool DidPromote = false;
7138 if ((Ty->isIntegerType() &&
7139 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
7140 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
7141 DidPromote = true;
7142 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
7143 Ty->isSignedIntegerType());
7144 }
7146 auto TyInfo = getContext().getTypeInfoInChars(Ty);
7148 // The alignment of things in the argument area is never larger than
7149 // StackAlignInBytes.
7150 TyInfo.second =
7151 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
7153 // MinABIStackAlignInBytes is the size of argument slots on the stack.
7154 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
7156 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7157 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
7160 // If there was a promotion, "unpromote" into a temporary.
7161 // TODO: can we just use a pointer into a subset of the original slot?
7162 if (DidPromote) {
7163 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
7164 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
7166 // Truncate down to the right width.
7167 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
7168 : CGF.IntPtrTy);
7169 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
7170 if (OrigTy->isPointerType())
7171 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
7173 CGF.Builder.CreateStore(V, Temp);
7174 Addr = Temp;
7175 }
7177 return Addr;
7178 }
7180 ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
7181 int TySize = getContext().getTypeSize(Ty);
7183 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
7184 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
7185 return ABIArgInfo::getSignExtend(Ty);
7187 return ABIArgInfo::getExtend(Ty);
7188 }
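// Illustrative example (editor's note): by the rule above, a 32-bit
// 'unsigned int' argument on N32/N64 is lowered as 'i32 signext' rather
// than 'i32 zeroext', matching the MIPS64 handling of 32-bit values.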
7190 bool
7191 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7192 llvm::Value *Address) const {
7193 // This information comes from gcc's implementation, which seems to be
7194 // as canonical as it gets.
7196 // Everything on MIPS is 4 bytes. Double-precision FP registers
7197 // are aliased to pairs of single-precision FP registers.
7198 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
7200 // 0-31 are the general purpose registers, $0 - $31.
7201 // 32-63 are the floating-point registers, $f0 - $f31.
7202 // 64 and 65 are the multiply/divide registers, $hi and $lo.
7203 // 66 is the (notional, I think) register for signal-handler return.
7204 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
7206 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
7207 // They are one bit wide and ignored here.
7209 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
7210 // (coprocessor 1 is the FP unit)
7211 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
7212 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
7213 // 176-181 are the DSP accumulator registers.
7214 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
7216 return false;
7217 }
7218 //===----------------------------------------------------------------------===//
7219 // AVR ABI Implementation.
7220 //===----------------------------------------------------------------------===//
7222 namespace {
7223 class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
7224 public:
7225 AVRTargetCodeGenInfo(CodeGenTypes &CGT)
7226 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { }
7228 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7229 CodeGen::CodeGenModule &CGM) const override {
7230 if (GV->isDeclaration())
7231 return;
7232 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
7233 if (!FD) return;
7234 auto *Fn = cast<llvm::Function>(GV);
7236 if (FD->getAttr<AVRInterruptAttr>())
7237 Fn->addFnAttr("interrupt");
7239 if (FD->getAttr<AVRSignalAttr>())
7240 Fn->addFnAttr("signal");
7241 }
7242 };
7243 }
7245 //===----------------------------------------------------------------------===//
7246 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
7247 // Currently subclassed only to implement custom OpenCL C function attribute
7248 // handling.
7249 //===----------------------------------------------------------------------===//
7253 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
7254 public:
7255 TCETargetCodeGenInfo(CodeGenTypes &CGT)
7256 : DefaultTargetCodeGenInfo(CGT) {}
7258 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7259 CodeGen::CodeGenModule &M) const override;
7260 };
7262 void TCETargetCodeGenInfo::setTargetAttributes(
7263 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7264 if (GV->isDeclaration())
7265 return;
7266 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7267 if (!FD) return;
7269 llvm::Function *F = cast<llvm::Function>(GV);
7271 if (M.getLangOpts().OpenCL) {
7272 if (FD->hasAttr<OpenCLKernelAttr>()) {
7273 // OpenCL C Kernel functions are not subject to inlining
7274 F->addFnAttr(llvm::Attribute::NoInline);
7275 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
7276 if (Attr) {
7277 // Convert the reqd_work_group_size() attributes to metadata.
7278 llvm::LLVMContext &Context = F->getContext();
7279 llvm::NamedMDNode *OpenCLMetadata =
7280 M.getModule().getOrInsertNamedMetadata(
7281 "opencl.kernel_wg_size_info");
7283 SmallVector<llvm::Metadata *, 5> Operands;
7284 Operands.push_back(llvm::ConstantAsMetadata::get(F));
7286 Operands.push_back(
7287 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7288 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
7289 Operands.push_back(
7290 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7291 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
7292 Operands.push_back(
7293 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
7294 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
7296 // Add a boolean constant operand for "required" (true) or "hint"
7297 // (false) for implementing the work_group_size_hint attr later.
7298 // Currently always true as the hint is not yet implemented.
7299 Operands.push_back(
7300 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
7301 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
7302 }
7303 }
7304 }
7305 }
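// Illustrative example (editor's note): for a kernel declared as
//   __kernel __attribute__((reqd_work_group_size(8, 4, 1))) void k();
// the code above adds, under 'opencl.kernel_wg_size_info', an operand list
// roughly of the form !{void ()* @k, i32 8, i32 4, i32 1, i1 true}.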
7309 //===----------------------------------------------------------------------===//
7310 // Hexagon ABI Implementation
7311 //===----------------------------------------------------------------------===//
7315 class HexagonABIInfo : public ABIInfo {
7318 public:
7319 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
7322 private:
7323 ABIArgInfo classifyReturnType(QualType RetTy) const;
7324 ABIArgInfo classifyArgumentType(QualType RetTy) const;
7326 void computeInfo(CGFunctionInfo &FI) const override;
7328 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7329 QualType Ty) const override;
7330 };
7332 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
7333 public:
7334 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
7335 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
7337 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
7338 return 29;
7339 }
7340 };
7342 }
7344 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
7345 if (!getCXXABI().classifyReturnType(FI))
7346 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7347 for (auto &I : FI.arguments())
7348 I.info = classifyArgumentType(I.type);
7349 }
7351 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
7352 if (!isAggregateTypeForABI(Ty)) {
7353 // Treat an enum type as its underlying type.
7354 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7355 Ty = EnumTy->getDecl()->getIntegerType();
7357 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
7358 : ABIArgInfo::getDirect());
7359 }
7361 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7362 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7364 // Ignore empty records.
7365 if (isEmptyRecord(getContext(), Ty, true))
7366 return ABIArgInfo::getIgnore();
7368 uint64_t Size = getContext().getTypeSize(Ty);
7369 if (Size > 64)
7370 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
7371 // Pass in the smallest viable integer type.
7372 else if (Size > 32)
7373 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
7374 else if (Size > 16)
7375 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7376 else if (Size > 8)
7377 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7378 else
7379 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
7380 }
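// Illustrative example (editor's note): a 6-byte aggregate such as
//   struct RGB16 { unsigned short r, g, b; };   // 48 bits
// lands in the 'Size > 32' case above and is passed directly as i64.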
7382 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
7383 if (RetTy->isVoidType())
7384 return ABIArgInfo::getIgnore();
7386 // Large vector types should be returned via memory.
7387 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
7388 return getNaturalAlignIndirect(RetTy);
7390 if (!isAggregateTypeForABI(RetTy)) {
7391 // Treat an enum type as its underlying type.
7392 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7393 RetTy = EnumTy->getDecl()->getIntegerType();
7395 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
7396 : ABIArgInfo::getDirect());
7397 }
7399 if (isEmptyRecord(getContext(), RetTy, true))
7400 return ABIArgInfo::getIgnore();
7402 // Aggregates <= 8 bytes are returned in r0; other aggregates
7403 // are returned indirectly.
7404 uint64_t Size = getContext().getTypeSize(RetTy);
7405 if (Size <= 64) {
7406 // Return in the smallest viable integer type.
7407 if (Size <= 8)
7408 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
7409 if (Size <= 16)
7410 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7411 if (Size <= 32)
7412 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7413 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
7414 }
7416 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
7417 }
7419 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7420 QualType Ty) const {
7421 // FIXME: Someone needs to audit that this handles alignment correctly.
7422 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7423 getContext().getTypeInfoInChars(Ty),
7424 CharUnits::fromQuantity(4),
7425 /*AllowHigherAlign*/ true);
7426 }
7428 //===----------------------------------------------------------------------===//
7429 // Lanai ABI Implementation
7430 //===----------------------------------------------------------------------===//
7433 class LanaiABIInfo : public DefaultABIInfo {
7434 public:
7435 LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7437 bool shouldUseInReg(QualType Ty, CCState &State) const;
7439 void computeInfo(CGFunctionInfo &FI) const override {
7440 CCState State(FI.getCallingConvention());
7441 // Lanai uses 4 registers to pass arguments unless the function has the
7442 // regparm attribute set.
7443 if (FI.getHasRegParm()) {
7444 State.FreeRegs = FI.getRegParm();
7445 } else {
7446 State.FreeRegs = 4;
7447 }
7449 if (!getCXXABI().classifyReturnType(FI))
7450 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7451 for (auto &I : FI.arguments())
7452 I.info = classifyArgumentType(I.type, State);
7453 }
7455 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
7456 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
7457 };
7458 } // end anonymous namespace
7460 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
7461 unsigned Size = getContext().getTypeSize(Ty);
7462 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
7464 if (SizeInRegs == 0)
7465 return false;
7467 if (SizeInRegs > State.FreeRegs) {
7468 State.FreeRegs = 0;
7469 return false;
7470 }
7472 State.FreeRegs -= SizeInRegs;
7474 return true;
7475 }
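// Worked example (editor's note): with all 4 argument registers free, a
// 'long long' argument (SizeInRegs == 2) leaves 2 registers free, while a
// 20-byte struct (SizeInRegs == 5) clears the count and is not passed in
// registers.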
7477 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
7478 CCState &State) const {
7479 if (!ByVal) {
7480 if (State.FreeRegs) {
7481 --State.FreeRegs; // Non-byval indirects just use one pointer.
7482 return getNaturalAlignIndirectInReg(Ty);
7483 }
7484 return getNaturalAlignIndirect(Ty, false);
7485 }
7487 // Compute the byval alignment.
7488 const unsigned MinABIStackAlignInBytes = 4;
7489 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
7490 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
7491 /*Realign=*/TypeAlign >
7492 MinABIStackAlignInBytes);
7493 }
7495 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
7496 CCState &State) const {
7497 // Check with the C++ ABI first.
7498 const RecordType *RT = Ty->getAs<RecordType>();
7499 if (RT) {
7500 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
7501 if (RAA == CGCXXABI::RAA_Indirect) {
7502 return getIndirectResult(Ty, /*ByVal=*/false, State);
7503 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
7504 return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
7505 }
7506 }
7508 if (isAggregateTypeForABI(Ty)) {
7509 // Structures with flexible arrays are always indirect.
7510 if (RT && RT->getDecl()->hasFlexibleArrayMember())
7511 return getIndirectResult(Ty, /*ByVal=*/true, State);
7513 // Ignore empty structs/unions.
7514 if (isEmptyRecord(getContext(), Ty, true))
7515 return ABIArgInfo::getIgnore();
7517 llvm::LLVMContext &LLVMContext = getVMContext();
7518 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
7519 if (SizeInRegs <= State.FreeRegs) {
7520 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7521 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
7522 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7523 State.FreeRegs -= SizeInRegs;
7524 return ABIArgInfo::getDirectInReg(Result);
7525 } else {
7526 State.FreeRegs = 0;
7527 }
7528 return getIndirectResult(Ty, true, State);
7529 }
7531 // Treat an enum type as its underlying type.
7532 if (const auto *EnumTy = Ty->getAs<EnumType>())
7533 Ty = EnumTy->getDecl()->getIntegerType();
7535 bool InReg = shouldUseInReg(Ty, State);
7536 if (Ty->isPromotableIntegerType()) {
7537 if (InReg)
7538 return ABIArgInfo::getDirectInReg();
7539 return ABIArgInfo::getExtend(Ty);
7540 }
7541 if (InReg)
7542 return ABIArgInfo::getDirectInReg();
7543 return ABIArgInfo::getDirect();
7544 }
7546 namespace {
7547 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
7548 public:
7549 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
7550 : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
7551 };
7552 }
7554 //===----------------------------------------------------------------------===//
7555 // AMDGPU ABI Implementation
7556 //===----------------------------------------------------------------------===//
7560 class AMDGPUABIInfo final : public DefaultABIInfo {
7561 private:
7562 static const unsigned MaxNumRegsForArgsRet = 16;
7564 unsigned numRegsForType(QualType Ty) const;
7566 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
7567 bool isHomogeneousAggregateSmallEnough(const Type *Base,
7568 uint64_t Members) const override;
7570 public:
7571 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
7572 DefaultABIInfo(CGT) {}
7574 ABIArgInfo classifyReturnType(QualType RetTy) const;
7575 ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
7576 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
7578 void computeInfo(CGFunctionInfo &FI) const override;
7579 };
7581 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
7582 return true;
7583 }
7585 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
7586 const Type *Base, uint64_t Members) const {
7587 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;
7589 // Homogeneous Aggregates may occupy at most 16 registers.
7590 return Members * NumRegs <= MaxNumRegsForArgsRet;
7591 }
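// Worked example (editor's note): a homogeneous aggregate of four floats
// needs 4 registers and passes this check; one made of nine doubles would
// need 18 > 16 registers and is rejected.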
7593 /// Estimate number of registers the type will use when passed in registers.
7594 unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
7595 unsigned NumRegs = 0;
7597 if (const VectorType *VT = Ty->getAs<VectorType>()) {
7598 // Compute from the number of elements. The reported size is based on the
7599 // in-memory size, which includes the padding 4th element for 3-vectors.
7600 QualType EltTy = VT->getElementType();
7601 unsigned EltSize = getContext().getTypeSize(EltTy);
7603 // 16-bit element vectors should be passed as packed.
7604 if (EltSize == 16)
7605 return (VT->getNumElements() + 1) / 2;
7607 unsigned EltNumRegs = (EltSize + 31) / 32;
7608 return EltNumRegs * VT->getNumElements();
7609 }
7611 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7612 const RecordDecl *RD = RT->getDecl();
7613 assert(!RD->hasFlexibleArrayMember());
7615 for (const FieldDecl *Field : RD->fields()) {
7616 QualType FieldTy = Field->getType();
7617 NumRegs += numRegsForType(FieldTy);
7618 }
7620 return NumRegs;
7621 }
7623 return (getContext().getTypeSize(Ty) + 31) / 32;
7624 }
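// Worked example (editor's note): a <3 x half> vector counts as
// (3 + 1) / 2 == 2 registers via the packed 16-bit rule above, and a
// struct { float x; double y; } counts as 1 + 2 == 3 registers.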
7626 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
7627 llvm::CallingConv::ID CC = FI.getCallingConvention();
7629 if (!getCXXABI().classifyReturnType(FI))
7630 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7632 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
7633 for (auto &Arg : FI.arguments()) {
7634 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
7635 Arg.info = classifyKernelArgumentType(Arg.type);
7636 } else {
7637 Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
7638 }
7639 }
7640 }
7642 ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
7643 if (isAggregateTypeForABI(RetTy)) {
7644 // Records with non-trivial destructors/copy-constructors should not be
7645 // returned by value.
7646 if (!getRecordArgABI(RetTy, getCXXABI())) {
7647 // Ignore empty structs/unions.
7648 if (isEmptyRecord(getContext(), RetTy, true))
7649 return ABIArgInfo::getIgnore();
7651 // Lower single-element structs to just return a regular value.
7652 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
7653 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
7655 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
7656 const RecordDecl *RD = RT->getDecl();
7657 if (RD->hasFlexibleArrayMember())
7658 return DefaultABIInfo::classifyReturnType(RetTy);
7659 }
7661 // Pack aggregates <= 8 bytes into a single VGPR or a pair.
7662 uint64_t Size = getContext().getTypeSize(RetTy);
7663 if (Size <= 16)
7664 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7666 if (Size <= 32)
7667 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7669 if (Size <= 64) {
7670 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
7671 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
7672 }
7674 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
7675 return ABIArgInfo::getDirect();
7676 }
7677 }
7679 // Otherwise just do the default thing.
7680 return DefaultABIInfo::classifyReturnType(RetTy);
7681 }
7683 /// For kernels all parameters are really passed in a special buffer. It doesn't
7684 /// make sense to pass anything byval, so everything must be direct.
7685 ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
7686 Ty = useFirstFieldIfTransparentUnion(Ty);
7688 // TODO: Can we omit empty structs?
7690 // Coerce single element structs to its element.
7691 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
7692 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
7694 // If we set CanBeFlattened to true, CodeGen will expand the struct to its
7695 // individual elements, which confuses the Clover OpenCL backend; therefore we
7696 // have to set it to false here. Other args of getDirect() are just defaults.
7697 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
7698 }
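// Illustrative note (editor's addition): a kernel parameter such as
//   struct Args { int n; global float *p; };
// is thus passed directly in the kernel argument buffer, with flattening
// disabled, instead of as a byval pointer.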
7700 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
7701 unsigned &NumRegsLeft) const {
7702 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
7704 Ty = useFirstFieldIfTransparentUnion(Ty);
7706 if (isAggregateTypeForABI(Ty)) {
7707 // Records with non-trivial destructors/copy-constructors should not be
7708 // passed by value.
7709 if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
7710 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7712 // Ignore empty structs/unions.
7713 if (isEmptyRecord(getContext(), Ty, true))
7714 return ABIArgInfo::getIgnore();
7716 // Lower single-element structs to just pass a regular value. TODO: We
7717 // could do reasonable-size multiple-element structs too, using getExpand(),
7718 // though watch out for things like bitfields.
7719 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
7720 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
7722 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7723 const RecordDecl *RD = RT->getDecl();
7724 if (RD->hasFlexibleArrayMember())
7725 return DefaultABIInfo::classifyArgumentType(Ty);
7726 }
7728 // Pack aggregates <= 8 bytes into single VGPR or pair.
7729 uint64_t Size = getContext().getTypeSize(Ty);
7730 if (Size <= 64) {
7731 unsigned NumRegs = (Size + 31) / 32;
7732 NumRegsLeft -= std::min(NumRegsLeft, NumRegs);
7734 if (Size <= 16)
7735 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
7737 if (Size <= 32)
7738 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
7740 // XXX: Should this be i64 instead, and should the limit increase?
7741 llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
7742 return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
7743 }
7745 if (NumRegsLeft > 0) {
7746 unsigned NumRegs = numRegsForType(Ty);
7747 if (NumRegsLeft >= NumRegs) {
7748 NumRegsLeft -= NumRegs;
7749 return ABIArgInfo::getDirect();
7750 }
7751 }
7752 }
7754 // Otherwise just do the default thing.
7755 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
7756 if (!ArgInfo.isIndirect()) {
7757 unsigned NumRegs = numRegsForType(Ty);
7758 NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
7759 }
7761 return ArgInfo;
7762 }
7764 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
7765 public:
7766 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
7767 : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
7768 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7769 CodeGen::CodeGenModule &M) const override;
7770 unsigned getOpenCLKernelCallingConv() const override;
7772 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
7773 llvm::PointerType *T, QualType QT) const override;
7775 LangAS getASTAllocaAddressSpace() const override {
7776 return getLangASFromTargetAS(
7777 getABIInfo().getDataLayout().getAllocaAddrSpace());
7778 }
7779 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
7780 const VarDecl *D) const override;
7781 llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S,
7782 llvm::LLVMContext &C) const override;
7783 llvm::Function *
7784 createEnqueuedBlockKernel(CodeGenFunction &CGF,
7785 llvm::Function *BlockInvokeFunc,
7786 llvm::Value *BlockLiteral) const override;
7787 bool shouldEmitStaticExternCAliases() const override;
7788 void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
7789 };
7790 }
7792 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
7793 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7794 if (GV->isDeclaration())
7795 return;
7796 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7797 if (!FD)
7798 return;
7800 llvm::Function *F = cast<llvm::Function>(GV);
7802 const auto *ReqdWGS = M.getLangOpts().OpenCL ?
7803 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
7805 if (M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>() &&
7806 (M.getTriple().getOS() == llvm::Triple::AMDHSA))
7807 F->addFnAttr("amdgpu-implicitarg-num-bytes", "48");
7809 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
7810 if (ReqdWGS || FlatWGS) {
7811 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
7812 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
7813 if (ReqdWGS && Min == 0 && Max == 0)
7814 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
7816 if (Min != 0) {
7817 assert(Min <= Max && "Min must be less than or equal to Max");
7819 std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
7820 F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
7821 } else
7822 assert(Max == 0 && "Max must be zero");
7823 }
7825 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
7826 unsigned Min = Attr->getMin();
7827 unsigned Max = Attr->getMax();
7829 if (Min != 0) {
7830 assert((Max == 0 || Min <= Max) && "Min must be less than or equal to Max");
7832 std::string AttrVal = llvm::utostr(Min);
7833 if (Max != 0)
7834 AttrVal = AttrVal + "," + llvm::utostr(Max);
7835 F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
7836 } else
7837 assert(Max == 0 && "Max must be zero");
7838 }
7840 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
7841 unsigned NumSGPR = Attr->getNumSGPR();
7843 if (NumSGPR != 0)
7844 F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
7845 }
7847 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
7848 uint32_t NumVGPR = Attr->getNumVGPR();
7850 if (NumVGPR != 0)
7851 F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
7852 }
7853 }
7855 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
7856 return llvm::CallingConv::AMDGPU_KERNEL;
7857 }
7859 // Currently LLVM assumes null pointers always have value 0,
7860 // which results in incorrectly transformed IR. Therefore, instead of
7861 // emitting null pointers in private and local address spaces, a null
7862 // pointer in the generic address space is emitted, which is then cast to a
7863 // pointer in the local or private address space.
7864 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7865 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
7866 QualType QT) const {
7867 if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
7868 return llvm::ConstantPointerNull::get(PT);
7870 auto &Ctx = CGM.getContext();
7871 auto NPT = llvm::PointerType::get(PT->getElementType(),
7872 Ctx.getTargetAddressSpace(LangAS::opencl_generic));
7873 return llvm::ConstantExpr::getAddrSpaceCast(
7874 llvm::ConstantPointerNull::get(NPT), PT);
7875 }
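// Illustrative example (editor's note): for an OpenCL '__local int *' whose
// target null value is non-zero, the code above emits an addrspacecast of
// the generic-address-space null constant to the local pointer type instead
// of a plain 'null'.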
7877 LangAS
7878 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
7879 const VarDecl *D) const {
7880 assert(!CGM.getLangOpts().OpenCL &&
7881 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
7882 "Address space agnostic languages only");
7883 LangAS DefaultGlobalAS = getLangASFromTargetAS(
7884 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
7885 if (!D)
7886 return DefaultGlobalAS;
7888 LangAS AddrSpace = D->getType().getAddressSpace();
7889 assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
7890 if (AddrSpace != LangAS::Default)
7891 return AddrSpace;
7893 if (CGM.isTypeConstant(D->getType(), false)) {
7894 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
7895 return ConstAS.getValue();
7896 }
7897 return DefaultGlobalAS;
7898 }
7900 llvm::SyncScope::ID
7901 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S,
7902 llvm::LLVMContext &C) const {
7903 StringRef Name;
7904 switch (S) {
7905 case SyncScope::OpenCLWorkGroup:
7906 Name = "workgroup";
7907 break;
7908 case SyncScope::OpenCLDevice:
7909 Name = "agent";
7910 break;
7911 case SyncScope::OpenCLAllSVMDevices:
7912 Name = "";
7913 break;
7914 case SyncScope::OpenCLSubGroup:
7915 Name = "wavefront";
7916 }
7917 return C.getOrInsertSyncScopeID(Name);
7918 }
7920 bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
7921 return false;
7922 }
7924 void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
7925 const FunctionType *&FT) const {
7926 FT = getABIInfo().getContext().adjustFunctionType(
7927 FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
7928 }
7930 //===----------------------------------------------------------------------===//
7931 // SPARC v8 ABI Implementation.
7932 // Based on the SPARC Compliance Definition version 2.4.1.
7934 // Ensures that complex values are passed in registers.
7936 namespace {
7937 class SparcV8ABIInfo : public DefaultABIInfo {
7938 public:
7939 SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7941 private:
7942 ABIArgInfo classifyReturnType(QualType RetTy) const;
7943 void computeInfo(CGFunctionInfo &FI) const override;
7944 };
7945 } // end anonymous namespace
7948 ABIArgInfo
7949 SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
7950 if (Ty->isAnyComplexType()) {
7951 return ABIArgInfo::getDirect();
7952 }
7953 else {
7954 return DefaultABIInfo::classifyReturnType(Ty);
7955 }
7956 }
7958 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
7960 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7961 for (auto &Arg : FI.arguments())
7962 Arg.info = classifyArgumentType(Arg.type);
7963 }
7965 namespace {
7966 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
7967 public:
7968 SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
7969 : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
7970 };
7971 } // end anonymous namespace
7973 //===----------------------------------------------------------------------===//
7974 // SPARC v9 ABI Implementation.
7975 // Based on the SPARC Compliance Definition version 2.4.1.
7977 // Function arguments are mapped to a nominal "parameter array" and promoted
7978 // to registers depending on their type. Each argument occupies 8 or 16 bytes
7979 // in the array; structs larger than 16 bytes are passed indirectly.
7981 // One case requires special care:
7983 //   struct mixed {
7984 //     int i;
7985 //     float f;
7986 //   };
7988 // When a struct mixed is passed by value, it only occupies 8 bytes in the
7989 // parameter array, but the int is passed in an integer register, and the float
7990 // is passed in a floating point register. This is represented as two arguments
7991 // with the LLVM IR inreg attribute:
7993 // declare void f(i32 inreg %i, float inreg %f)
7995 // The code generator will only allocate 4 bytes from the parameter array for
7996 // the inreg arguments. All other arguments are allocated a multiple of 8
7997 // bytes.
7999 namespace {
8000 class SparcV9ABIInfo : public ABIInfo {
8001 public:
8002 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
8004 private:
8005 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
8006 void computeInfo(CGFunctionInfo &FI) const override;
8007 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8008 QualType Ty) const override;
8010 // Coercion type builder for structs passed in registers. The coercion type
8011 // serves two purposes:
8013 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
8015 // 2. Expose aligned floating point elements as first-level elements, so the
8016 // code generator knows to pass them in floating point registers.
8018 // We also compute the InReg flag which indicates that the struct contains
8019 // aligned 32-bit floats.
8021 struct CoerceBuilder {
8022 llvm::LLVMContext &Context;
8023 const llvm::DataLayout &DL;
8024 SmallVector<llvm::Type*, 8> Elems;
8025 uint64_t Size;
8026 bool InReg;
8028 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
8029 : Context(c), DL(dl), Size(0), InReg(false) {}
8031 // Pad Elems with integers until Size is ToSize.
8032 void pad(uint64_t ToSize) {
8033 assert(ToSize >= Size && "Cannot remove elements");
8034 if (ToSize == Size)
8035 return;
8037 // Finish the current 64-bit word.
8038 uint64_t Aligned = llvm::alignTo(Size, 64);
8039 if (Aligned > Size && Aligned <= ToSize) {
8040 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
8041 Size = Aligned;
8042 }
8044 // Add whole 64-bit words.
8045 while (Size + 64 <= ToSize) {
8046 Elems.push_back(llvm::Type::getInt64Ty(Context));
8047 Size += 64;
8048 }
8050 // Final in-word padding.
8051 if (Size < ToSize) {
8052 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
8053 Size = ToSize;
8054 }
8055 }
8057 // Add a floating point element at Offset.
8058 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
8059 // Unaligned floats are treated as integers.
8060 if (Offset % Bits)
8061 return;
8062 // The InReg flag is only required if there are any floats < 64 bits.
8063 if (Bits < 64)
8064 InReg = true;
8065 pad(Offset);
8066 Elems.push_back(Ty);
8067 Size = Offset + Bits;
8068 }
8070 // Add a struct type to the coercion type, starting at Offset (in bits).
8071 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
8072 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
8073 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
8074 llvm::Type *ElemTy = StrTy->getElementType(i);
8075 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
8076 switch (ElemTy->getTypeID()) {
8077 case llvm::Type::StructTyID:
8078 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
8079 break;
8080 case llvm::Type::FloatTyID:
8081 addFloat(ElemOffset, ElemTy, 32);
8082 break;
8083 case llvm::Type::DoubleTyID:
8084 addFloat(ElemOffset, ElemTy, 64);
8085 break;
8086 case llvm::Type::FP128TyID:
8087 addFloat(ElemOffset, ElemTy, 128);
8088 break;
8089 case llvm::Type::PointerTyID:
8090 if (ElemOffset % 64 == 0) {
8091 pad(ElemOffset);
8092 Elems.push_back(ElemTy);
8093 Size = ElemOffset + 64;
8094 }
8095 break;
8096 default:
8097 break;
8098 }
8099 }
8100 }
8102 // Check if Ty is a usable substitute for the coercion type.
8103 bool isUsableType(llvm::StructType *Ty) const {
8104 return llvm::makeArrayRef(Elems) == Ty->elements();
8105 }
8107 // Get the coercion type as a literal struct type.
8108 llvm::Type *getType() const {
8109 if (Elems.size() == 1)
8110 return Elems.front();
8111 else
8112 return llvm::StructType::get(Context, Elems);
8113 }
8114 };
8116 } // end anonymous namespace
8118 ABIArgInfo
8119 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
8120 if (Ty->isVoidType())
8121 return ABIArgInfo::getIgnore();
8123 uint64_t Size = getContext().getTypeSize(Ty);
8125 // Anything too big to fit in registers is passed with an explicit indirect
8126 // pointer / sret pointer.
8127 if (Size > SizeLimit)
8128 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
8130 // Treat an enum type as its underlying type.
8131 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8132 Ty = EnumTy->getDecl()->getIntegerType();
8134 // Integer types smaller than a register are extended.
8135 if (Size < 64 && Ty->isIntegerType())
8136 return ABIArgInfo::getExtend(Ty);
8138 // Other non-aggregates go in registers.
8139 if (!isAggregateTypeForABI(Ty))
8140 return ABIArgInfo::getDirect();
8142 // If a C++ object has either a non-trivial copy constructor or a non-trivial
8143 // destructor, it is passed with an explicit indirect pointer / sret pointer.
8144 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
8145 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
8147 // This is a small aggregate type that should be passed in registers.
8148 // Build a coercion type from the LLVM struct type.
8149 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
8150 if (!StrTy)
8151 return ABIArgInfo::getDirect();
8153 CoerceBuilder CB(getVMContext(), getDataLayout());
8154 CB.addStruct(0, StrTy);
8155 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
8157 // Try to use the original type for coercion.
8158 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
8160 if (CB.InReg)
8161 return ABIArgInfo::getDirectInReg(CoerceTy);
8162 else
8163 return ABIArgInfo::getDirect(CoerceTy);
8164 }
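// Worked example (editor's note): for the 'struct mixed { int i; float f; }'
// case described at the top of this section, the builder produces the
// coercion type { i32, float } with InReg set (it saw a float narrower than
// 64 bits), which is what yields the two inreg arguments shown there.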
8166 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8167 QualType Ty) const {
8168 ABIArgInfo AI = classifyType(Ty, 16 * 8);
8169 llvm::Type *ArgTy = CGT.ConvertType(Ty);
8170 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8171 AI.setCoerceToType(ArgTy);
8173 CharUnits SlotSize = CharUnits::fromQuantity(8);
8175 CGBuilderTy &Builder = CGF.Builder;
8176 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
8177 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8179 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
8181 Address ArgAddr = Address::invalid();
8182 CharUnits Stride;
8183 switch (AI.getKind()) {
8184 case ABIArgInfo::Expand:
8185 case ABIArgInfo::CoerceAndExpand:
8186 case ABIArgInfo::InAlloca:
8187 llvm_unreachable("Unsupported ABI kind for va_arg");
8189 case ABIArgInfo::Extend: {
8190 Stride = SlotSize;
8191 CharUnits Offset = SlotSize - TypeInfo.first;
8192 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
8193 break;
8194 }
8196 case ABIArgInfo::Direct: {
8197 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
8198 Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
8199 ArgAddr = Addr;
8200 break;
8201 }
8203 case ABIArgInfo::Indirect:
8204 Stride = SlotSize;
8205 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
8206 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
8207 TypeInfo.second);
8208 break;
8210 case ABIArgInfo::Ignore:
8211 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
8212 }
8214 // Update VAList.
8215 llvm::Value *NextPtr =
8216 Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
8217 Builder.CreateStore(NextPtr, VAListAddr);
8219 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
8220 }
8222 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
8223 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
8224 for (auto &I : FI.arguments())
8225 I.info = classifyType(I.type, 16 * 8);
8226 }
8228 namespace {
8229 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
8230 public:
8231 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
8232 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
8234 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
8235 return 14;
8236 }
8238 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
8239 llvm::Value *Address) const override;
8240 };
8241 } // end anonymous namespace
8243 bool
8244 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
8245 llvm::Value *Address) const {
8246 // This is calculated from the LLVM and GCC tables and verified
8247 // against gcc output. AFAIK all ABIs use the same encoding.
8249 CodeGen::CGBuilderTy &Builder = CGF.Builder;
8251 llvm::IntegerType *i8 = CGF.Int8Ty;
8252 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
8253 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
8255 // 0-31: the 8-byte general-purpose registers
8256 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
8258 // 32-63: f0-31, the 4-byte floating-point registers
8259 AssignToArrayRange(Builder, Address, Four8, 32, 63);
8261 //   Y   = 64
8262 //   PSR = 65
8263 //   WIM = 66
8264 //   TBR = 67
8265 //   PC  = 68
8266 //   NPC = 69
8267 //   FSR = 70
8268 //   CSR = 71
8269 AssignToArrayRange(Builder, Address, Eight8, 64, 71);
8271 // 72-87: d0-15, the 8-byte floating-point registers
8272 AssignToArrayRange(Builder, Address, Eight8, 72, 87);
8274 return false;
8275 }
8276 //===----------------------------------------------------------------------===//
8277 // ARC ABI implementation.
8278 //===----------------------------------------------------------------------===//
8279 namespace {
8280 class ARCABIInfo : public DefaultABIInfo {
8281 public:
8282 using DefaultABIInfo::DefaultABIInfo;
8284 private:
8285 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8286 QualType Ty) const override;
8288 void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
8289 if (!State.FreeRegs)
8290 return;
8291 if (Info.isIndirect() && Info.getInReg())
8292 State.FreeRegs--;
8293 else if (Info.isDirect() && Info.getInReg()) {
8294 unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
8295 if (sz < State.FreeRegs)
8296 State.FreeRegs -= sz;
8297 else
8298 State.FreeRegs = 0;
8299 }
8300 }
8302 void computeInfo(CGFunctionInfo &FI) const override {
8303 CCState State(FI.getCallingConvention());
8304 // ARC uses 8 registers to pass arguments.
8305 State.FreeRegs = 8;
8307 if (!getCXXABI().classifyReturnType(FI))
8308 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8309 updateState(FI.getReturnInfo(), FI.getReturnType(), State);
8310 for (auto &I : FI.arguments()) {
8311 I.info = classifyArgumentType(I.type, State.FreeRegs);
8312 updateState(I.info, I.type, State);
8313 }
8314 }
8316 ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
8317 ABIArgInfo getIndirectByValue(QualType Ty) const;
8318 ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
8319 ABIArgInfo classifyReturnType(QualType RetTy) const;
8320 };
8322 class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
8323 public:
8324 ARCTargetCodeGenInfo(CodeGenTypes &CGT)
8325 : TargetCodeGenInfo(new ARCABIInfo(CGT)) {}
8326 };
8327 }
8329 ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
8330 return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
8331 getNaturalAlignIndirect(Ty, false);
8332 }
8334 ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
8335 // Compute the byval alignment.
8336 const unsigned MinABIStackAlignInBytes = 4;
8337 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
8338 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
8339 TypeAlign > MinABIStackAlignInBytes);
8340 }
8342 Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8343 QualType Ty) const {
8344 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
8345 getContext().getTypeInfoInChars(Ty),
8346 CharUnits::fromQuantity(4), true);
8347 }
8349 ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
8350 uint8_t FreeRegs) const {
8351 // Handle the generic C++ ABI.
8352 const RecordType *RT = Ty->getAs<RecordType>();
8353 if (RT) {
8354 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
8355 if (RAA == CGCXXABI::RAA_Indirect)
8356 return getIndirectByRef(Ty, FreeRegs > 0);
8358 if (RAA == CGCXXABI::RAA_DirectInMemory)
8359 return getIndirectByValue(Ty);
8360 }
8362 // Treat an enum type as its underlying type.
8363 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8364 Ty = EnumTy->getDecl()->getIntegerType();
8366 auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;
8368 if (isAggregateTypeForABI(Ty)) {
8369 // Structures with flexible arrays are always indirect.
8370 if (RT && RT->getDecl()->hasFlexibleArrayMember())
8371 return getIndirectByValue(Ty);
8373 // Ignore empty structs/unions.
8374 if (isEmptyRecord(getContext(), Ty, true))
8375 return ABIArgInfo::getIgnore();
8377 llvm::LLVMContext &LLVMContext = getVMContext();
8379 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
8380 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
8381 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
8383 return FreeRegs >= SizeInRegs ?
8384 ABIArgInfo::getDirectInReg(Result) :
8385 ABIArgInfo::getDirect(Result, 0, nullptr, false);
8386 }
8388 return Ty->isPromotableIntegerType() ?
8389 (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) :
8390 ABIArgInfo::getExtend(Ty)) :
8391 (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() :
8392 ABIArgInfo::getDirect());
8393 }
8395 ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
8396 if (RetTy->isAnyComplexType())
8397 return ABIArgInfo::getDirectInReg();
8399 // Arguments of size > 4 registers are indirect.
8400 auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
8401 if (RetSize > 4)
8402 return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);
8404 return DefaultABIInfo::classifyReturnType(RetTy);
8405 }
8407 } // End anonymous namespace.
8409 //===----------------------------------------------------------------------===//
8410 // XCore ABI Implementation
8411 //===----------------------------------------------------------------------===//
8415 /// A SmallStringEnc instance is used to build up the TypeString by passing
8416 /// it by reference between functions that append to it.
8417 typedef llvm::SmallString<128> SmallStringEnc;
8419 /// TypeStringCache caches the meta encodings of Types.
8421 /// The reason for caching TypeStrings is two fold:
8422 /// 1. To cache a type's encoding for later uses;
8423 /// 2. As a means to break recursive member type inclusion.
8425 /// A cache Entry can have a Status of:
8426 /// NonRecursive: The type encoding is not recursive;
8427 /// Recursive: The type encoding is recursive;
8428 /// Incomplete: An incomplete TypeString;
8429 /// IncompleteUsed: An incomplete TypeString that has been used in a
8430 /// Recursive type encoding.
8432 /// A NonRecursive entry will have all of its sub-members expanded as fully
8433 /// as possible. Whilst it may contain types which are recursive, the type
8434 /// itself is not recursive and thus its encoding may be safely used whenever
8435 /// the type is encountered.
8437 /// A Recursive entry will have all of its sub-members expanded as fully as
8438 /// possible. The type itself is recursive and it may contain other types which
8439 /// are recursive. The Recursive encoding must not be used during the expansion
8440 /// of a recursive type's recursive branch. For simplicity the code uses
8441 /// IncompleteCount to reject all usage of Recursive encodings for member types.
8443 /// An Incomplete entry is always a RecordType and only encodes its
8444 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
8445 /// are placed into the cache during type expansion as a means to identify and
8446 /// handle recursive inclusion of types as sub-members. If there is recursion
8447 /// the entry becomes IncompleteUsed.
8449 /// During the expansion of a RecordType's members:
8451 /// If the cache contains a NonRecursive encoding for the member type, the
8452 /// cached encoding is used;
8454 /// If the cache contains a Recursive encoding for the member type, the
8455 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
8457 /// If the member is a RecordType, an Incomplete encoding is placed into the
8458 /// cache to break potential recursive inclusion of itself as a sub-member;
8460 /// Once a member RecordType has been expanded, its temporary incomplete
8461 /// entry is removed from the cache. If a Recursive encoding was swapped out
8462 /// it is swapped back in;
8464 /// If an incomplete entry is used to expand a sub-member, the incomplete
8465 /// entry is marked as IncompleteUsed. The cache keeps count of how many
8466 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
8468 /// If a member's encoding is found to be a NonRecursive or Recursive viz:
8469 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
8470 /// Else the member is part of a recursive type and thus the recursion has
8471 /// been exited too soon for the encoding to be correct for the member.
8473 class TypeStringCache {
8474 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
8475 struct Entry {
8476 std::string Str; // The encoded TypeString for the type.
8477 enum Status State; // Information about the encoding in 'Str'.
8478 std::string Swapped; // A temporary place holder for a Recursive encoding
8479 // during the expansion of RecordType's members.
8480 };
8481 std::map<const IdentifierInfo *, struct Entry> Map;
8482 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
8483 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
8484 public:
8485 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8486 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
8487 bool removeIncomplete(const IdentifierInfo *ID);
8488 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
8489 bool IsRecursive);
8490 StringRef lookupStr(const IdentifierInfo *ID);
8491 };
8493 /// TypeString encodings for enum & union fields must be ordered.
8494 /// FieldEncoding is a helper for this ordering process.
8495 class FieldEncoding {
8496 bool HasName;
8497 std::string Enc;
8498 public:
8499 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
8500 StringRef str() { return Enc; }
8501 bool operator<(const FieldEncoding &rhs) const {
8502 if (HasName != rhs.HasName) return HasName;
8503 return Enc < rhs.Enc;
8504 }
8505 };
8507 class XCoreABIInfo : public DefaultABIInfo {
8508 public:
8509 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8510 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8511 QualType Ty) const override;
8514 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
8515 mutable TypeStringCache TSC;
8517 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
8518 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
8519 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
8520 CodeGen::CodeGenModule &M) const override;

} // End anonymous namespace.
// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
    Builder.CreateStore(APN.getPointer(), VAListAddr);
  }

  return Val;
}
/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}
/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete || E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}
/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount!=0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}
/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}
/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
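///
/// For illustration only (the normative format is in the guide cited above):
/// a C function 'int foo(int);' would yield a module-level node roughly of
/// the form
///   !xcore.typestrings = !{!0}
///   !0 = !{i32 (i32)* @foo, !"f{si}(si)"}
/// pairing the global with its TypeString.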
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
//===----------------------------------------------------------------------===//
// SPIR ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  unsigned getOpenCLKernelCallingConv() const override;
};

} // End anonymous namespace.
namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  DefaultABIInfo SPIRABI(CGM.getTypes());
  SPIRABI.computeInfo(FI);
}
} // namespace CodeGen
} // namespace clang

unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}
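
// Illustrative note: with this hook an OpenCL kernel such as
// 'kernel void k();' is emitted as 'define spir_kernel void @k()' in the
// IR rather than with the default C calling convention.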
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);
/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}
/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType() ? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void)TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
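
// Illustrative example (hypothetical type): for 'struct S { struct S *next; };'
// the stub "s(S){}" is cached by addIncomplete(), the self-referential pointer
// consumes it, and the completed encoding becomes "s(S){m(next){p(s(S){})}}",
// which addIfComplete() then caches as Recursive.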
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
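
// Illustrative example (hypothetical type): 'enum E { B = 2, A = 1 };'
// encodes as "e(E){m(A){1},m(B){2}}"; enumerators are emitted in sorted
// order rather than declaration order.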
/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[] = {"",   "c:",  "r:",  "cr:",
                                      "v:", "cv:", "rv:", "crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
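
// Illustrative example: for 'const volatile int' the bits are c (1<<0) and
// v (1<<2), so Lookup == 5 and "cv:" is emitted, giving "cv:si" once the
// builtin encoding is appended.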
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
  case BuiltinType::Void:       EncType = "0";   break;
  case BuiltinType::Bool:       EncType = "b";   break;
  case BuiltinType::Char_U:     EncType = "uc";  break;
  case BuiltinType::UChar:      EncType = "uc";  break;
  case BuiltinType::SChar:      EncType = "sc";  break;
  case BuiltinType::UShort:     EncType = "us";  break;
  case BuiltinType::Short:      EncType = "ss";  break;
  case BuiltinType::UInt:       EncType = "ui";  break;
  case BuiltinType::Int:        EncType = "si";  break;
  case BuiltinType::ULong:      EncType = "ul";  break;
  case BuiltinType::Long:       EncType = "sl";  break;
  case BuiltinType::ULongLong:  EncType = "ull"; break;
  case BuiltinType::LongLong:   EncType = "sll"; break;
  case BuiltinType::Float:      EncType = "ft";  break;
  case BuiltinType::Double:     EncType = "d";   break;
  case BuiltinType::LongDouble: EncType = "ld";  break;
  default:
    return false;
  }
  Enc += EncType;
  return true;
}
/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
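
// Illustrative example (hypothetical declaration): 'int A[10]' encodes as
// "a(10:si)", while a global array of unknown size encodes as "a(*:si)"
// via the NoSizeEnc parameter.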
/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}
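
// Illustrative example (hypothetical type): 'int f(float, ...)' encodes as
// "f{si}(ft,va)"; a prototyped function taking no arguments encodes its
// parameter list as "0", e.g. "f{0}(0)" for 'void g(void)'.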
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {
  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }

  return false;
}
//===----------------------------------------------------------------------===//
// RISCV ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  unsigned XLen; // Size of the integer ('x') registers in bits.
  static const int NumArgGPRs = 8;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen)
      : DefaultABIInfo(CGT), XLen(XLen) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed,
                                  int &ArgGPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;
};
} // end anonymous namespace
void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirectly, or if the type size is greater than 2*XLen; e.g.
  // fp128 is passed directly in LLVM IR, relying on the backend lowering code
  // to rewrite the argument list and pass it indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect ||
                       getContext().getTypeSize(RetTy) > (2 * XLen);

  // We must track the number of GPRs used in order to conform to the RISC-V
  // ABI, as integer scalars passed in registers should have signext/zeroext
  // when promoted, but are anyext if passed on the stack. As GPR usage is
  // different for variadic arguments, we must also track whether we are
  // examining a vararg or not.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info = classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft);
    ArgNum++;
  }
}
ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);
  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  bool MustUseStack = false;
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    MustUseStack = true;
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width, unless passed on the
    // stack.
    if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
      return extendType(Ty);
    }

    return ABIArgInfo::getDirect();
  }

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
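
// Worked example (hypothetical call, RV32 so XLen == 32): a vararg
// 'long long' (Size == 64, NeededAlign == 64) with three GPRs remaining
// needs an aligned register pair, so NeededArgGPRs == 2 + (3 % 2) == 3:
// one register is skipped to reach an even-aligned pair and two hold the
// value.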
ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft);
}
Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  std::pair<CharUnits, CharUnits> SizeAndAlign =
      getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*XLen bits are passed indirectly.
  bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
                          SlotSize, /*AllowHigherAlign=*/true);
}
ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // The RV64 ABI requires unsigned 32-bit integers to be sign extended.
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}
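
// Illustrative note: on RV64 a 'uint32_t' argument is therefore
// sign-extended, matching the convention that 32-bit values are held in
// registers in sign-extended form regardless of signedness; narrower
// unsigned types such as 'uint16_t' still zero-extend via getExtend().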
namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen)
      : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::user: Kind = "user"; break;
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::avr:
    return SetCGInfo(new AVRTargetCodeGenInfo(Types));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;
    else if (Triple.isOSWindows())
      return SetCGInfo(
          new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }
  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::riscv32:
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, 32));
  case llvm::Triple::riscv64:
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, 64));

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));
  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }
  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::arc:
    return SetCGInfo(new ARCTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}
/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                             llvm::Function *Invoke,
                                             llvm::Value *BlockLiteral) const {
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  for (auto &P : InvokeFT->params())
    ArgTys.push_back(P);
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args;
  for (auto &A : F->args())
    Args.push_back(&A);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}
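
// Illustrative example (hypothetical names): for an invoke function
// 'define internal void @foo_block_invoke(i8* %b)', the wrapper built above
// is roughly
//   define internal void @foo_block_invoke_kernel(i8* %b) {
//   entry:
//     call void @foo_block_invoke(i8* %b)
//     ret void
//   }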
/// Create an OpenCL kernel for an enqueued block.
///
/// The type of the first argument (the block literal) is the struct type
/// of the block literal instead of a pointer type. The first argument
/// (block literal) is passed directly by value to the kernel. The kernel
/// allocates the same type of struct on stack and stores the block literal
/// to it and passes its pointer to the block invoke function. The kernel
/// has "enqueued-block" function attribute and kernel argument metadata.
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke,
    llvm::Value *BlockLiteral) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->addFnAttr("enqueued-block");
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
    Args.push_back(I);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}