//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}
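
// Illustrative example (added for exposition, not part of the original
// source): for a type like
//   struct S { int a; char b; };   // 64 bits with padding, 32-bit alignment
// the helper above yields getIntNTy(Ctx, 32) and NumElements = (64+31)/32 = 2,
// so the argument is coerced to the IR type [2 x i32], preserving the padded
// size that RenderScript expects.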

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}
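
// Worked example (added for exposition, not part of the original source):
// on a 32-bit target (pointer width 32), the scalar sequence {i64, float,
// i8*} counts as ceil(64/32) = 2 integer registers, 1 FP register, and one
// more integer register for the pointer, so occupiesMoreThan(..., 4) is
// false; adding one further scalar would push it past a four-register limit.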

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                               const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}
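
// Illustrative example (added for exposition, not part of the original
// source): given a GCC-style transparent union such as
//   typedef union __attribute__((__transparent_union__)) {
//     int *ip;
//     float *fp;
//   } ptr_u;
// an argument of type ptr_u is classified exactly as if it were the first
// field, i.e. as an 'int *'.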

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}
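
// Worked example (added for exposition, not part of the original source):
// rounding the pointer value 0x1003 up to an 8-byte boundary computes
// (0x1003 + 7) & -8 = 0x100a & 0xfffffff8 = 0x1008, the next address that
// is a multiple of 8.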

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding. If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  Address NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
  CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}
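
// Illustrative note (added for exposition, not part of the original source):
// with 4-byte slots on a big-endian target, a 1-byte argument occupies the
// last byte of its slot, so the code above bumps the address by
// SlotSize - DirectSize = 3 bytes before the final bitcast; on a
// little-endian target the value already sits at offset 0 and no adjustment
// is needed.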

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   HIPE
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list. This enables a feasible implementation of clSetKernelArg() with
  // aggregates etc. If we used the default C calling convention here,
  // clSetKernelArg() might break, depending on the target-specific
  // conventions; different targets might split structs passed as values
  // into multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since a target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  // Try to preserve the source's name to make IR more readable.
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since a target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                      SyncScope Scope,
                                      llvm::AtomicOrdering Ordering,
                                      llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}
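
// Illustrative example (added for exposition, not part of the original
// source): with AllowArrays set, in C code
//   struct A { int a[0]; };             // zero-length array (GNU extension)
//   struct B { struct A x; int : 0; };  // empty record + unnamed bit-field
// both count as "empty" records here; note that C++ record fields are never
// empty per the predicate above, and any named scalar member makes the
// record non-empty.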

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
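
// Illustrative example (added for exposition, not part of the original
// source): the following qualifies as a single-element struct,
//   struct Inner { double d; };
//   struct Outer { struct Inner i[1]; };   // -> the 'double' type
// because single-element arrays are looked through and the nested struct
// contributes exactly one non-empty field with no trailing padding.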

Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)
  //
  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}
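
// Illustrative note (added for exposition, not part of the original source):
// for a direct i32 argument this helper emits IR along the lines of
//   %varet = alloca i32
//   %v = va_arg i8** %ap, i32
//   store i32 %v, i32* %varet
// and returns the address of the temporary slot.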

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                        : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public SwiftABIInfo {
  DefaultABIInfo defaultInfo;

public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : SwiftABIInfo(CGT), defaultInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-module", Attr->getImportModule());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-name", Attr->getImportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
    }

    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
        Fn->addFnAttr("no-prototype");
    }
  }
};

/// Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  bool IsIndirect = isAggregateTypeForABI(Ty) &&
                    !isEmptyRecord(getContext(), Ty, true) &&
                    !isSingleElementStruct(Ty, getContext());
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCL ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                        : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}
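
// Illustrative note (added for exposition, not part of the original source):
// the final getScalarSizeInBits() != 64 check is what excludes <1 x i64>,
// which is a 64-bit integer vector but is not treated as an MMX type;
// <2 x i32>, <4 x i16>, and <8 x i8> all pass every condition.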

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                     .Cases("y", "&y", "^Ym", true)
                     .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;

  /// Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                             bool &UsedInAlloca) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
    : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers. Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('v' << 16) |
                   ('2' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}
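
// Illustrative example (added for exposition, not part of the original
// source): on Darwin,
//   struct P { int x; int y; };   // 64 bits -> eligible for registers
// passes the checks above (register sized, every field itself eligible),
// whereas a 12-byte struct fails isRegisterSize() and is returned
// indirectly instead.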

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}
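
// Illustrative example (added for exposition, not part of the original
// source): a struct of two 'int's sums to exactly its 64-bit size and can
// be expanded into two separate 32-bit arguments; something like
//   struct S { int a; double b; };
// is rejected under an ABI that 8-byte-aligns double (e.g. MSVC on x86),
// since the 96 bits of fields don't match the 128-bit padded struct size.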

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
            llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}
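
// Worked example (added for exposition, not part of the original source):
// a 12-byte struct needs SizeInRegs = (96 + 31) / 32 = 3 registers. If 3
// registers are free, the non-MCU path succeeds and leaves none; under the
// MCU psABI the same struct is refused outright because SizeInRegs > 2,
// regardless of how many registers remain free.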

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogenous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
            Ty->isReferenceType());
  }

  return true;
}
1632 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1633 CCState &State) const {
1634 // FIXME: Set alignment on indirect arguments.
1636 Ty = useFirstFieldIfTransparentUnion(Ty);
1638 // Check with the C++ ABI first.
1639 const RecordType *RT = Ty->getAs<RecordType>();
1641 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1642 if (RAA == CGCXXABI::RAA_Indirect) {
1643 return getIndirectResult(Ty, false, State);
1644 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1645 // The field index doesn't matter, we'll fix it up later.
1646 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1650 // Regcall uses the concept of a homogenous vector aggregate, similar
1651 // to other targets.
1652 const Type *Base = nullptr;
1653 uint64_t NumElts = 0;
1654 if (State.CC == llvm::CallingConv::X86_RegCall &&
1655 isHomogeneousAggregate(Ty, Base, NumElts)) {
1657 if (State.FreeSSERegs >= NumElts) {
1658 State.FreeSSERegs -= NumElts;
1659 if (Ty->isBuiltinType() || Ty->isVectorType())
1660 return ABIArgInfo::getDirect();
1661 return ABIArgInfo::getExpand();
1663 return getIndirectResult(Ty, /*ByVal=*/false, State);
1666 if (isAggregateTypeForABI(Ty)) {
1667 // Structures with flexible arrays are always indirect.
1668 // FIXME: This should not be byval!
1669 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1670 return getIndirectResult(Ty, true, State);
1672 // Ignore empty structs/unions on non-Windows.
1673 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1674 return ABIArgInfo::getIgnore();
1676 llvm::LLVMContext &LLVMContext = getVMContext();
1677 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1678 bool NeedsPadding = false;
1680 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1681 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
1682 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1683 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1685 return ABIArgInfo::getDirectInReg(Result);
1687 return ABIArgInfo::getDirect(Result);
1689 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1691 // Expand small (<= 128-bit) record types when we know that the stack layout
1692 // of those arguments will match the struct. This is important because the
1693 // LLVM backend isn't smart enough to remove byval, which inhibits many
1695 // Don't do this for the MCU if there are still free integer registers
1696 // (see X86_64 ABI for full explanation).
1697 if (getContext().getTypeSize(Ty) <= 4 * 32 &&
1698 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1699 return ABIArgInfo::getExpandWithPadding(
1700 State.CC == llvm::CallingConv::X86_FastCall ||
1701 State.CC == llvm::CallingConv::X86_VectorCall ||
1702 State.CC == llvm::CallingConv::X86_RegCall,
1705 return getIndirectResult(Ty, true, State);
1708 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1709 // On Darwin, some vectors are passed in memory, we handle this by passing
1710 // it as an i8/i16/i32/i64.
1711 if (IsDarwinVectorABI) {
1712 uint64_t Size = getContext().getTypeSize(Ty);
1713 if ((Size == 8 || Size == 16 || Size == 32) ||
1714 (Size == 64 && VT->getNumElements() == 1))
1715         return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1716                                                             Size));
1717     }
1719     if (IsX86_MMXType(CGT.ConvertType(Ty)))
1720       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1722     return ABIArgInfo::getDirect();
1723   }
1726 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1727 Ty = EnumTy->getDecl()->getIntegerType();
1729 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1731   if (Ty->isPromotableIntegerType()) {
1732     if (InReg)
1733       return ABIArgInfo::getExtendInReg(Ty);
1734     return ABIArgInfo::getExtend(Ty);
1735   }
1737   if (InReg)
1738     return ABIArgInfo::getDirectInReg();
1739   return ABIArgInfo::getDirect();
1740 }
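// Illustrative sketch (hand-worked, not normative): for a hypothetical C
// type
//
//   struct Point { int x; int y; };   // 64 bits, no flexible array member
//
// the aggregate path above normally takes the expansion branch, since the
// struct fits in 4*32 bits and its stack layout matches the expanded form,
// so f(Point) is lowered roughly as f(i32 %x, i32 %y) instead of byval.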
1742 void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
1743 bool &UsedInAlloca) const {
1744   // Vectorcall x86 works subtly differently than in x64, so the format is
1745   // a bit different than the x64 version. First, all vector types (not HVAs)
1746   // are assigned, with the first 6 ending up in the YMM0-5 or XMM0-5 registers.
1747   // This differs from the x64 implementation, where the first 6 by INDEX get
1748   // registers.
1749 // After that, integers AND HVAs are assigned Left to Right in the same pass.
1750 // Integers are passed as ECX/EDX if one is available (in order). HVAs will
1751 // first take up the remaining YMM/XMM registers. If insufficient registers
1752 // remain but an integer register (ECX/EDX) is available, it will be passed
1753 // in that, else, on the stack.
1754 for (auto &I : FI.arguments()) {
1755 // First pass do all the vector types.
1756 const Type *Base = nullptr;
1757 uint64_t NumElts = 0;
1758 const QualType& Ty = I.type;
1759 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1760 isHomogeneousAggregate(Ty, Base, NumElts)) {
1761 if (State.FreeSSERegs >= NumElts) {
1762 State.FreeSSERegs -= NumElts;
1763         I.info = ABIArgInfo::getDirect();
1764       } else {
1765         I.info = classifyArgumentType(Ty, State);
1766       }
1767       UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1768     }
1769   }
1771 for (auto &I : FI.arguments()) {
1772 // Second pass, do the rest!
1773 const Type *Base = nullptr;
1774 uint64_t NumElts = 0;
1775 const QualType& Ty = I.type;
1776 bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts);
1778 if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) {
1779 // Assign true HVAs (non vector/native FP types).
1780 if (State.FreeSSERegs >= NumElts) {
1781 State.FreeSSERegs -= NumElts;
1782         I.info = getDirectX86Hva();
1783       } else {
1784         I.info = getIndirectResult(Ty, /*ByVal=*/false, State);
1785       }
1786     } else if (!IsHva) {
1787       // Assign all Non-HVAs, so this will exclude Vector/FP args.
1788       I.info = classifyArgumentType(Ty, State);
1789       UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1790     }
1791   }
1792 }
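// Illustrative sketch (hand-worked, not normative): for a hypothetical
// vectorcall prototype
//
//   void __vectorcall f(__m128 a, HVA2 b, int c);  // HVA2 = two __m128s
//
// the first pass above gives 'a' an XMM register directly; the second pass
// then places the true HVA 'b' in the remaining XMM registers (or passes it
// indirectly if too few remain) and classifies the plain integer 'c'
// normally.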
1794 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1795   CCState State(FI.getCallingConvention());
1796   if (IsMCUABI)
1797     State.FreeRegs = 3;
1798   else if (State.CC == llvm::CallingConv::X86_FastCall)
1799     State.FreeRegs = 2;
1800   else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1801     State.FreeRegs = 2;
1802     State.FreeSSERegs = 6;
1803   } else if (FI.getHasRegParm())
1804     State.FreeRegs = FI.getRegParm();
1805   else if (State.CC == llvm::CallingConv::X86_RegCall) {
1806     State.FreeRegs = 5;
1807     State.FreeSSERegs = 8;
1808   } else
1809     State.FreeRegs = DefaultNumRegisterParameters;
1811 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1812 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1813 } else if (FI.getReturnInfo().isIndirect()) {
1814 // The C++ ABI is not aware of register usage, so we have to check if the
1815 // return value was sret and put it in a register ourselves if appropriate.
1816     if (State.FreeRegs) {
1817       --State.FreeRegs; // The sret parameter consumes a register.
1818       if (!IsMCUABI)
1819         FI.getReturnInfo().setInReg(true);
1820     }
1821   }
1823   // The chain argument effectively gives us another free register.
1824   if (FI.isChainCall())
1825     ++State.FreeRegs;
1827 bool UsedInAlloca = false;
1828 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1829     computeVectorCallArgs(FI, State, UsedInAlloca);
1830   } else {
1831     // If not vectorcall, revert to normal behavior.
1832     for (auto &I : FI.arguments()) {
1833       I.info = classifyArgumentType(I.type, State);
1834       UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1835     }
1836   }
1838   // If we needed to use inalloca for any argument, do a second pass and rewrite
1839   // all the memory arguments to use inalloca.
1840   if (UsedInAlloca)
1841     rewriteWithInAlloca(FI);
1842 }
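// Illustrative sketch (hand-worked, not normative): under __fastcall the
// state above starts with two free integer registers, so for
//
//   void __fastcall f(int a, int b, int c);
//
// 'a' and 'b' are marked inreg (ECX, EDX) and 'c' goes to the stack once
// State.FreeRegs reaches zero.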
1844 void
1845 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1846 CharUnits &StackOffset, ABIArgInfo &Info,
1847 QualType Type) const {
1848 // Arguments are always 4-byte-aligned.
1849 CharUnits FieldAlign = CharUnits::fromQuantity(4);
1851 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
1852 Info = ABIArgInfo::getInAlloca(FrameFields.size());
1853 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1854 StackOffset += getContext().getTypeSizeInChars(Type);
1856 // Insert padding bytes to respect alignment.
1857 CharUnits FieldEnd = StackOffset;
1858 StackOffset = FieldEnd.alignTo(FieldAlign);
1859 if (StackOffset != FieldEnd) {
1860 CharUnits NumBytes = StackOffset - FieldEnd;
1861 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1862 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1863     FrameFields.push_back(Ty);
1864   }
1865 }
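// Illustrative sketch (hand-worked, not normative): appending a field of a
// hypothetical 1-byte type 'struct B { char c; };' leaves StackOffset at a
// non-multiple of 4, so a [3 x i8] padding field is pushed and StackOffset
// is rounded up to the next 4-byte boundary before the following argument
// is placed.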
1867 static bool isArgInAlloca(const ABIArgInfo &Info) {
1868 // Leave ignored and inreg arguments alone.
1869   switch (Info.getKind()) {
1870   case ABIArgInfo::InAlloca:
1871     return true;
1872   case ABIArgInfo::Indirect:
1873     assert(Info.getIndirectByVal());
1874     return true;
1875   case ABIArgInfo::Ignore:
1876     return false;
1877   case ABIArgInfo::Direct:
1878   case ABIArgInfo::Extend:
1879     if (Info.getInReg())
1880       return false;
1881     return true;
1882   case ABIArgInfo::Expand:
1883   case ABIArgInfo::CoerceAndExpand:
1884     // These are aggregate types which are never passed in registers when
1885     // inalloca is involved.
1886     return true;
1887   }
1888   llvm_unreachable("invalid enum");
1889 }
1891 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1892 assert(IsWin32StructABI && "inalloca only supported on win32");
1894 // Build a packed struct type for all of the arguments in memory.
1895 SmallVector<llvm::Type *, 6> FrameFields;
1897 // The stack alignment is always 4.
1898 CharUnits StackAlign = CharUnits::fromQuantity(4);
1900 CharUnits StackOffset;
1901 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1903 // Put 'this' into the struct before 'sret', if necessary.
1904   bool IsThisCall =
1905       FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
1906   ABIArgInfo &Ret = FI.getReturnInfo();
1907   if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
1908       isArgInAlloca(I->info)) {
1909     addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1910     ++I;
1911   }
1913 // Put the sret parameter into the inalloca struct if it's in memory.
1914 if (Ret.isIndirect() && !Ret.getInReg()) {
1915 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
1916 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1917 // On Windows, the hidden sret parameter is always returned in eax.
1918     Ret.setInAllocaSRet(IsWin32StructABI);
1919   }
1921   // Skip the 'this' parameter in ecx.
1922   if (IsThisCall)
1923     ++I;
1925 // Put arguments passed in memory into the struct.
1926   for (; I != E; ++I) {
1927     if (isArgInAlloca(I->info))
1928       addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1929   }
1931   FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1932                                         /*isPacked=*/true),
1933                   StackAlign);
1934 }
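// Illustrative sketch (hand-worked, not normative): for a hypothetical
// Win32 thiscall method taking a non-trivially-copyable argument,
//
//   struct NonTrivial s;   // user-defined copy constructor
//   obj->take(s);          // 'this' stays in ECX, 's' lives in memory
//
// the loop above gathers 's' into the packed argument struct, so the call
// site sets up one inalloca area rather than a separate byval copy.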
1936 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
1937 Address VAListAddr, QualType Ty) const {
1939 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1941 // x86-32 changes the alignment of certain arguments on the stack.
1943 // Just messing with TypeInfo like this works because we never pass
1944 // anything indirectly.
1945 TypeInfo.second = CharUnits::fromQuantity(
1946 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
1948 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
1949 TypeInfo, CharUnits::fromQuantity(4),
1950                           /*AllowHigherAlign*/ true);
1951 }
1953 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1954 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1955 assert(Triple.getArch() == llvm::Triple::x86);
1957   switch (Opts.getStructReturnConvention()) {
1958   case CodeGenOptions::SRCK_Default:
1959     break;
1960   case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
1961     return false;
1962   case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
1963     return true;
1964   }
1966   if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1967     return true;
1969   switch (Triple.getOS()) {
1970   case llvm::Triple::DragonFly:
1971   case llvm::Triple::FreeBSD:
1972   case llvm::Triple::OpenBSD:
1973   case llvm::Triple::Win32:
1974     return true;
1975   default:
1976     return false;
1977   }
1978 }
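// Illustrative sketch (hand-worked, not normative): under -freg-struct-return
// (SRCK_InRegs) a function returning the hypothetical
//
//   struct Small { int x; };
//
// may hand the value back in EAX, while -fpcc-struct-return (SRCK_OnStack)
// forces the same struct through a hidden sret pointer.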
1980 void X86_32TargetCodeGenInfo::setTargetAttributes(
1981 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
1982   if (GV->isDeclaration())
1983     return;
1984   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1985     if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1986       llvm::Function *Fn = cast<llvm::Function>(GV);
1987       Fn->addFnAttr("stackrealign");
1988     }
1989     if (FD->hasAttr<AnyX86InterruptAttr>()) {
1990       llvm::Function *Fn = cast<llvm::Function>(GV);
1991       Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1992     }
1993   }
1994 }
1996 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1997 CodeGen::CodeGenFunction &CGF,
1998 llvm::Value *Address) const {
1999 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2001 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2003 // 0-7 are the eight integer registers; the order is different
2004 // on Darwin (for EH), but the range is the same.
2005   // 8 is %eip.
2006   AssignToArrayRange(Builder, Address, Four8, 0, 8);
2008 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2009 // 12-16 are st(0..4). Not sure why we stop at 4.
2010 // These have size 16, which is sizeof(long double) on
2011 // platforms with 8-byte alignment for that type.
2012 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2013     AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2014   } else {
2016     // 9 is %eflags, which doesn't get a size on Darwin for some
2017     // reason.
2018     Builder.CreateAlignedStore(
2019         Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2020         CharUnits::One());
2022     // 11-16 are st(0..5). Not sure why we stop at 5.
2023     // These have size 12, which is sizeof(long double) on
2024     // platforms with 4-byte alignment for that type.
2025     llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2026     AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2027   }
2029   return false;
2030 }
2032 //===----------------------------------------------------------------------===//
2033 // X86-64 ABI Implementation
2034 //===----------------------------------------------------------------------===//
2037 namespace {
2038 /// The AVX ABI level for X86 targets.
2039 enum class X86AVXABILevel {
2040   None,
2041   AVX,
2042   AVX512
2043 };
2045 /// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
2046 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2047   switch (AVXLevel) {
2048   case X86AVXABILevel::AVX512:
2049     return 512;
2050   case X86AVXABILevel::AVX:
2051     return 256;
2052   case X86AVXABILevel::None:
2053     return 128;
2054   }
2055   llvm_unreachable("Unknown AVXLevel");
2056 }
2058 /// X86_64ABIInfo - The X86_64 ABI information.
2059 class X86_64ABIInfo : public SwiftABIInfo {
2060   enum Class {
2061     Integer = 0,
2062     SSE,
2063     SSEUp,
2064     X87,
2065     X87Up,
2066     ComplexX87,
2067     NoClass,
2068     Memory
2069   };
2071 /// merge - Implement the X86_64 ABI merging algorithm.
2073 /// Merge an accumulating classification \arg Accum with a field
2074 /// classification \arg Field.
2076 /// \param Accum - The accumulating classification. This should
2077 /// always be either NoClass or the result of a previous merge
2078 /// call. In addition, this should never be Memory (the caller
2079 /// should just return Memory for the aggregate).
2080 static Class merge(Class Accum, Class Field);
2082 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2084 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2085 /// final MEMORY or SSE classes when necessary.
2087 /// \param AggregateSize - The size of the current aggregate in
2088 /// the classification process.
2090 /// \param Lo - The classification for the parts of the type
2091 /// residing in the low word of the containing object.
2093 /// \param Hi - The classification for the parts of the type
2094 /// residing in the higher words of the containing object.
2096 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2098 /// classify - Determine the x86_64 register classes in which the
2099 /// given type T should be passed.
2101 /// \param Lo - The classification for the parts of the type
2102 /// residing in the low word of the containing object.
2104 /// \param Hi - The classification for the parts of the type
2105 /// residing in the high word of the containing object.
2107 /// \param OffsetBase - The bit offset of this type in the
2108 /// containing object. Some parameters are classified different
2109 /// depending on whether they straddle an eightbyte boundary.
2111 /// \param isNamedArg - Whether the argument in question is a "named"
2112 /// argument, as used in AMD64-ABI 3.5.7.
2114 /// If a word is unused its result will be NoClass; if a type should
2115   /// be passed in Memory then at least the classification of \arg Lo
2116   /// will be Memory.
2118   /// The \arg Lo class will be NoClass iff the argument is ignored.
2120 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2121 /// also be ComplexX87.
2122 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2123 bool isNamedArg) const;
2125 llvm::Type *GetByteVectorType(QualType Ty) const;
2126 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2127 unsigned IROffset, QualType SourceTy,
2128 unsigned SourceOffset) const;
2129 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2130 unsigned IROffset, QualType SourceTy,
2131 unsigned SourceOffset) const;
2133   /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2134   /// result such that the value will be returned in memory.
2135   ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2137   /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2138   /// such that the argument will be passed in memory.
2139   ///
2140   /// \param freeIntRegs - The number of free integer registers remaining
2141   /// available.
2142 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2144 ABIArgInfo classifyReturnType(QualType RetTy) const;
2146 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2147 unsigned &neededInt, unsigned &neededSSE,
2148 bool isNamedArg) const;
2150 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2151 unsigned &NeededSSE) const;
2153 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2154 unsigned &NeededSSE) const;
2156 bool IsIllegalVectorType(QualType Ty) const;
2158 /// The 0.98 ABI revision clarified a lot of ambiguities,
2159 /// unfortunately in ways that were not always consistent with
2160 /// certain previous compilers. In particular, platforms which
2161 /// required strict binary compatibility with older versions of GCC
2162 /// may need to exempt themselves.
2163   bool honorsRevision0_98() const {
2164     return !getTarget().getTriple().isOSDarwin();
2165   }
2167 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2168 /// classify it as INTEGER (for compatibility with older clang compilers).
2169 bool classifyIntegerMMXAsSSE() const {
2170 // Clang <= 3.8 did not do this.
2171     if (getContext().getLangOpts().getClangABICompat() <=
2172         LangOptions::ClangABI::Ver3_8)
2173       return false;
2175     const llvm::Triple &Triple = getTarget().getTriple();
2176     if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2177       return true;
2178     if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2179       return true;
2180     return false;
2181   }
2183 // GCC classifies vectors of __int128 as memory.
2184 bool passInt128VectorsInMem() const {
2185 // Clang <= 9.0 did not do this.
2186     if (getContext().getLangOpts().getClangABICompat() <=
2187         LangOptions::ClangABI::Ver9)
2188       return false;
2190     const llvm::Triple &T = getTarget().getTriple();
2191     return T.isOSLinux() || T.isOSNetBSD();
2192   }
2194 X86AVXABILevel AVXLevel;
2195   // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
2196   // 64-bit hardware.
2197   bool Has64BitPointers;
2200 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2201 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2202     Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2203   }
2205 bool isPassedUsingAVXType(QualType type) const {
2206 unsigned neededInt, neededSSE;
2207 // The freeIntRegs argument doesn't matter here.
2208 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2209 /*isNamedArg*/true);
2210 if (info.isDirect()) {
2211 llvm::Type *ty = info.getCoerceToType();
2212 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2213         return (vectorTy->getBitWidth() > 128);
2214     }
2215     return false;
2216   }
2218 void computeInfo(CGFunctionInfo &FI) const override;
2220 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2221 QualType Ty) const override;
2222 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2223 QualType Ty) const override;
2225 bool has64BitPointers() const {
2226 return Has64BitPointers;
2229 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
2230 bool asReturnValue) const override {
2231     return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2232   }
2233   bool isSwiftErrorInRegister() const override {
2234     return true;
2235   }
2236 };
2238 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2239 class WinX86_64ABIInfo : public SwiftABIInfo {
2241 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2242 : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2243 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2245 void computeInfo(CGFunctionInfo &FI) const override;
2247 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2248 QualType Ty) const override;
2250 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2251 // FIXME: Assumes vectorcall is in use.
2252 return isX86VectorTypeForVectorCall(getContext(), Ty);
2255 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2256 uint64_t NumMembers) const override {
2257 // FIXME: Assumes vectorcall is in use.
2258 return isX86VectorCallAggregateSmallEnough(NumMembers);
2261 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2262 bool asReturnValue) const override {
2263     return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2264   }
2266   bool isSwiftErrorInRegister() const override {
2267     return true;
2268   }
2270 private:
2271   ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2272                       bool IsVectorCall, bool IsRegCall) const;
2273   ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2274                                   const ABIArgInfo &current) const;
2275   void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2276                              bool IsVectorCall, bool IsRegCall) const;
2278   X86AVXABILevel AVXLevel;
2279   bool IsMingw64;
2280 };
2283 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2284 public:
2285 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2286 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2288 const X86_64ABIInfo &getABIInfo() const {
2289     return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2290   }
2292 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2293 /// the autoreleaseRV/retainRV optimization.
2294   bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const override {
2295     return true;
2296   }
2298   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2299     return 7;
2300   }
2302 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2303 llvm::Value *Address) const override {
2304 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2306 // 0-15 are the 16 integer registers.
2308     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2309     return false;
2310   }
2312 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2313 StringRef Constraint,
2314 llvm::Type* Ty) const override {
2315     return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2316   }
2318 bool isNoProtoCallVariadic(const CallArgList &args,
2319 const FunctionNoProtoType *fnType) const override {
2320     // The default CC on x86-64 sets %al to the number of SSE
2321 // registers used, and GCC sets this when calling an unprototyped
2322 // function, so we override the default behavior. However, don't do
2323 // that when AVX types are involved: the ABI explicitly states it is
2324 // undefined, and it doesn't work in practice because of how the ABI
2325 // defines varargs anyway.
2326     if (fnType->getCallConv() == CC_C) {
2327       bool HasAVXType = false;
2328       for (CallArgList::const_iterator
2329              it = args.begin(), ie = args.end(); it != ie; ++it) {
2330         if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2331           HasAVXType = true;
2332           break;
2333         }
2334       }
2336       if (!HasAVXType)
2337         return true;
2338     }
2340     return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2341   }
2343   llvm::Constant *
2344   getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2345     unsigned Sig = (0xeb << 0) | // jmp rel8
2346                    (0x06 << 8) | // .+0x08
2347                    ('v' << 16) |
2348                    ('2' << 24);
2349     return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2350   }
2352 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2353 CodeGen::CodeGenModule &CGM) const override {
2354     if (GV->isDeclaration())
2355       return;
2356 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2357 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2358 llvm::Function *Fn = cast<llvm::Function>(GV);
2359 Fn->addFnAttr("stackrealign");
2361 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2362 llvm::Function *Fn = cast<llvm::Function>(GV);
2363         Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2364       }
2365     }
2366   }
2367 };
2369 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2370 // If the argument does not end in .lib, automatically add the suffix.
2371 // If the argument contains a space, enclose it in quotes.
2372 // This matches the behavior of MSVC.
2373 bool Quote = (Lib.find(" ") != StringRef::npos);
2374   std::string ArgStr = Quote ? "\"" : "";
2375   ArgStr += Lib;
2376   if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
2377     ArgStr += ".lib";
2378   ArgStr += Quote ? "\"" : "";
2379   return ArgStr;
2380 }
2382 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2383 public:
2384 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2385 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2386 unsigned NumRegisterParameters)
2387 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2388 Win32StructABI, NumRegisterParameters, false) {}
2390 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2391 CodeGen::CodeGenModule &CGM) const override;
2393 void getDependentLibraryOption(llvm::StringRef Lib,
2394 llvm::SmallString<24> &Opt) const override {
2395 Opt = "/DEFAULTLIB:";
2396     Opt += qualifyWindowsLibrary(Lib);
2397   }
2399 void getDetectMismatchOption(llvm::StringRef Name,
2400 llvm::StringRef Value,
2401 llvm::SmallString<32> &Opt) const override {
2402 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2406 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2407 CodeGen::CodeGenModule &CGM) {
2408 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2410 if (CGM.getCodeGenOpts().StackProbeSize != 4096)
2411 Fn->addFnAttr("stack-probe-size",
2412 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2413 if (CGM.getCodeGenOpts().NoStackArgProbe)
2414 Fn->addFnAttr("no-stack-arg-probe");
2418 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2419 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2420 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2421   if (GV->isDeclaration())
2422     return;
2423   addStackProbeTargetAttributes(D, GV, CGM);
2424 }
2426 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2427 public:
2428 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2429 X86AVXABILevel AVXLevel)
2430 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT, AVXLevel)) {}
2432 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2433 CodeGen::CodeGenModule &CGM) const override;
2435   int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2436     return 7;
2437   }
2439 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2440 llvm::Value *Address) const override {
2441 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2443 // 0-15 are the 16 integer registers.
2445     AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2446     return false;
2447   }
2449 void getDependentLibraryOption(llvm::StringRef Lib,
2450 llvm::SmallString<24> &Opt) const override {
2451 Opt = "/DEFAULTLIB:";
2452     Opt += qualifyWindowsLibrary(Lib);
2453   }
2455 void getDetectMismatchOption(llvm::StringRef Name,
2456 llvm::StringRef Value,
2457 llvm::SmallString<32> &Opt) const override {
2458 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2462 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2463 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2464 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2465   if (GV->isDeclaration())
2466     return;
2467   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2468     if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2469       llvm::Function *Fn = cast<llvm::Function>(GV);
2470       Fn->addFnAttr("stackrealign");
2471     }
2472     if (FD->hasAttr<AnyX86InterruptAttr>()) {
2473       llvm::Function *Fn = cast<llvm::Function>(GV);
2474       Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2475     }
2476   }
2478   addStackProbeTargetAttributes(D, GV, CGM);
2479 }
2482 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2483                               Class &Hi) const {
2484 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2486   // (a) If one of the classes is Memory, the whole argument is passed in
2487   //     memory.
2489   // (b) If X87UP is not preceded by X87, the whole argument is passed in
2490   //     memory.
2492 // (c) If the size of the aggregate exceeds two eightbytes and the first
2493 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2494 // argument is passed in memory. NOTE: This is necessary to keep the
2495 // ABI working for processors that don't support the __m256 type.
2497 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2499 // Some of these are enforced by the merging logic. Others can arise
2500 // only with unions; for example:
2501 // union { _Complex double; unsigned; }
2503 // Note that clauses (b) and (c) were added in 0.98.
2505   if (Hi == Memory)
2506     Lo = Memory;
2507   if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2508     Lo = Memory;
2509   if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2510     Lo = Memory;
2511   if (Hi == SSEUp && Lo != SSE)
2512     Hi = SSE;
2513 }
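// Illustrative sketch (hand-worked, not normative): for the hypothetical
//
//   union U { __m256 v; unsigned u; };
//
// the field merge leaves Lo == Integer and Hi == SSEUp; clause (c) above
// then sees an aggregate larger than two eightbytes whose first eightbyte
// is not SSE, so Lo is demoted to Memory and the union is passed on the
// stack.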
2515 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2516 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2517 // classified recursively so that always two fields are
2518 // considered. The resulting class is calculated according to
2519 // the classes of the fields in the eightbyte:
2521 // (a) If both classes are equal, this is the resulting class.
2523   // (b) If one of the classes is NO_CLASS, the resulting class is
2524   //     the other class.
2526   // (c) If one of the classes is MEMORY, the result is the MEMORY
2527   //     class.
2529   // (d) If one of the classes is INTEGER, the result is the
2530   //     INTEGER.
2532 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2533 // MEMORY is used as class.
2535 // (f) Otherwise class SSE is used.
2537 // Accum should never be memory (we should have returned) or
2538 // ComplexX87 (because this cannot be passed in a structure).
2539 assert((Accum != Memory && Accum != ComplexX87) &&
2540 "Invalid accumulated classification during merge.");
2541   if (Accum == Field || Field == NoClass)
2542     return Accum;
2543   if (Field == Memory)
2544     return Memory;
2545   if (Accum == NoClass)
2546     return Field;
2547   if (Accum == Integer || Field == Integer)
2548     return Integer;
2549   if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2550       Accum == X87 || Accum == X87Up)
2551     return Memory;
2553   return SSE;
2554 }
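// Illustrative sketch (hand-worked, not normative): for the hypothetical
//
//   struct S { int i; float f; };
//
// both fields share one eightbyte, so the accumulated class becomes
// merge(Integer, SSE); rule (d) applies and the eightbyte is classified
// INTEGER, i.e. the whole struct travels in a single GPR.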
2555 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2556 Class &Lo, Class &Hi, bool isNamedArg) const {
2557 // FIXME: This code can be simplified by introducing a simple value class for
2558   // Class pairs with appropriate constructor methods for the various
2559   // situations.
2561 // FIXME: Some of the split computations are wrong; unaligned vectors
2562 // shouldn't be passed in registers for example, so there is no chance they
2563 // can straddle an eightbyte. Verify & simplify.
2565   Lo = Hi = NoClass;
2567   Class &Current = OffsetBase < 64 ? Lo : Hi;
2568   Current = Memory;
2570 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2571 BuiltinType::Kind k = BT->getKind();
2573 if (k == BuiltinType::Void) {
2575 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2578 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2580 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2582 } else if (k == BuiltinType::LongDouble) {
2583 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2584 if (LDF == &llvm::APFloat::IEEEquad()) {
2587 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2590 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2593 llvm_unreachable("unexpected long double representation!");
2595 // FIXME: _Decimal32 and _Decimal64 are SSE.
2596 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2600 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2601 // Classify the underlying integer type.
2602     classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2603     return;
2604   }
2606   if (Ty->hasPointerRepresentation()) {
2607     Current = Integer;
2608     return;
2609   }
2611 if (Ty->isMemberPointerType()) {
2612 if (Ty->isMemberFunctionPointerType()) {
2613 if (Has64BitPointers) {
2614         // If Has64BitPointers, this is an {i64, i64}, so classify both
2615         // Lo and Hi now.
2616         Lo = Hi = Integer;
2617       } else {
2618         // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2619         // straddles an eightbyte boundary, Hi should be classified as well.
2620         uint64_t EB_FuncPtr = (OffsetBase) / 64;
2621         uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2622         if (EB_FuncPtr != EB_ThisAdj) {
2623           Lo = Hi = Integer;
2624         } else {
2625           Current = Integer;
2626         }
2627       }
2628     } else {
2629       Current = Integer;
2630     }
2631     return;
2632   }
2634 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2635 uint64_t Size = getContext().getTypeSize(VT);
2636 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2637 // gcc passes the following as integer:
2638 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2639 // 2 bytes - <2 x char>, <1 x short>
2640       // 1 byte - <1 x char>
2641       Current = Integer;
2643       // If this type crosses an eightbyte boundary, it should be
2644       // split.
2645       uint64_t EB_Lo = (OffsetBase) / 64;
2646       uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2647       if (EB_Lo != EB_Hi)
2648         Hi = Lo;
2649     } else if (Size == 64) {
2650 QualType ElementType = VT->getElementType();
2652 // gcc passes <1 x double> in memory. :(
2653       if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2654         return;
2656 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2657 // pass them as integer. For platforms where clang is the de facto
2658 // platform compiler, we must continue to use integer.
2659 if (!classifyIntegerMMXAsSSE() &&
2660 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2661 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2662 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2663            ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2664         Current = Integer;
2665       else
2666         Current = SSE;
2668       // If this type crosses an eightbyte boundary, it should be
2669       // split.
2670       if (OffsetBase && OffsetBase != 64)
2671         Hi = Lo;
2672 } else if (Size == 128 ||
2673 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2674 QualType ElementType = VT->getElementType();
2676 // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
2677 if (passInt128VectorsInMem() && Size != 128 &&
2678 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
2679            ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
2680         return;
2682 // Arguments of 256-bits are split into four eightbyte chunks. The
2683 // least significant one belongs to class SSE and all the others to class
2684 // SSEUP. The original Lo and Hi design considers that types can't be
2685 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2686 // This design isn't correct for 256-bits, but since there're no cases
2687 // where the upper parts would need to be inspected, avoid adding
2688 // complexity and just consider Hi to match the 64-256 part.
2690 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2691 // registers if they are "named", i.e. not part of the "..." of a
2692 // variadic function.
2694 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2695       // split into eight eightbyte chunks, one SSE and seven SSEUP.
2696       Lo = SSE;
2697       Hi = SSEUp;
2698     }
2699     return;
2700   }
2702 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2703 QualType ET = getContext().getCanonicalType(CT->getElementType());
2705 uint64_t Size = getContext().getTypeSize(Ty);
2706     if (ET->isIntegralOrEnumerationType()) {
2707       if (Size <= 64)
2708         Current = Integer;
2709       else if (Size <= 128)
2710         Lo = Hi = Integer;
2711     } else if (ET == getContext().FloatTy) {
2712       Current = SSE;
2713     } else if (ET == getContext().DoubleTy) {
2714       Lo = Hi = SSE;
2715     } else if (ET == getContext().LongDoubleTy) {
2716       const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2717       if (LDF == &llvm::APFloat::IEEEquad())
2718         Current = Memory;
2719       else if (LDF == &llvm::APFloat::x87DoubleExtended())
2720         Current = ComplexX87;
2721       else if (LDF == &llvm::APFloat::IEEEdouble())
2722         Lo = Hi = SSE;
2723       else
2724         llvm_unreachable("unexpected long double representation!");
2725     }
2727     // If this complex type crosses an eightbyte boundary then it
2728     // should be split.
2729     uint64_t EB_Real = (OffsetBase) / 64;
2730     uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2731     if (Hi == NoClass && EB_Real != EB_Imag)
2732       Hi = Lo;
2734     return;
2735   }
2737 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2738 // Arrays are treated like structures.
2740 uint64_t Size = getContext().getTypeSize(Ty);
2742 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2743     // than eight eightbytes, ..., it has class MEMORY.
2744     if (Size > 512)
2745       return;
2747 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2748 // fields, it has class MEMORY.
2750 // Only need to check alignment of array base.
2751     if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2752       return;
2754 // Otherwise implement simplified merge. We could be smarter about
2755     // this, but it isn't worth it and would be harder to verify.
2756     Current = NoClass;
2757 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2758 uint64_t ArraySize = AT->getSize().getZExtValue();
2760 // The only case a 256-bit wide vector could be used is when the array
2761 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2762 // to work for sizes wider than 128, early check and fallback to memory.
2764     if (Size > 128 &&
2765         (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2766       return;
2768 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2769 Class FieldLo, FieldHi;
2770 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2771 Lo = merge(Lo, FieldLo);
2772 Hi = merge(Hi, FieldHi);
2773       if (Lo == Memory || Hi == Memory)
2774         break;
2775     }
2777     postMerge(Size, Lo, Hi);
2778     assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2779     return;
2780   }
2782 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2783 uint64_t Size = getContext().getTypeSize(Ty);
2785 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2786     // than eight eightbytes, ..., it has class MEMORY.
2787     if (Size > 512)
2788       return;
2790     // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
2791     // copy constructor or a non-trivial destructor, it is passed by invisible
2792     // reference.
2793     if (getRecordArgABI(RT, getCXXABI()))
2794       return;
2796 const RecordDecl *RD = RT->getDecl();
2798 // Assume variable sized types are passed in memory.
2799     if (RD->hasFlexibleArrayMember())
2800       return;
2802 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2804 // Reset Lo class, this will be recomputed.
2807 // If this is a C++ record, classify the bases first.
2808 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2809 for (const auto &I : CXXRD->bases()) {
2810 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2811 "Unexpected base class!");
2812       const CXXRecordDecl *Base =
2813           cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
2815 // Classify this field.
2817 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
2818 // single eightbyte, each is classified separately. Each eightbyte gets
2819 // initialized to class NO_CLASS.
2820 Class FieldLo, FieldHi;
2821       uint64_t Offset =
2822           OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
2823 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2824 Lo = merge(Lo, FieldLo);
2825 Hi = merge(Hi, FieldHi);
2826 if (Lo == Memory || Hi == Memory) {
2827           postMerge(Size, Lo, Hi);
2828           return;
2829         }
2830       }
2831     }
2833     // Classify the fields one at a time, merging the results.
2834     unsigned idx = 0;
2835 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2836 i != e; ++i, ++idx) {
2837 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2838 bool BitField = i->isBitField();
2840 // Ignore padding bit-fields.
2841       if (BitField && i->isUnnamedBitfield())
2842         continue;
2844 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
2845 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
2847 // The only case a 256-bit wide vector could be used is when the struct
2848 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2849 // to work for sizes wider than 128, early check and fallback to memory.
2851 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
2852                          Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2853         Lo = Memory;
2854         postMerge(Size, Lo, Hi);
2855         return;
2856       }
2857 // Note, skip this test for bit-fields, see below.
2858 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2859         Lo = Memory;
2860         postMerge(Size, Lo, Hi);
2861         return;
2862       }
2864 // Classify this field.
2866 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2867 // exceeds a single eightbyte, each is classified
2868       // separately. Each eightbyte gets initialized to class
2869       // NO_CLASS.
2870 Class FieldLo, FieldHi;
2872 // Bit-fields require special handling, they do not force the
2873 // structure to be passed in memory even if unaligned, and
2874 // therefore they can straddle an eightbyte.
2875       if (BitField) {
2876         assert(!i->isUnnamedBitfield());
2877 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2878 uint64_t Size = i->getBitWidthValue(getContext());
2880 uint64_t EB_Lo = Offset / 64;
2881 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2883         if (EB_Lo) {
2884           assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2885           FieldLo = NoClass;
2886           FieldHi = Integer;
2887         } else {
2888           FieldLo = Integer;
2889           FieldHi = EB_Hi ? Integer : NoClass;
2890         }
2891       } else
2892         classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2893 Lo = merge(Lo, FieldLo);
2894 Hi = merge(Hi, FieldHi);
2895       if (Lo == Memory || Hi == Memory)
2896         break;
2897     }
2899     postMerge(Size, Lo, Hi);
2900   }
2901 }
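// Illustrative sketch (hand-worked, not normative): classifying the
// hypothetical
//
//   struct S { double d; long l; };
//
// visits 'd' at offset 0 (Lo becomes SSE) and 'l' at offset 64 (Hi becomes
// Integer), so the struct is later lowered as one XMM eightbyte plus one
// GPR eightbyte.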
2903 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2904   // If this is a scalar LLVM value then assume LLVM will pass it in the right
2905   // place naturally.
2906 if (!isAggregateTypeForABI(Ty)) {
2907 // Treat an enum type as its underlying type.
2908 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2909 Ty = EnumTy->getDecl()->getIntegerType();
2911     return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
2912                                           : ABIArgInfo::getDirect());
2913   }
2915   return getNaturalAlignIndirect(Ty);
2916 }
2918 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2919 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2920 uint64_t Size = getContext().getTypeSize(VecTy);
2921 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2922     if (Size <= 64 || Size > LargestVector)
2923       return true;
2924 QualType EltTy = VecTy->getElementType();
2925 if (passInt128VectorsInMem() &&
2926 (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
2927          EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
2928       return true;
2929   }
2931   return false;
2932 }
2934 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2935 unsigned freeIntRegs) const {
2936   // If this is a scalar LLVM value then assume LLVM will pass it in the right
2937   // place naturally.
2938   //
2939 // This assumption is optimistic, as there could be free registers available
2940 // when we need to pass this argument in memory, and LLVM could try to pass
2941 // the argument in the free register. This does not seem to happen currently,
2942 // but this code would be much safer if we could mark the argument with
2943 // 'onstack'. See PR12193.
2944 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2945 // Treat an enum type as its underlying type.
2946 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2947 Ty = EnumTy->getDecl()->getIntegerType();
2949 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
2950                                           : ABIArgInfo::getDirect());
2951   }
2953 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2954 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
2956 // Compute the byval alignment. We specify the alignment of the byval in all
2957 // cases so that the mid-level optimizer knows the alignment of the byval.
2958 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2960 // Attempt to avoid passing indirect results using byval when possible. This
2961 // is important for good codegen.
2963 // We do this by coercing the value into a scalar type which the backend can
2964 // handle naturally (i.e., without using byval).
2966 // For simplicity, we currently only do this when we have exhausted all of the
2967 // free integer registers. Doing this when there are free integer registers
2968 // would require more care, as we would have to ensure that the coerced value
2969   // did not claim the unused register. That would require either reordering
2970   // the arguments to the function (so that any subsequent inreg values came
2971   // first), or only doing this optimization when there were no following
2972   // arguments that might be passed in the free register.
2974 // We currently expect it to be rare (particularly in well written code) for
2975 // arguments to be passed on the stack when there are still free integer
2976 // registers available (this would typically imply large structs being passed
2977 // by value), so this seems like a fair tradeoff for now.
2979 // We can revisit this if the backend grows support for 'onstack' parameter
2980 // attributes. See PR12193.
2981 if (freeIntRegs == 0) {
2982 uint64_t Size = getContext().getTypeSize(Ty);
2984 // If this type fits in an eightbyte, coerce it into the matching integral
2985 // type, which will end up on the stack (with alignment 8).
2986 if (Align == 8 && Size <= 64)
2987       return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2988                                                           Size));
2989   }
2991   return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2992 }
2994 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2995 /// register. Pick an LLVM IR type that will be passed as a vector register.
2996 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2997 // Wrapper structs/arrays that only contain vectors are passed just like
2998 // vectors; strip them off if present.
2999 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
3000 Ty = QualType(InnerTy, 0);
3002 llvm::Type *IRType = CGT.ConvertType(Ty);
3003 if (isa<llvm::VectorType>(IRType)) {
3004     // Don't pass vXi128 vectors in their native type, the backend can't
3005     // legalize them.
3006 if (passInt128VectorsInMem() &&
3007 IRType->getVectorElementType()->isIntegerTy(128)) {
3008 // Use a vXi64 vector.
3009 uint64_t Size = getContext().getTypeSize(Ty);
3010       return llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()),
3011                                    Size / 64);
3012     }
3014     return IRType;
3015   }
3017   if (IRType->getTypeID() == llvm::Type::FP128TyID)
3018     return IRType;
3020 // We couldn't find the preferred IR vector type for 'Ty'.
3021 uint64_t Size = getContext().getTypeSize(Ty);
3022 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
3025 // Return a LLVM IR vector type based on the size of 'Ty'.
3026   return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
3027                                Size / 64);
3028 }
3030 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
3031 /// is known to either be off the end of the specified type or being in
3032 /// alignment padding. The user type specified is known to be at most 128 bits
3033 /// in size, and have passed through X86_64ABIInfo::classify with a successful
3034 /// classification that put one of the two halves in the INTEGER class.
3036 /// It is conservatively correct to return false.
3037 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
3038 unsigned EndBit, ASTContext &Context) {
3039 // If the bytes being queried are off the end of the type, there is no user
3040 // data hiding here. This handles analysis of builtins, vectors and other
3041 // types that don't contain interesting padding.
3042 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
3043   if (TySize <= StartBit)
3044     return true;
3046 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3047 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
3048 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3050 // Check each element to see if the element overlaps with the queried range.
3051 for (unsigned i = 0; i != NumElts; ++i) {
3052 // If the element is after the span we care about, then we're done..
3053 unsigned EltOffset = i*EltSize;
3054 if (EltOffset >= EndBit) break;
3056 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3057 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
3058                                  EndBit-EltOffset, Context))
3059         return false;
3060     }
3061     // If it overlaps no elements, then it is safe to process as padding.
3062     return true;
3063   }
3065 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3066 const RecordDecl *RD = RT->getDecl();
3067 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3069 // If this is a C++ record, check the bases first.
3070 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3071 for (const auto &I : CXXRD->bases()) {
3072 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3073 "Unexpected base class!");
3074       const CXXRecordDecl *Base =
3075           cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3077 // If the base is after the span we care about, ignore it.
3078 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
3079 if (BaseOffset >= EndBit) continue;
3081 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3082 if (!BitsContainNoUserData(I.getType(), BaseStart,
3083                                  EndBit-BaseOffset, Context))
3084         return false;
3085     }
3086   }
3088 // Verify that no field has data that overlaps the region of interest. Yes
3089 // this could be sped up a lot by being smarter about queried fields,
3090     // however we're only looking at structs up to 16 bytes, so we don't care
3091     // much.
3092     unsigned idx = 0;
3093 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3094 i != e; ++i, ++idx) {
3095 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
3097 // If we found a field after the region we care about, then we're done.
3098 if (FieldOffset >= EndBit) break;
3100 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3101       if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
3102                                  Context))
3103         return false;
3104     }
3106     // If nothing in this record overlapped the area of interest, then we're
3107     // clean.
3108     return true;
3109   }
3111   return false;
3112 }
3114 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
3115 /// float member at the specified offset. For example, {int,{float}} has a
3116 /// float at offset 4. It is conservatively correct for this routine to return
3117 /// false.
3118 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
3119 const llvm::DataLayout &TD) {
3120 // Base case if we find a float.
3121   if (IROffset == 0 && IRType->isFloatTy())
3122     return true;
3124 // If this is a struct, recurse into the field at the specified offset.
3125 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3126 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3127 unsigned Elt = SL->getElementContainingOffset(IROffset);
3128 IROffset -= SL->getElementOffset(Elt);
3129 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3132 // If this is an array, recurse into the field at the specified offset.
3133 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3134 llvm::Type *EltTy = ATy->getElementType();
3135 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3136 IROffset -= IROffset/EltSize*EltSize;
3137     return ContainsFloatAtOffset(EltTy, IROffset, TD);
3138   }
3140   return false;
3141 }
3144 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3145 /// low 8 bytes of an XMM register, corresponding to the SSE class.
3146 llvm::Type *X86_64ABIInfo::
3147 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3148 QualType SourceTy, unsigned SourceOffset) const {
3149 // The only three choices we have are either double, <2 x float>, or float. We
3150 // pass as float if the last 4 bytes is just padding. This happens for
3151 // structs that contain 3 floats.
3152 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3153 SourceOffset*8+64, getContext()))
3154 return llvm::Type::getFloatTy(getVMContext());
3156 // We want to pass as <2 x float> if the LLVM IR type contains a float at
3157 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
3159 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3160 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3161 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
3163   return llvm::Type::getDoubleTy(getVMContext());
3164 }
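// Illustrative sketch (hand-worked, not normative): for the hypothetical
//
//   struct V3 { float x, y, z; };   // 12 bytes
//
// the low eightbyte holds two floats, so this helper yields <2 x float>;
// for the high eightbyte only 'z' is user data and the trailing 4 bytes
// are padding, so the early return above picks a lone float instead.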
3167 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3168 /// an 8-byte GPR. This means that we either have a scalar or we are talking
3169 /// about the high or low part of an up-to-16-byte struct. This routine picks
3170 /// the best LLVM IR type to represent this, which may be i64 or may be anything
3171 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3172 /// etc).
3174 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3175 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
3176 /// the 8-byte value references. PrefType may be null.
3178 /// SourceTy is the source-level type for the entire argument. SourceOffset is
3179 /// an offset into this that we're processing (which is always either 0 or 8).
3181 llvm::Type *X86_64ABIInfo::
3182 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3183 QualType SourceTy, unsigned SourceOffset) const {
3184 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3185 // returning an 8-byte unit starting with it. See if we can safely use it.
3186 if (IROffset == 0) {
3187 // Pointers and int64's always fill the 8-byte unit.
3188 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3189         IRType->isIntegerTy(64))
3190       return IRType;
3192 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3193 // goodness in the source type is just tail padding. This is allowed to
3194 // kick in for struct {double,int} on the int, but not on
3195 // struct{double,int,int} because we wouldn't return the second int. We
3196 // have to do this analysis on the source type because we can't depend on
3197 // unions being lowered a specific way etc.
3198 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3199 IRType->isIntegerTy(32) ||
3200 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3201 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3202 cast<llvm::IntegerType>(IRType)->getBitWidth();
3204 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3205                                 SourceOffset*8+64, getContext()))
3206         return IRType;
3207     }
3208   }
3210 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3211 // If this is a struct, recurse into the field at the specified offset.
3212 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3213 if (IROffset < SL->getSizeInBytes()) {
3214 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3215 IROffset -= SL->getElementOffset(FieldIdx);
3217 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3218                                     SourceTy, SourceOffset);
3219     }
3220   }
3222 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3223 llvm::Type *EltTy = ATy->getElementType();
3224 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3225 unsigned EltOffset = IROffset/EltSize*EltSize;
3226     return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3227                                   SourceOffset);
3228   }
3230 // Okay, we don't have any better idea of what to pass, so we pass this in an
3231 // integer register that isn't too big to fit the rest of the struct.
3232 unsigned TySizeInBytes =
3233 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3235 assert(TySizeInBytes != SourceOffset && "Empty field?");
3237 // It is always safe to classify this as an integer type up to i64 that
3238 // isn't larger than the structure.
3239 return llvm::IntegerType::get(getVMContext(),
3240                                 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3241 }
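// Illustrative sketch (hand-worked, not normative): for the hypothetical
//
//   struct P { char c; short s; };   // 4 bytes
//
// the recursion lands on the i8 field first, but bits 8..32 still carry
// the short, so the early returns above do not fire; the fallback clause
// then sizes the unit to min(4, 8) bytes and hands back an i32.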
3244 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3245 /// be used as elements of a two register pair to pass or return, return a
3246 /// first class aggregate to represent them. For example, if the low part of
3247 /// a by-value argument should be passed as i32* and the high part as float,
3248 /// return {i32*, float}.
3249 static llvm::Type *
3250 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3251 const llvm::DataLayout &TD) {
3252   // In order to correctly satisfy the ABI, we need the high part to start
3253 // at offset 8. If the high and low parts we inferred are both 4-byte types
3254 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3255 // the second element at offset 8. Check for this:
3256 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3257 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3258 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3259 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3261 // To handle this, we have to increase the size of the low part so that the
3262 // second element will start at an 8 byte offset. We can't increase the size
3263   // of the second element because it might make us access off the end of the
3264   // struct.
3265   if (HiStart != 8) {
3266     // There are usually two sorts of types the ABI generation code can produce
3267     // for the low part of a pair that aren't 8 bytes in size: float or
3268     // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3269     // NaCl).
3270     // Promote these to a larger type.
3271     if (Lo->isFloatTy())
3272       Lo = llvm::Type::getDoubleTy(Lo->getContext());
3273     else {
3274       assert((Lo->isIntegerTy() || Lo->isPointerTy())
3275              && "Invalid/unknown lo type");
3276       Lo = llvm::Type::getInt64Ty(Lo->getContext());
3277     }
3278   }
3280 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3282 // Verify that the second element is at an 8-byte offset.
3283   assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3284          "Invalid x86-64 argument pair!");
3285   return Result;
3286 }
3288 ABIArgInfo X86_64ABIInfo::
3289 classifyReturnType(QualType RetTy) const {
3290 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3291 // classification algorithm.
3292 X86_64ABIInfo::Class Lo, Hi;
3293 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3295 // Check some invariants.
3296 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3297 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3299 llvm::Type *ResType = nullptr;
3300   switch (Lo) {
3301   case NoClass:
3302     if (Hi == NoClass)
3303       return ABIArgInfo::getIgnore();
3304     // If the low part is just padding, it takes no register, leave ResType
3305     // null.
3306     assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3307            "Unknown missing lo part");
3308     break;
3310   case SSEUp:
3311   case X87Up:
3312     llvm_unreachable("Invalid classification for lo word.");
3314     // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3315     // hidden argument.
3316   case Memory:
3317 return getIndirectReturnResult(RetTy);
3319 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3320     // available register of the sequence %rax, %rdx is used.
3321   case Integer:
3322 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3324 // If we have a sign or zero extended integer, make sure to return Extend
3325 // so that the parameter gets the right LLVM IR attributes.
3326 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3327 // Treat an enum type as its underlying type.
3328 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3329 RetTy = EnumTy->getDecl()->getIntegerType();
3331 if (RetTy->isIntegralOrEnumerationType() &&
3332 RetTy->isPromotableIntegerType())
3333         return ABIArgInfo::getExtend(RetTy);
3334     }
3335     break;
3337 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3338     // available SSE register of the sequence %xmm0, %xmm1 is used.
3339   case SSE:
3340     ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3341     break;
3343 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3344     // returned on the X87 stack in %st0 as 80-bit x87 number.
3345   case X87:
3346     ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3347     break;
3349 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3350     // part of the value is returned in %st0 and the imaginary part in
3351     // %st1.
3352   case ComplexX87:
3353 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3354 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3355                                     llvm::Type::getX86_FP80Ty(getVMContext()));
3356     break;
3357   }
3359   llvm::Type *HighPart = nullptr;
3360   switch (Hi) {
3361 // Memory was handled previously and X87 should
3362     // never occur as a hi class.
3363   case Memory:
3364   case X87:
3365     llvm_unreachable("Invalid classification for hi word.");
3367   case ComplexX87: // Previously handled.
3368   case NoClass:
3369     break;
3371   case Integer:
3372 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3373 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3374       return ABIArgInfo::getDirect(HighPart, 8);
3375     break;
3376   case SSE:
3377 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3378 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3379       return ABIArgInfo::getDirect(HighPart, 8);
3380     break;
3382 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3383     // is passed in the next available eightbyte chunk if the last used
3384     // vector register.
3385     //
3386     // SSEUP should always be preceded by SSE, just widen.
3387   case SSEUp:
3388     assert(Lo == SSE && "Unexpected SSEUp classification.");
3389     ResType = GetByteVectorType(RetTy);
3390     break;
3392 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3393 // returned together with the previous X87 value in %st0.
3395 // If X87Up is preceded by X87, we don't need to do
3396 // anything. However, in some cases with unions it may not be
3397 // preceded by X87. In such situations we follow gcc and pass the
3398 // extra bits in an SSE reg.
3400 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3401 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3402 return ABIArgInfo::getDirect(HighPart, 8);
3407 // If a high part was specified, merge it together with the low part. It is
3408 // known to pass in the high eightbyte of the result. We do this by forming a
3409 // first class struct aggregate with the high and low part: {low, high}
3411 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3413 return ABIArgInfo::getDirect(ResType);
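// Editorial worked example (illustrative, not from the ABI document): for a
// C return type such as
//   struct S { long a; double b; };
// classify() yields Lo = Integer and Hi = SSE, so the two eightbytes are
// merged by GetX86_64ByValArgumentPair into the IR pair { i64, double },
// returning 'a' in %rax and 'b' in %xmm0.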
ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
    bool isNamedArg) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend(Ty);
    }
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to be passed in the high eightbyte of the result. We do this by
  // forming a first class struct aggregate with the high and low part:
  // {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
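// Editorial worked example (illustrative): for an argument of type
//   struct T { int a, b; float c, d; };
// the first eightbyte (a, b) classifies as INTEGER and the second (c, d) as
// SSE, so the argument lowers to the coerced pair { i64, <2 x float> } and
// consumes one GPR (neededInt = 1) plus one XMM register (neededSSE = 1).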
ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE) const {
  auto RT = Ty->getAs<RecordType>();
  assert(RT && "classifyRegCallStructType only valid with struct types");

  if (RT->getDecl()->hasFlexibleArrayMember())
    return getIndirectReturnResult(Ty);

  // Sum up bases.
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
  }

  // Sum up members.
  for (const auto *FD : RT->getDecl()->fields()) {
    if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
      if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
    } else {
      unsigned LocalNeededInt, LocalNeededSSE;
      if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
                               LocalNeededSSE, true)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
    }
  }

  return ABIArgInfo::getDirect();
}

ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
                                                    unsigned &NeededInt,
                                                    unsigned &NeededSSE) const {
  NeededInt = 0;
  NeededSSE = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
}
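// Editorial note (illustrative): classifyRegCallStructType walks a struct
// field by field and accumulates register demand. For
//   struct R { int i; double d; };
// under __regcall it reports NeededInt = 1 and NeededSSE = 1; if either
// total exceeds the free registers, computeInfo below falls back to the
// indirect (memory) convention for the whole struct.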
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  const unsigned CallingConv = FI.getCallingConvention();
  // It is possible to force Win64 calling convention on any x86_64 target by
  // using __attribute__((ms_abi)). In such a case, to correctly emit Win64
  // compatible code, delegate this call to WinX86_64ABIInfo::computeInfo.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt, NeededSSE;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
      FI.getReturnInfo() =
          classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
        FreeIntRegs -= NeededInt;
        FreeSSERegs -= NeededSSE;
      } else {
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
      }
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) {
      // A complex long double is returned in memory when the regcall
      // calling convention is used.
      const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>();
      if (getContext().getCanonicalType(CT->getElementType()) ==
          getContext().LongDoubleTy)
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
    } else
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  }

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --FreeIntRegs;

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++FreeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}
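// Editorial note (illustrative): with the standard SysV convention,
// FreeIntRegs starts at 6, so for
//   void f(void *p1, ..., void *p7);
// the first six pointers land in %rdi, %rsi, %rdx, %rcx, %r8 and %r9, while
// p7 fails the FreeIntRegs check above and is reclassified via
// getIndirectResult, i.e. it travels on the stack.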
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
  if (Align > CharUnits::fromQuantity(8)) {
    overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
                                                      Align);
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
      CGF.Builder.CreateBitCast(overflow_arg_area,
                                llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Address(Res, Align);
}
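// Editorial note (illustrative): the rounding above keeps overflow_arg_area
// 8-byte aligned. Fetching a 12-byte struct, for instance, advances the
// pointer by (12 + 7) & ~7 == 16 bytes, so the next va_arg slot again
// starts on an 8-byte boundary.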
Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
  unsigned neededInt, neededSSE;

  Ty = getContext().getCanonicalType(Ty);
  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
                                       /*isNamedArg*/false);

  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
  // in the registers. If not go to step 7.
  if (!neededInt && !neededSSE)
    return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
  // general purpose registers needed to pass type and num_fp to hold
  // the number of floating point registers needed.

  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
  // l->fp_offset > 304 - num_fp * 16 go to step 7.
  //
  // NOTE: 304 is a typo in the ABI document; there are only
  // (6 * 8 + 8 * 16) = 176 bytes of register save space.
  llvm::Value *InRegs = nullptr;
  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.

  CGF.EmitBlock(InRegBlock);

  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
  // an offset of l->gp_offset and/or l->fp_offset. This may require
  // copying to a temporary location in case the parameter is passed
  // in different register classes or requires an alignment greater
  // than 8 for general purpose registers and 16 for XMM registers.
  //
  // FIXME: This really results in shameful code when we end up needing to
  // collect arguments from different places; often what should result in a
  // simple assembling of a structure from scattered addresses has many more
  // loads than necessary. Can we clean this up?
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
      CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");

  Address RegAddr = Address::invalid();
  if (neededInt && neededSSE) {
    // FIXME: Cleanup.
    assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
    llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
    llvm::Type *TyLo = ST->getElementType(0);
    llvm::Type *TyHi = ST->getElementType(1);
    assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
           "Unexpected ABI info for mixed regs");
    llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
    llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
    llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;

    // Copy the first element.
    // FIXME: Our choice of alignment here and below is probably pessimistic.
    llvm::Value *V = CGF.Builder.CreateAlignedLoad(
        TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
        CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));

    // Copy the second element.
    V = CGF.Builder.CreateAlignedLoad(
        TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
        CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
  } else if (neededInt) {
    RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
                      CharUnits::fromQuantity(8));
    RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);

    // Copy to a temporary if necessary to ensure the appropriate alignment.
    std::pair<CharUnits, CharUnits> SizeAlign =
        getContext().getTypeInfoInChars(Ty);
    uint64_t TySize = SizeAlign.first.getQuantity();
    CharUnits TyAlign = SizeAlign.second;

    // Copy into a temporary if the type is more aligned than the
    // register save area.
    if (TyAlign.getQuantity() > 8) {
      Address Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
      RegAddr = Tmp;
    }

  } else if (neededSSE == 1) {
    RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
                      CharUnits::fromQuantity(16));
    RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
    // SSE registers are spaced 16 bytes apart in the register save
    // area, we need to collect the two eightbytes together.
    // The ABI isn't explicit about this, but it seems reasonable
    // to assume that the slots are 16-byte aligned, since the stack is
    // naturally 16-byte aligned and the prologue is expected to store
    // all the SSE registers to the RSA.
    Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
                                CharUnits::fromQuantity(16));
    Address RegAddrHi =
        CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
                                               CharUnits::fromQuantity(16));
    llvm::Type *ST = AI.canHaveCoerceToType()
                         ? AI.getCoerceToType()
                         : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
    llvm::Value *V;
    Address Tmp = CGF.CreateMemTemp(Ty);
    Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
        RegAddrLo, ST->getStructElementType(0)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
        RegAddrHi, ST->getStructElementType(1)));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
  }

  // AMD64-ABI 3.5.7p5: Step 5. Set:
  // l->gp_offset = l->gp_offset + num_gp * 8
  // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.

  CGF.EmitBlock(InMemBlock);
  Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);

  // Return the appropriate result.

  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
                                 "vaarg.addr");
  return ResAddr;
}
Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                   QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}
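// Editorial note (illustrative): this handles va_arg in
// __attribute__((ms_abi)) functions on a SysV target, where the va_list is a
// plain byte pointer and every slot is a fixed 8 bytes, hence
// allowHigherAlign == false above.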
ABIArgInfo
WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
                                       const ABIArgInfo &current) const {
  // Assumes vectorCall calling convention.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
  }
  return current;
}
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {

  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    if (RT->getDecl()->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  // vectorcall adds the concept of a homogenous vector aggregate, similar to
  // other targets.
  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (IsRegCall) {
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
        FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (IsReturnType) {
        return ABIArgInfo::getExpand();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVAs are delayed and reclassified in the 2nd step.
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
    }
  }

  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Bool:
      // Bool type is always extended to the ABI, other builtin types are not
      // extended.
      return ABIArgInfo::getExtend(Ty);

    case BuiltinType::LongDouble:
      // Mingw64 GCC uses the old 80 bit extended precision floating point
      // unit. It passes them indirectly through memory.
      if (IsMingw64) {
        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
        if (LDF == &llvm::APFloat::x87DoubleExtended())
          return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
      break;

    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      // If it's a parameter type, the normal ABI rule is that arguments larger
      // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
      // even though it isn't particularly efficient.
      if (!IsReturnType)
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);

      // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
      // Clang matches them for compatibility.
      return ABIArgInfo::getDirect(
          llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2));

    default:
      break;
    }
  }

  return ABIArgInfo::getDirect();
}
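// Editorial worked example (illustrative): under the MS x64 rule above,
//   struct A { int a, b; };      // 8 bytes, power of 2  -> coerced to i64
//   struct B { char a, b, c; };  // 3 bytes              -> passed by reference
// i.e. only 1-, 2-, 4- and 8-byte records travel directly.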
void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
                                             unsigned FreeSSERegs,
                                             bool IsVectorCall,
                                             bool IsRegCall) const {
  unsigned Count = 0;
  for (auto &I : FI.arguments()) {
    // Vectorcall in x64 only permits the first 6 arguments to be passed
    // as XMM/YMM registers.
    if (Count < VectorcallMaxParamNumAsReg)
      I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
    else {
      // Since these cannot be passed in registers, pretend no registers
      // are left.
      unsigned ZeroSSERegsAvail = 0;
      I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
                        IsVectorCall, IsRegCall);
    }
    ++Count;
  }

  for (auto &I : FI.arguments()) {
    I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
  }
}
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CC = FI.getCallingConvention();
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  // If __attribute__((sysv_abi)) is in use, use the SysV argument
  // classification rules.
  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
    return;
  }

  unsigned FreeSSERegs = 0;
  if (IsVectorCall) {
    // We can use up to 4 SSE return registers with vectorcall.
    FreeSSERegs = 4;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
                                  IsVectorCall, IsRegCall);

  if (IsVectorCall) {
    // We can use up to 6 SSE register parameters with vectorcall.
    FreeSSERegs = 6;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers; we can reuse the return registers.
    FreeSSERegs = 16;
  }

  if (IsVectorCall) {
    computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
  } else {
    for (auto &I : FI.arguments())
      I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
  }
}
Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {

  bool IsIndirect = false;

  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
  // not 1, 2, 4, or 8 bytes, must be passed by reference."
  if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
    uint64_t Width = getContext().getTypeSize(Ty);
    IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}
// PowerPC-32
namespace {
/// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
  bool IsSoftFloatABI;

  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
      : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
      : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace
CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  if (Ty->isVectorType())
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
                                                                       : 4);

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignTy = nullptr;
  if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignTy = EltType;
  }

  if (AlignTy)
    return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
  return CharUnits::fromQuantity(4);
}
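// Editorial note (illustrative): on PPC32 only 128-bit vector types (and
// single-element structs wrapping them) get 16-byte alignment in the
// parameter area; everything else, including double, stays at the default
// 4-byte slot alignment returned above.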
// TODO: this implementation is now likely redundant with
// DefaultABIInfo::EmitVAArg.
Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
                                      QualType Ty) const {
  if (getTarget().getTriple().isOSDarwin()) {
    auto TI = getContext().getTypeInfoInChars(Ty);
    TI.second = getParamTypeAlignment(Ty);

    CharUnits SlotSize = CharUnits::fromQuantity(4);
    return emitVoidPtrVAArg(CGF, VAList, Ty,
                            classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
                            /*AllowHigherAlign=*/true);
  }

  const unsigned OverflowLimit = 8;
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return Address::invalid(); // FIXME?
  }

  // struct __va_list_tag {
  //   unsigned char gpr;
  //   unsigned char fpr;
  //   unsigned short reserved;
  //   void *overflow_arg_area;
  //   void *reg_save_area;
  // };

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt =
      Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;

  // All aggregates are passed indirectly? That doesn't seem consistent
  // with the argument-lowering code.
  bool isIndirect = Ty->isAggregateType();

  CGBuilderTy &Builder = CGF.Builder;

  // The calling convention either uses 1-2 GPRs or 1 FPR.
  Address NumRegsAddr = Address::invalid();
  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
  } else {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
  }

  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");

  // "Align" the register count when TY is i64.
  if (isI64 || (isF64 && IsSoftFloatABI)) {
    NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
  }

  llvm::Value *CC =
      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");

  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");

  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);

  llvm::Type *DirectTy = CGF.ConvertType(Ty);
  if (isIndirect) DirectTy = DirectTy->getPointerTo(0);

  // Case 1: consume registers.
  Address RegAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingRegs);

    Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
    RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
                      CharUnits::fromQuantity(8));
    assert(RegAddr.getElementType() == CGF.Int8Ty);

    // Floating-point registers start after the general-purpose registers.
    if (!(isInt || IsSoftFloatABI)) {
      RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
                                                   CharUnits::fromQuantity(32));
    }

    // Get the address of the saved value by scaling the number of
    // registers we've used by the size of each register.
    CharUnits RegSize =
        CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
    llvm::Value *RegOffset =
        Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
    RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
                                            RegAddr.getPointer(), RegOffset),
                      RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
    RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);

    // Increase the used-register count.
    NumRegs =
        Builder.CreateAdd(NumRegs,
                          Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI))
                                              ? 2 : 1));
    Builder.CreateStore(NumRegs, NumRegsAddr);

    CGF.EmitBranch(Cont);
  }

  // Case 2: consume space in the overflow area.
  Address MemAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingOverflow);

    Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);

    // Everything in the overflow area is rounded up to a size of at least 4.
    CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);

    CharUnits Size;
    if (!isIndirect) {
      auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
      Size = TypeInfo.first.alignTo(OverflowAreaAlign);
    } else {
      Size = CGF.getPointerSize();
    }

    Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
    Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
                         OverflowAreaAlign);
    // Round up the address of the argument to its alignment.
    CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
    if (Align > OverflowAreaAlign) {
      llvm::Value *Ptr = OverflowArea.getPointer();
      OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
                             Align);
    }

    MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);

    // Increase the overflow area.
    OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
    Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
    CGF.EmitBranch(Cont);
  }

  CGF.EmitBlock(Cont);

  // Merge the cases with a phi.
  Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
                                "vaarg.addr");

  // Load the pointer if the argument was passed indirectly.
  if (isIndirect) {
    Result = Address(Builder.CreateLoad(Result, "aggr"),
                     getContext().getTypeAlignInChars(Ty));
  }

  return Result;
}
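// Editorial note (illustrative): the register counting above mirrors the
// SVR4 convention of pairing GPRs for 64-bit values. Reading an i64 via
// va_arg first rounds the consumed-GPR count up to an even number (so the
// value starts on an even register such as r3/r4 or r5/r6) and then advances
// it by 2; once the count reaches OverflowLimit (8), all further arguments
// come from the overflow area.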
bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}
// PowerPC-64
namespace {
/// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    ELFv1 = 0,
    ELFv2
  };

private:
  static const unsigned GPRBits = 64;
  ABIKind Kind;
  bool HasQPX;
  bool IsSoftFloatABI;

  // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
  // will be passed in a QPX register.
  bool IsQPXVectorTy(const Type *Ty) const {
    if (!HasQPX)
      return false;

    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      unsigned NumElements = VT->getNumElements();
      if (NumElements == 1)
        return false;

      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
        if (getContext().getTypeSize(Ty) <= 256)
          return true;
      } else if (VT->getElementType()->
                     isSpecificBuiltinType(BuiltinType::Float)) {
        if (getContext().getTypeSize(Ty) <= 128)
          return true;
      }
    }

    return false;
  }

  bool IsQPXVectorTy(QualType Ty) const {
    return IsQPXVectorTy(Ty.getTypePtr());
  }

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
                     bool SoftFloatABI)
      : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
        IsSoftFloatABI(SoftFloatABI) {}

  bool isPromotableTypeForABI(QualType Ty) const;
  CharUnits getParamTypeAlignment(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  // TODO: We can add more logic to computeInfo to improve performance.
  // Example: For aggregate arguments that fit in a register, we could
  // use getDirectInReg (as is done below for structs containing a single
  // floating-point value) to avoid pushing them to memory on function
  // entry. This would require changing the logic in PPCISelLowering
  // when lowering the parameters in the caller and args in the callee.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments()) {
      // We rely on the default argument classification for the most part.
      // One exception: An aggregate containing a single floating-point
      // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if (IsQPXVectorTy(T) ||
            (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
                               PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
                               bool SoftFloatABI)
      : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
                                                 SoftFloatABI)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace
// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // In addition to the usual promotable integer types, we also need to
  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  return false;
}
/// getParamTypeAlignment - Determine whether a type requires 16-byte or
/// higher alignment in the parameter area. Always returns at least 8.
CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Only vector types of size 16 bytes need alignment (larger types are
  // passed via reference, smaller types are not aligned).
  if (IsQPXVectorTy(Ty)) {
    if (getContext().getTypeSize(Ty) > 128)
      return CharUnits::fromQuantity(32);

    return CharUnits::fromQuantity(16);
  } else if (Ty->isVectorType()) {
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
                                                                       : 8);
  }

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
         getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }

  // Likewise for ELFv2 homogeneous aggregates.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special case aggregates, only vector base types need alignment.
  if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
    if (getContext().getTypeSize(AlignAsType) > 128)
      return CharUnits::fromQuantity(32);

    return CharUnits::fromQuantity(16);
  } else if (AlignAsType) {
    return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
  }

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
    if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
      return CharUnits::fromQuantity(32);
    return CharUnits::fromQuantity(16);
  }

  return CharUnits::fromQuantity(8);
}
/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
/// aggregate. Base is set to the base element type, and Members is set
/// to the number of base elements.
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                     uint64_t &Members) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        // Ignore empty records.
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;

        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
          return false;

        Members += FldMembers;
      }
    }

    for (const auto *FD : RD->fields()) {
      // Ignore (non-zero arrays of) empty records.
      QualType FT = FD->getType();
      while (const ConstantArrayType *AT =
             getContext().getAsConstantArrayType(FT)) {
        if (AT->getSize().getZExtValue() == 0)
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(getContext(), FT, true))
        continue;

      // For compatibility with GCC, ignore empty bitfields in C++ mode.
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isZeroLengthBitField(getContext()))
        continue;

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
        return false;

      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }

    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))
      return false;
  } else {
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Most ABIs only support float, double, and some vector type widths.
    if (!isHomogeneousAggregateBaseType(Ty))
      return false;

    // The base type must be the same for all members. Types that
    // agree in both total size and mode (float vs. vector) are
    // treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base) {
      Base = TyPtr;
      // If it's a non-power-of-2 vector, its size is already a power-of-2,
      // so make sure to widen it explicitly.
      if (const VectorType *VT = Base->getAs<VectorType>()) {
        QualType EltTy = VT->getElementType();
        unsigned NumElements =
            getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
        Base = getContext()
                   .getVectorType(EltTy, NumElements, VT->getVectorKind())
                   .getTypePtr();
      }
    }

    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
      return false;
  }
  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}
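// Editorial worked example (illustrative): for
//   struct HFA { float x, y, z; };
// the walk above sets Base = float and Members = 3 and succeeds, whereas
//   struct Mixed { float f; int i; };
// fails the base-type check (int is not a legal homogeneous base), so Mixed
// is not treated as a homogeneous aggregate.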
bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for ELFv2 must have base types of float,
  // double, long double, or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble ||
        (getContext().getTargetInfo().hasFloat128Type() &&
         (BT->getKind() == BuiltinType::Float128))) {
      if (IsSoftFloatABI)
        return false;
      return true;
    }
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
      return true;
  }
  return false;
}

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  // Vector and fp128 types require one register, other floating point types
  // require one or two registers depending on their size.
  uint32_t NumRegs =
      ((getContext().getTargetInfo().hasFloat128Type() &&
          Base->isFloat128Type()) ||
        Base->isVectorType()) ? 1
                              : (getContext().getTypeSize(Base) + 63) / 64;

  // Homogeneous Aggregates may occupy at most 8 registers.
  return Members * NumRegs <= 8;
}
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
    uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}
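// Editorial worked example (illustrative): under ELFv2,
//   struct D3 { double a, b, c; };  // homogeneous -> passed as [3 x double]
//   struct W3 { long a, b, c; };    // 24 bytes, 8-byte aligned -> [3 x i64]
// while an aggregate larger than 8 doublewords (64 bytes) is passed ByVal.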
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return getNaturalAlignIndirect(RetTy);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
      } else
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return getNaturalAlignIndirect(RetTy);
  }

  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}
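// Editorial worked example (illustrative): for an ELFv2 return type of
//   struct P { int a, b, c; };   // 96 bits
// Bits (96) exceeds GPRBits (64), so the value is returned as the pair
// { i64, i64 } in r3/r4; an 8-byte struct would instead come back as a
// single i64.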
// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.second = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords. However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly. So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.first / 2;
    if (EltSize < SlotSize) {
      Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
                                            SlotSize * 2, SlotSize,
                                            SlotSize, /*AllowHigher*/ true);

      Address RealAddr = Addr;
      Address ImagAddr = RealAddr;
      if (CGF.CGM.getDataLayout().isBigEndian()) {
        RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
                                                          SlotSize - EltSize);
        ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
                                                      2 * SlotSize - EltSize);
      } else {
        ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
      }

      llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
      RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
      ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
      llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
      llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");

      Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
      CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
                             /*init*/ true);
      return Temp;
    }
  }

  // Otherwise, just use the general rule.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
                          TypeInfo, SlotSize, /*AllowHigher*/ true);
}
static bool
PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                              llvm::Value *Address) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-67 are various 8-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  AssignToArrayRange(Builder, Address, Eight8, 64, 67);

  // 68-76 are various 4-byte special-purpose registers:
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 68, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  // 114: tfhar
  // 115: tfiar
  // 116: texasr
  AssignToArrayRange(Builder, Address, Eight8, 109, 116);

  return false;
}

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
  CodeGen::CodeGenFunction &CGF,
  llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}
//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AArch64ABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    AAPCS = 0,
    DarwinPCS,
    Win64
  };

private:
  ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
      : SwiftABIInfo(CGT), Kind(Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type);
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
                         : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }

  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }

  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;
    llvm::Function *Fn = cast<llvm::Function>(GV);

    auto Kind = CGM.getCodeGenOpts().getSignReturnAddress();
    if (Kind != CodeGenOptions::SignReturnAddressScope::None) {
      Fn->addFnAttr("sign-return-address",
                    Kind == CodeGenOptions::SignReturnAddressScope::All
                        ? "all"
                        : "non-leaf");

      auto Key = CGM.getCodeGenOpts().getSignReturnAddressKey();
      Fn->addFnAttr("sign-return-address-key",
                    Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
                        ? "a_key"
                        : "b_key");
    }

    if (CGM.getCodeGenOpts().BranchTargetEnforcement)
      Fn->addFnAttr("branch-target-enforcement");
  }
};

class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // end anonymous namespace
5107 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
5108 Ty = useFirstFieldIfTransparentUnion(Ty);
5110 // Handle illegal vector types here.
5111 if (isIllegalVectorType(Ty)) {
5112 uint64_t Size = getContext().getTypeSize(Ty);
5113 // Android promotes <2 x i8> to i16, not i32
5114 if (isAndroid() && (Size <= 16)) {
5115 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
5116 return ABIArgInfo::getDirect(ResType);
5117 }
5118 if (Size <= 32) {
5119 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
5120 return ABIArgInfo::getDirect(ResType);
5121 }
5122 if (Size == 64) {
5123 llvm::Type *ResType =
5124 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
5125 return ABIArgInfo::getDirect(ResType);
5126 }
5127 if (Size == 128) {
5128 llvm::Type *ResType =
5129 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
5130 return ABIArgInfo::getDirect(ResType);
5131 }
5132 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5133 }
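// Illustrative note (added by the editor; not in the original source): under
// this lowering a <2 x i8> argument (16 bits) becomes i16 on Android and i32
// elsewhere, a 64-bit vector becomes <2 x i32>, a 128-bit one <4 x i32>, and
// any larger illegal vector is passed indirectly.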
5135 if (!isAggregateTypeForABI(Ty)) {
5136 // Treat an enum type as its underlying type.
5137 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5138 Ty = EnumTy->getDecl()->getIntegerType();
5140 return (Ty->isPromotableIntegerType() && isDarwinPCS()
5141 ? ABIArgInfo::getExtend(Ty)
5142 : ABIArgInfo::getDirect());
5145 // Structures with either a non-trivial destructor or a non-trivial
5146 // copy constructor are always indirect.
5147 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5148 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
5149 CGCXXABI::RAA_DirectInMemory);
5152 // Empty records are always ignored on Darwin, but actually passed in C++ mode
5153 // elsewhere for GNU compatibility.
5154 uint64_t Size = getContext().getTypeSize(Ty);
5155 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
5156 if (IsEmpty || Size == 0) {
5157 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
5158 return ABIArgInfo::getIgnore();
5160 // GNU C mode. The only argument that gets ignored is an empty one
5161 // with size 0.
5162 if (IsEmpty && Size == 0)
5163 return ABIArgInfo::getIgnore();
5164 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5165 }
5167 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
5168 const Type *Base = nullptr;
5169 uint64_t Members = 0;
5170 if (isHomogeneousAggregate(Ty, Base, Members)) {
5171 return ABIArgInfo::getDirect(
5172 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
5175 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
5176 if (Size <= 128) {
5177 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
5178 // same size and alignment.
5179 if (getTarget().isRenderScriptTarget()) {
5180 return coerceToIntArray(Ty, getContext(), getVMContext());
5181 }
5182 unsigned Alignment;
5183 if (Kind == AArch64ABIInfo::AAPCS) {
5184 Alignment = getContext().getTypeUnadjustedAlign(Ty);
5185 Alignment = Alignment < 128 ? 64 : 128;
5186 } else {
5187 Alignment = getContext().getTypeAlign(Ty);
5188 }
5189 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5191 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5192 // For aggregates with 16-byte alignment, we use i128.
5193 if (Alignment < 128 && Size == 128) {
5194 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5195 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5196 }
5197 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5198 }
5200 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5201 }
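// Illustrative note (added by the editor; not in the original source): a
// 16-byte, 8-byte-aligned struct such as
//   struct S { long x; long y; };
// is coerced to [2 x i64] above, while a 16-byte-aligned
//   struct T { __int128 v; };
// is coerced to a single i128, which encodes its even/odd register-pair
// placement requirement.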
5203 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
5204 if (RetTy->isVoidType())
5205 return ABIArgInfo::getIgnore();
5207 // Large vector types should be returned via memory.
5208 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
5209 return getNaturalAlignIndirect(RetTy);
5211 if (!isAggregateTypeForABI(RetTy)) {
5212 // Treat an enum type as its underlying type.
5213 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5214 RetTy = EnumTy->getDecl()->getIntegerType();
5216 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
5217 ? ABIArgInfo::getExtend(RetTy)
5218 : ABIArgInfo::getDirect());
5221 uint64_t Size = getContext().getTypeSize(RetTy);
5222 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
5223 return ABIArgInfo::getIgnore();
5225 const Type *Base = nullptr;
5226 uint64_t Members = 0;
5227 if (isHomogeneousAggregate(RetTy, Base, Members))
5228 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
5229 return ABIArgInfo::getDirect();
5231 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
5232 if (Size <= 128) {
5233 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
5234 // same size and alignment.
5235 if (getTarget().isRenderScriptTarget()) {
5236 return coerceToIntArray(RetTy, getContext(), getVMContext());
5237 }
5238 unsigned Alignment = getContext().getTypeAlign(RetTy);
5239 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5241 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5242 // For aggregates with 16-byte alignment, we use i128.
5243 if (Alignment < 128 && Size == 128) {
5244 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5245 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5246 }
5247 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5248 }
5250 return getNaturalAlignIndirect(RetTy);
5251 }
5253 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
5254 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
5255 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5256 // Check whether VT is legal.
5257 unsigned NumElements = VT->getNumElements();
5258 uint64_t Size = getContext().getTypeSize(VT);
5259 // NumElements should be power of 2.
5260 if (!llvm::isPowerOf2_32(NumElements))
5261 return true;
5262 return Size != 64 && (Size != 128 || NumElements == 1);
5263 }
5264 return false;
5265 }
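// Illustrative note (added by the editor; not in the original source): by the
// rule above, <2 x i32> (64 bits) and <4 x i32> (128 bits) are legal, while
// <3 x i16> (non-power-of-2 element count) and <2 x i8> (only 16 bits) are
// illegal and get coerced in classifyArgumentType.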
5267 bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
5269 unsigned elts) const {
5270 if (!llvm::isPowerOf2_32(elts))
5271 return false;
5272 if (totalSize.getQuantity() != 8 &&
5273 (totalSize.getQuantity() != 16 || elts == 1))
5274 return false;
5275 return true;
5276 }
5278 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5279 // Homogeneous aggregates for AAPCS64 must have base types of a floating
5280 // point type or a short-vector type. This is the same as the 32-bit ABI,
5281 // but with the difference that any floating-point type is allowed,
5282 // including __fp16.
5283 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5284 if (BT->isFloatingPoint())
5285 return true;
5286 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5287 unsigned VecSize = getContext().getTypeSize(VT);
5288 if (VecSize == 64 || VecSize == 128)
5289 return true;
5290 }
5291 return false;
5292 }
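// Illustrative note (added by the editor; not in the original source):
// together with the Members <= 4 rule below, this makes
//   struct HFA { float x, y, z, w; };     // four members, one base type
// a homogeneous aggregate (lowered to [4 x float] and passed in FP
// registers), while
//   struct NotHFA { float f; double d; }; // mixed base types
// fails the base-type check and is treated as an ordinary composite.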
5294 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5295 uint64_t Members) const {
5296 return Members <= 4;
5299 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
5300 QualType Ty,
5301 CodeGenFunction &CGF) const {
5302 ABIArgInfo AI = classifyArgumentType(Ty);
5303 bool IsIndirect = AI.isIndirect();
5305 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5306 if (IsIndirect)
5307 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5308 else if (AI.getCoerceToType())
5309 BaseTy = AI.getCoerceToType();
5311 unsigned NumRegs = 1;
5312 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5313 BaseTy = ArrTy->getElementType();
5314 NumRegs = ArrTy->getNumElements();
5316 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5318 // The AArch64 va_list type and handling is specified in the Procedure Call
5319 // Standard, section B.4:
5320 //
5321 // struct {
5322 //   void *__stack;
5323 //   void *__gr_top;
5324 //   void *__vr_top;
5325 //   int __gr_offs;
5326 //   int __vr_offs;
5327 // };
5329 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
5330 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5331 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
5332 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5334 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
5335 CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);
5337 Address reg_offs_p = Address::invalid();
5338 llvm::Value *reg_offs = nullptr;
5339 int reg_top_index;
5340 int RegSize = IsIndirect ? 8 : TySize.getQuantity();
5341 if (!IsFPR) {
5342 // 3 is the field number of __gr_offs
5343 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
5344 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
5345 reg_top_index = 1; // field number for __gr_top
5346 RegSize = llvm::alignTo(RegSize, 8);
5347 } else {
5348 // 4 is the field number of __vr_offs.
5349 reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
5350 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
5351 reg_top_index = 2; // field number for __vr_top
5352 RegSize = 16 * NumRegs;
5353 }
5355 //=======================================
5356 // Find out where argument was passed
5357 //=======================================
5359 // If reg_offs >= 0 we're already using the stack for this type of
5360 // argument. We don't want to keep updating reg_offs (in case it overflows,
5361 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
5362 // whatever they get).
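// Illustrative note (added by the editor; not in the original source):
// __gr_offs and __vr_offs start out negative (e.g. __gr_offs == -56 when one
// GPR was consumed by named parameters) and count up towards zero as va_arg
// fires; a non-negative offset therefore means the register save area for
// that class is already exhausted.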
5363 llvm::Value *UsingStack = nullptr;
5364 UsingStack = CGF.Builder.CreateICmpSGE(
5365 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
5367 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5369 // Otherwise, at least some kind of argument could go in these registers, the
5370 // question is whether this particular type is too big.
5371 CGF.EmitBlock(MaybeRegBlock);
5373 // Integer arguments may need to correct register alignment (for example a
5374 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
5375 // align __gr_offs to calculate the potential address.
5376 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
5377 int Align = TyAlign.getQuantity();
5379 reg_offs = CGF.Builder.CreateAdd(
5380 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
5381 "align_regoffs");
5382 reg_offs = CGF.Builder.CreateAnd(
5383 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
5384 "aligned_regoffs");
5385 }
5387 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
5388 // The fact that this is done unconditionally reflects the fact that
5389 // allocating an argument to the stack also uses up all the remaining
5390 // registers of the appropriate kind.
5391 llvm::Value *NewOffset = nullptr;
5392 NewOffset = CGF.Builder.CreateAdd(
5393 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
5394 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
5396 // Now we're in a position to decide whether this argument really was in
5397 // registers or not.
5398 llvm::Value *InRegs = nullptr;
5399 InRegs = CGF.Builder.CreateICmpSLE(
5400 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
5402 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5404 //=======================================
5405 // Argument was in registers
5406 //=======================================
5408 // Now we emit the code for if the argument was originally passed in
5409 // registers. First start the appropriate block:
5410 CGF.EmitBlock(InRegBlock);
5412 llvm::Value *reg_top = nullptr;
5413 Address reg_top_p =
5414 CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
5415 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
5416 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
5417 CharUnits::fromQuantity(IsFPR ? 16 : 8));
5418 Address RegAddr = Address::invalid();
5419 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
5421 if (IsIndirect) {
5422 // If it's been passed indirectly (actually a struct), whatever we find from
5423 // stored registers or on the stack will actually be a struct **.
5424 MemTy = llvm::PointerType::getUnqual(MemTy);
5425 }
5427 const Type *Base = nullptr;
5428 uint64_t NumMembers = 0;
5429 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
5430 if (IsHFA && NumMembers > 1) {
5431 // Homogeneous aggregates passed in registers will have their elements split
5432 // and stored 16-bytes apart regardless of size (they're notionally in qN,
5433 // qN+1, ...). We reload and store into a temporary local variable
5434 // contiguously, and return a pointer to this loaded variable.
5435 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
5436 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
5437 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
5438 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5439 Address Tmp = CGF.CreateTempAlloca(HFATy,
5440 std::max(TyAlign, BaseTyInfo.second));
5442 // On big-endian platforms, the value will be right-aligned in its slot.
5443 int Offset = 0;
5444 if (CGF.CGM.getDataLayout().isBigEndian() &&
5445 BaseTyInfo.first.getQuantity() < 16)
5446 Offset = 16 - BaseTyInfo.first.getQuantity();
5448 for (unsigned i = 0; i < NumMembers; ++i) {
5449 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
5450 Address LoadAddr =
5451 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
5452 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
5454 Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);
5456 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
5457 CGF.Builder.CreateStore(Elem, StoreAddr);
5458 }
5460 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
5461 } else {
5462 // Otherwise the object is contiguous in memory.
5464 // It might be right-aligned in its slot.
5465 CharUnits SlotSize = BaseAddr.getAlignment();
5466 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
5467 (IsHFA || !isAggregateTypeForABI(Ty)) &&
5468 TySize < SlotSize) {
5469 CharUnits Offset = SlotSize - TySize;
5470 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
5471 }
5473 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
5474 }
5476 CGF.EmitBranch(ContBlock);
5478 //=======================================
5479 // Argument was on the stack
5480 //=======================================
5481 CGF.EmitBlock(OnStackBlock);
5483 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
5484 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
5486 // Again, stack arguments may need realignment. In this case both integer and
5487 // floating-point ones might be affected.
5488 if (!IsIndirect && TyAlign.getQuantity() > 8) {
5489 int Align = TyAlign.getQuantity();
5491 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
5493 OnStackPtr = CGF.Builder.CreateAdd(
5494 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
5495 "align_stack");
5496 OnStackPtr = CGF.Builder.CreateAnd(
5497 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
5498 "align_stack");
5500 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
5501 }
5502 Address OnStackAddr(OnStackPtr,
5503 std::max(CharUnits::fromQuantity(8), TyAlign));
5505 // All stack slots are multiples of 8 bytes.
5506 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
5507 CharUnits StackSize;
5508 if (IsIndirect)
5509 StackSize = StackSlotSize;
5510 else
5511 StackSize = TySize.alignTo(StackSlotSize);
5513 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
5514 llvm::Value *NewStack =
5515 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
5517 // Write the new value of __stack for the next call to va_arg
5518 CGF.Builder.CreateStore(NewStack, stack_p);
5520 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
5521 TySize < StackSlotSize) {
5522 CharUnits Offset = StackSlotSize - TySize;
5523 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
5524 }
5526 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
5528 CGF.EmitBranch(ContBlock);
5530 //=======================================
5531 // Tidy up
5532 //=======================================
5533 CGF.EmitBlock(ContBlock);
5535 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
5536 OnStackAddr, OnStackBlock, "vaargs.addr");
5538 if (IsIndirect)
5539 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
5540 TyAlign);
5542 return ResAddr;
5543 }
5545 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
5546 CodeGenFunction &CGF) const {
5547 // The backend's lowering doesn't support va_arg for aggregates or
5548 // illegal vector types. Lower VAArg here for these cases and use
5549 // the LLVM va_arg instruction for everything else.
5550 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
5551 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
5553 CharUnits SlotSize = CharUnits::fromQuantity(8);
5555 // Empty records are ignored for parameter passing purposes.
5556 if (isEmptyRecord(getContext(), Ty, true)) {
5557 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
5558 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
5559 return Addr;
5560 }
5562 // The size of the actual thing passed, which might end up just
5563 // being a pointer for indirect types.
5564 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5566 // Arguments bigger than 16 bytes which aren't homogeneous
5567 // aggregates should be passed indirectly.
5568 bool IsIndirect = false;
5569 if (TyInfo.first.getQuantity() > 16) {
5570 const Type *Base = nullptr;
5571 uint64_t Members = 0;
5572 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
5573 }
5575 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
5576 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
5577 }
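// Illustrative note (added by the editor; not in the original source): the
// Darwin va_list is a plain char *, so a 24-byte struct that is not a
// homogeneous aggregate takes the IsIndirect path above: va_arg loads a
// pointer from the list and reads the struct through it.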
5579 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
5580 QualType Ty) const {
5581 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
5582 CGF.getContext().getTypeInfoInChars(Ty),
5583 CharUnits::fromQuantity(8),
5584 /*allowHigherAlign*/ false);
5585 }
5587 //===----------------------------------------------------------------------===//
5588 // ARM ABI Implementation
5589 //===----------------------------------------------------------------------===//
5593 class ARMABIInfo : public SwiftABIInfo {
5594 public:
5595 enum ABIKind {
5596 APCS = 0,
5597 AAPCS = 1,
5598 AAPCS_VFP = 2,
5599 AAPCS16_VFP = 3,
5600 };
5602 private:
5603 ABIKind Kind;
5605 public:
5606 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
5607 : SwiftABIInfo(CGT), Kind(_Kind) {
5608 setCCs();
5609 }
5611 bool isEABI() const {
5612 switch (getTarget().getTriple().getEnvironment()) {
5613 case llvm::Triple::Android:
5614 case llvm::Triple::EABI:
5615 case llvm::Triple::EABIHF:
5616 case llvm::Triple::GNUEABI:
5617 case llvm::Triple::GNUEABIHF:
5618 case llvm::Triple::MuslEABI:
5619 case llvm::Triple::MuslEABIHF:
5620 return true;
5621 default:
5622 return false;
5623 }
5624 }
5626 bool isEABIHF() const {
5627 switch (getTarget().getTriple().getEnvironment()) {
5628 case llvm::Triple::EABIHF:
5629 case llvm::Triple::GNUEABIHF:
5630 case llvm::Triple::MuslEABIHF:
5631 return true;
5632 default:
5633 return false;
5634 }
5635 }
5637 ABIKind getABIKind() const { return Kind; }
5640 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
5641 unsigned functionCallConv) const;
5642 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
5643 unsigned functionCallConv) const;
5644 ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
5645 uint64_t Members) const;
5646 ABIArgInfo coerceIllegalVector(QualType Ty) const;
5647 bool isIllegalVectorType(QualType Ty) const;
5648 bool containsAnyFP16Vectors(QualType Ty) const;
5650 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5651 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5652 uint64_t Members) const override;
5654 bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
5656 void computeInfo(CGFunctionInfo &FI) const override;
5658 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5659 QualType Ty) const override;
5661 llvm::CallingConv::ID getLLVMDefaultCC() const;
5662 llvm::CallingConv::ID getABIDefaultCC() const;
5663 void setCCs();
5665 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
5666 bool asReturnValue) const override {
5667 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5669 bool isSwiftErrorInRegister() const override {
5670 return true;
5671 }
5672 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
5673 unsigned elts) const override;
5674 };
5676 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
5677 public:
5678 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5679 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
5681 const ARMABIInfo &getABIInfo() const {
5682 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
5685 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5686 return 13;
5687 }
5689 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5690 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
5693 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5694 llvm::Value *Address) const override {
5695 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5697 // 0-15 are the 16 integer registers.
5698 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
5699 return false;
5700 }
5702 unsigned getSizeOfUnwindException() const override {
5703 if (getABIInfo().isEABI()) return 88;
5704 return TargetCodeGenInfo::getSizeOfUnwindException();
5707 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5708 CodeGen::CodeGenModule &CGM) const override {
5709 if (GV->isDeclaration())
5710 return;
5711 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5712 if (!FD)
5713 return;
5715 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
5716 if (!Attr)
5717 return;
5719 const char *Kind;
5720 switch (Attr->getInterrupt()) {
5721 case ARMInterruptAttr::Generic: Kind = ""; break;
5722 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
5723 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
5724 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
5725 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
5726 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
5727 }
5729 llvm::Function *Fn = cast<llvm::Function>(GV);
5731 Fn->addFnAttr("interrupt", Kind);
5733 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5734 if (ABI == ARMABIInfo::APCS)
5735 return;
5737 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
5738 // however this is not necessarily true on taking any interrupt. Instruct
5739 // the backend to perform a realignment as part of the function prologue.
5740 llvm::AttrBuilder B;
5741 B.addStackAlignmentAttr(8);
5742 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
5743 }
5744 };
5746 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
5747 public:
5748 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5749 : ARMTargetCodeGenInfo(CGT, K) {}
5751 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5752 CodeGen::CodeGenModule &CGM) const override;
5754 void getDependentLibraryOption(llvm::StringRef Lib,
5755 llvm::SmallString<24> &Opt) const override {
5756 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5759 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5760 llvm::SmallString<32> &Opt) const override {
5761 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5765 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5766 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5767 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5768 if (GV->isDeclaration())
5769 return;
5770 addStackProbeTargetAttributes(D, GV, CGM);
5771 }
5774 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
5775 if (!::classifyReturnType(getCXXABI(), FI, *this))
5776 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
5777 FI.getCallingConvention());
5779 for (auto &I : FI.arguments())
5780 I.info = classifyArgumentType(I.type, FI.isVariadic(),
5781 FI.getCallingConvention());
5784 // Always honor user-specified calling convention.
5785 if (FI.getCallingConvention() != llvm::CallingConv::C)
5786 return;
5788 llvm::CallingConv::ID cc = getRuntimeCC();
5789 if (cc != llvm::CallingConv::C)
5790 FI.setEffectiveCallingConvention(cc);
5793 /// Return the default calling convention that LLVM will use.
5794 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
5795 // The default calling convention that LLVM will infer.
5796 if (isEABIHF() || getTarget().getTriple().isWatchABI())
5797 return llvm::CallingConv::ARM_AAPCS_VFP;
5798 else if (isEABI())
5799 return llvm::CallingConv::ARM_AAPCS;
5800 else
5801 return llvm::CallingConv::ARM_APCS;
5802 }
5804 /// Return the calling convention that our ABI would like us to use
5805 /// as the C calling convention.
5806 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
5807 switch (getABIKind()) {
5808 case APCS: return llvm::CallingConv::ARM_APCS;
5809 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
5810 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5811 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5813 llvm_unreachable("bad ABI kind");
5816 void ARMABIInfo::setCCs() {
5817 assert(getRuntimeCC() == llvm::CallingConv::C);
5819 // Don't muddy up the IR with a ton of explicit annotations if
5820 // they'd just match what LLVM will infer from the triple.
5821 llvm::CallingConv::ID abiCC = getABIDefaultCC();
5822 if (abiCC != getLLVMDefaultCC())
5823 RuntimeCC = abiCC;
5824 }
5826 ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
5827 uint64_t Size = getContext().getTypeSize(Ty);
5828 if (Size <= 32) {
5829 llvm::Type *ResType =
5830 llvm::Type::getInt32Ty(getVMContext());
5831 return ABIArgInfo::getDirect(ResType);
5832 }
5833 if (Size == 64 || Size == 128) {
5834 llvm::Type *ResType = llvm::VectorType::get(
5835 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
5836 return ABIArgInfo::getDirect(ResType);
5837 }
5838 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5839 }
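// Illustrative note (added by the editor; not in the original source): a
// <2 x i8> argument (16 bits) is widened to i32 here, an <8 x i16> argument
// (128 bits) becomes <4 x i32>, and an illegal 256-bit vector would be
// passed indirectly.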
5841 ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
5842 const Type *Base,
5843 uint64_t Members) const {
5844 assert(Base && "Base class should be set for homogeneous aggregate");
5845 // Base can be a floating-point or a vector.
5846 if (const VectorType *VT = Base->getAs<VectorType>()) {
5847 // FP16 vectors should be converted to integer vectors
5848 if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
5849 uint64_t Size = getContext().getTypeSize(VT);
5850 llvm::Type *NewVecTy = llvm::VectorType::get(
5851 llvm::Type::getInt32Ty(getVMContext()), Size / 32);
5852 llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
5853 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
5854 }
5855 }
5856 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5857 }
5859 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
5860 unsigned functionCallConv) const {
5861 // 6.1.2.1 The following argument types are VFP CPRCs:
5862 // A single-precision floating-point type (including promoted
5863 // half-precision types); A double-precision floating-point type;
5864 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
5865 // with a Base Type of a single- or double-precision floating-point type,
5866 // 64-bit containerized vectors or 128-bit containerized vectors with one
5867 // to four Elements.
5868 // Variadic functions should always marshal to the base standard.
5869 bool IsAAPCS_VFP =
5870 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);
5872 Ty = useFirstFieldIfTransparentUnion(Ty);
5874 // Handle illegal vector types here.
5875 if (isIllegalVectorType(Ty))
5876 return coerceIllegalVector(Ty);
5878 // _Float16 and __fp16 get passed as if it were an int or float, but with
5879 // the top 16 bits unspecified. This is not done for OpenCL as it handles the
5880 // half type natively, and does not need to interwork with AAPCS code.
5881 if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
5882 !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5883 llvm::Type *ResType = IsAAPCS_VFP ?
5884 llvm::Type::getFloatTy(getVMContext()) :
5885 llvm::Type::getInt32Ty(getVMContext());
5886 return ABIArgInfo::getDirect(ResType);
5889 if (!isAggregateTypeForABI(Ty)) {
5890 // Treat an enum type as its underlying type.
5891 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
5892 Ty = EnumTy->getDecl()->getIntegerType();
5895 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
5896 : ABIArgInfo::getDirect());
5899 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5900 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5903 // Ignore empty records.
5904 if (isEmptyRecord(getContext(), Ty, true))
5905 return ABIArgInfo::getIgnore();
5907 if (IsAAPCS_VFP) {
5908 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
5909 // into VFP registers.
5910 const Type *Base = nullptr;
5911 uint64_t Members = 0;
5912 if (isHomogeneousAggregate(Ty, Base, Members))
5913 return classifyHomogeneousAggregate(Ty, Base, Members);
5914 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5915 // WatchOS does have homogeneous aggregates. Note that we intentionally use
5916 // this convention even for a variadic function: the backend will use GPRs
5918 const Type *Base = nullptr;
5919 uint64_t Members = 0;
5920 if (isHomogeneousAggregate(Ty, Base, Members)) {
5921 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
5922 llvm::Type *Ty =
5923 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
5924 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
5925 }
5926 }
5928 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5929 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
5930 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
5931 // bigger than 128-bits, they get placed in space allocated by the caller,
5932 // and a pointer is passed.
5933 return ABIArgInfo::getIndirect(
5934 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
5937 // Support byval for ARM.
5938 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
5939 // most 8-byte. We realign the indirect argument if type alignment is bigger
5940 // than ABI alignment.
5941 uint64_t ABIAlign = 4;
5942 uint64_t TyAlign;
5943 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5944 getABIKind() == ARMABIInfo::AAPCS) {
5945 TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
5946 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
5947 } else {
5948 TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
5949 }
5950 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
5951 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
5952 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5953 /*ByVal=*/true,
5954 /*Realign=*/TyAlign > ABIAlign);
5955 }
5957 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
5958 // same size and alignment.
5959 if (getTarget().isRenderScriptTarget()) {
5960 return coerceToIntArray(Ty, getContext(), getVMContext());
5961 }
5963 // Otherwise, pass by coercing to a structure of the appropriate size.
5964 llvm::Type* ElemTy;
5965 unsigned SizeRegs;
5966 // FIXME: Try to match the types of the arguments more accurately where
5967 // we can.
5968 if (TyAlign <= 4) {
5969 ElemTy = llvm::Type::getInt32Ty(getVMContext());
5970 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
5971 } else {
5972 ElemTy = llvm::Type::getInt64Ty(getVMContext());
5973 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
5974 }
5976 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
5977 }
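// Illustrative note (added by the editor; not in the original source): a
// 12-byte struct with 4-byte alignment is coerced to [3 x i32] by the code
// above, while a 16-byte struct containing a double (8-byte alignment) is
// coerced to [2 x i64].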
5979 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
5980 llvm::LLVMContext &VMContext) {
5981 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
5982 // is called integer-like if its size is less than or equal to one word, and
5983 // the offset of each of its addressable sub-fields is zero.
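// Illustrative note (added by the editor; not in the original source): under
// this definition
//   union U { int i; unsigned u; };  // one word, every field at offset 0
// is integer-like, while
//   struct S { short a; short b; };  // b sits at a non-zero offset
// is not, because b is an addressable sub-field at offset 16 bits.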
5985 uint64_t Size = Context.getTypeSize(Ty);
5987 // Check that the type fits in a word.
5988 if (Size > 32)
5989 return false;
5991 // FIXME: Handle vector types!
5992 if (Ty->isVectorType())
5993 return false;
5995 // Float types are never treated as "integer like".
5996 if (Ty->isRealFloatingType())
5997 return false;
5999 // If this is a builtin or pointer type then it is ok.
6000 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
6001 return true;
6003 // Small complex integer types are "integer like".
6004 if (const ComplexType *CT = Ty->getAs<ComplexType>())
6005 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
6007 // Single element and zero sized arrays should be allowed, by the definition
6008 // above, but they are not.
6010 // Otherwise, it must be a record type.
6011 const RecordType *RT = Ty->getAs<RecordType>();
6012 if (!RT) return false;
6014 // Ignore records with flexible arrays.
6015 const RecordDecl *RD = RT->getDecl();
6016 if (RD->hasFlexibleArrayMember())
6019 // Check that all sub-fields are at offset 0, and are themselves "integer
6021 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
6023 bool HadField = false;
6024 unsigned idx = 0;
6025 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6026 i != e; ++i, ++idx) {
6027 const FieldDecl *FD = *i;
6029 // Bit-fields are not addressable, we only need to verify they are "integer
6030 // like". We still have to disallow a subsequent non-bitfield, for example:
6031 // struct { int : 0; int x }
6032 // is non-integer like according to gcc.
6033 if (FD->isBitField()) {
6034 if (!RD->isUnion())
6035 HadField = true;
6037 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6038 return false;
6040 continue;
6041 }
6043 // Check if this field is at offset 0.
6044 if (Layout.getFieldOffset(idx) != 0)
6045 return false;
6047 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
6048 return false;
6050 // Only allow at most one field in a structure. This doesn't match the
6051 // wording above, but follows gcc in situations with a field following an
6052 // empty structure.
6053 if (!RD->isUnion()) {
6054 if (HadField)
6055 return false;
6057 HadField = true;
6058 }
6059 }
6061 return true;
6062 }
6064 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
6065 unsigned functionCallConv) const {
6067 // Variadic functions should always marshal to the base standard.
6068 bool IsAAPCS_VFP =
6069 !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);
6071 if (RetTy->isVoidType())
6072 return ABIArgInfo::getIgnore();
6074 if (const VectorType *VT = RetTy->getAs<VectorType>()) {
6075 // Large vector types should be returned via memory.
6076 if (getContext().getTypeSize(RetTy) > 128)
6077 return getNaturalAlignIndirect(RetTy);
6078 // FP16 vectors should be converted to integer vectors
6079 if (!getTarget().hasLegalHalfType() &&
6080 (VT->getElementType()->isFloat16Type() ||
6081 VT->getElementType()->isHalfType()))
6082 return coerceIllegalVector(RetTy);
6085 // _Float16 and __fp16 get returned as if it were an int or float, but with
6086 // the top 16 bits unspecified. This is not done for OpenCL as it handles the
6087 // half type natively, and does not need to interwork with AAPCS code.
6088 if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
6089 !getContext().getLangOpts().NativeHalfArgsAndReturns) {
6090 llvm::Type *ResType = IsAAPCS_VFP ?
6091 llvm::Type::getFloatTy(getVMContext()) :
6092 llvm::Type::getInt32Ty(getVMContext());
6093 return ABIArgInfo::getDirect(ResType);
6096 if (!isAggregateTypeForABI(RetTy)) {
6097 // Treat an enum type as its underlying type.
6098 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6099 RetTy = EnumTy->getDecl()->getIntegerType();
6101 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
6102 : ABIArgInfo::getDirect();
6105 // Are we following APCS?
6106 if (getABIKind() == APCS) {
6107 if (isEmptyRecord(getContext(), RetTy, false))
6108 return ABIArgInfo::getIgnore();
6110 // Complex types are all returned as packed integers.
6112 // FIXME: Consider using 2 x vector types if the back end handles them
6114 if (RetTy->isAnyComplexType())
6115 return ABIArgInfo::getDirect(llvm::IntegerType::get(
6116 getVMContext(), getContext().getTypeSize(RetTy)));
6118 // Integer like structures are returned in r0.
6119 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
6120 // Return in the smallest viable integer type.
6121 uint64_t Size = getContext().getTypeSize(RetTy);
6122 if (Size <= 8)
6123 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6124 if (Size <= 16)
6125 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6126 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6127 }
6129 // Otherwise return in memory.
6130 return getNaturalAlignIndirect(RetTy);
6133 // Otherwise this is an AAPCS variant.
6135 if (isEmptyRecord(getContext(), RetTy, true))
6136 return ABIArgInfo::getIgnore();
6138 // Check for homogeneous aggregates with AAPCS-VFP.
6139 if (IsAAPCS_VFP) {
6140 const Type *Base = nullptr;
6141 uint64_t Members = 0;
6142 if (isHomogeneousAggregate(RetTy, Base, Members))
6143 return classifyHomogeneousAggregate(RetTy, Base, Members);
6144 }
6146 // Aggregates <= 4 bytes are returned in r0; other aggregates
6147 // are returned indirectly.
6148 uint64_t Size = getContext().getTypeSize(RetTy);
6149 if (Size <= 32) {
6150 // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
6151 // same size and alignment.
6152 if (getTarget().isRenderScriptTarget()) {
6153 return coerceToIntArray(RetTy, getContext(), getVMContext());
6154 }
6155 if (getDataLayout().isBigEndian())
6156 // Return in 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
6157 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6159 // Return in the smallest viable integer type.
6160 if (Size <= 8)
6161 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6162 if (Size <= 16)
6163 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6164 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6165 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
6166 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
6167 llvm::Type *CoerceTy =
6168 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
6169 return ABIArgInfo::getDirect(CoerceTy);
6170 }
6172 return getNaturalAlignIndirect(RetTy);
6173 }
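// Illustrative note (added by the editor; not in the original source): on a
// little-endian AAPCS target, struct { char c; } is returned directly as i8,
// whereas on big-endian targets it is returned as i32 so the value lands
// where an LDR from r0 expects it.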
6175 /// isIllegalVector - check whether Ty is an illegal vector type.
6176 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
6177 if (const VectorType *VT = Ty->getAs<VectorType> ()) {
6178 // On targets that don't support FP16, FP16 is expanded into float, and we
6179 // don't want the ABI to depend on whether or not FP16 is supported in
6180 // hardware. Thus return false to coerce FP16 vectors into integer vectors.
6181 if (!getTarget().hasLegalHalfType() &&
6182 (VT->getElementType()->isFloat16Type() ||
6183 VT->getElementType()->isHalfType()))
6184 return true;
6185 if (isAndroid()) {
6186 // Android shipped using Clang 3.1, which supported a slightly different
6187 // vector ABI. The primary differences were that 3-element vector types
6188 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
6189 // accepts that legacy behavior for Android only.
6190 // Check whether VT is legal.
6191 unsigned NumElements = VT->getNumElements();
6192 // NumElements should be power of 2 or equal to 3.
6193 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
6194 return true;
6195 } else {
6196 // Check whether VT is legal.
6197 unsigned NumElements = VT->getNumElements();
6198 uint64_t Size = getContext().getTypeSize(VT);
6199 // NumElements should be power of 2.
6200 if (!llvm::isPowerOf2_32(NumElements))
6201 return true;
6202 // Size should be greater than 32 bits.
6203 return Size <= 32;
6204 }
6205 }
6206 return false;
6207 }
6209 /// Return true if a type contains any 16-bit floating point vectors
6210 bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
6211 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
6212 uint64_t NElements = AT->getSize().getZExtValue();
6213 if (NElements == 0)
6214 return false;
6215 return containsAnyFP16Vectors(AT->getElementType());
6216 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
6217 const RecordDecl *RD = RT->getDecl();
6219 // If this is a C++ record, check the bases first.
6220 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6221 if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
6222 return containsAnyFP16Vectors(B.getType());
6223 }))
6224 return true;
6226 if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
6227 return FD && containsAnyFP16Vectors(FD->getType());
6228 }))
6229 return true;
6231 return false;
6232 } else {
6233 if (const VectorType *VT = Ty->getAs<VectorType>())
6234 return (VT->getElementType()->isFloat16Type() ||
6235 VT->getElementType()->isHalfType());
6236 return false;
6237 }
6238 }
6240 bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
6241 llvm::Type *eltTy,
6242 unsigned numElts) const {
6243 if (!llvm::isPowerOf2_32(numElts))
6244 return false;
6245 unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
6246 if (size > 64)
6247 return false;
6248 if (vectorSize.getQuantity() != 8 &&
6249 (vectorSize.getQuantity() != 16 || numElts == 1))
6250 return false;
6251 return true;
6252 }
6254 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
6255 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
6256 // double, or 64-bit or 128-bit vectors.
6257 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
6258 if (BT->getKind() == BuiltinType::Float ||
6259 BT->getKind() == BuiltinType::Double ||
6260 BT->getKind() == BuiltinType::LongDouble)
6261 return true;
6262 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
6263 unsigned VecSize = getContext().getTypeSize(VT);
6264 if (VecSize == 64 || VecSize == 128)
6265 return true;
6266 }
6267 return false;
6268 }
6270 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
6271 uint64_t Members) const {
6272 return Members <= 4;
6275 bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
6276 bool acceptHalf) const {
6277 // Give precedence to user-specified calling conventions.
6278 if (callConvention != llvm::CallingConv::C)
6279 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
6281 return (getABIKind() == AAPCS_VFP) ||
6282 (acceptHalf && (getABIKind() == AAPCS16_VFP));
6285 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6286 QualType Ty) const {
6287 CharUnits SlotSize = CharUnits::fromQuantity(4);
6289 // Empty records are ignored for parameter passing purposes.
6290 if (isEmptyRecord(getContext(), Ty, true)) {
6291 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
6292 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
6293 return Addr;
6294 }
6296 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
6297 CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
6299 // Use indirect if size of the illegal vector is bigger than 16 bytes.
6300 bool IsIndirect = false;
6301 const Type *Base = nullptr;
6302 uint64_t Members = 0;
6303 if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
6304 IsIndirect = true;
6306 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
6307 // allocated by the caller.
6308 } else if (TySize > CharUnits::fromQuantity(16) &&
6309 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6310 !isHomogeneousAggregate(Ty, Base, Members)) {
6311 IsIndirect = true;
6313 // Otherwise, bound the type's ABI alignment.
6314 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
6315 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
6316 // Our callers should be prepared to handle an under-aligned address.
6317 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6318 getABIKind() == ARMABIInfo::AAPCS) {
6319 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6320 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
6321 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6322 // ARMv7k allows type alignment up to 16 bytes.
6323 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6324 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
6325 } else {
6326 TyAlignForABI = CharUnits::fromQuantity(4);
6327 }
6329 std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI };
6330 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
6331 SlotSize, /*AllowHigherAlign*/ true);
6334 //===----------------------------------------------------------------------===//
6335 // NVPTX ABI Implementation
6336 //===----------------------------------------------------------------------===//
6340 class NVPTXABIInfo : public ABIInfo {
6341 public:
6342 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6344 ABIArgInfo classifyReturnType(QualType RetTy) const;
6345 ABIArgInfo classifyArgumentType(QualType Ty) const;
6347 void computeInfo(CGFunctionInfo &FI) const override;
6348 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6349 QualType Ty) const override;
6352 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
6353 public:
6354 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
6355 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
6357 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6358 CodeGen::CodeGenModule &M) const override;
6359 bool shouldEmitStaticExternCAliases() const override;
6362 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
6363 // resulting MDNode to the nvvm.annotations MDNode.
6364 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
6367 /// Checks if the type is unsupported directly by the current target.
6368 static bool isUnsupportedType(ASTContext &Context, QualType T) {
6369 if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
6371 if (!Context.getTargetInfo().hasFloat128Type() &&
6372 (T->isFloat128Type() ||
6373 (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
6375 if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
6376 Context.getTypeSize(T) > 64)
6378 if (const auto *AT = T->getAsArrayTypeUnsafe())
6379 return isUnsupportedType(Context, AT->getElementType());
6380 const auto *RT = T->getAs<RecordType>();
6383 const RecordDecl *RD = RT->getDecl();
6385 // If this is a C++ record, check the bases first.
6386 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6387 for (const CXXBaseSpecifier &I : CXXRD->bases())
6388 if (isUnsupportedType(Context, I.getType()))
6391 for (const FieldDecl *I : RD->fields())
6392 if (isUnsupportedType(Context, I->getType()))
6397 /// Coerce the given type into an array with maximum allowed size of elements.
6398 static ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, ASTContext &Context,
6399 llvm::LLVMContext &LLVMContext,
6401 // Alignment and Size are measured in bits.
6402 const uint64_t Size = Context.getTypeSize(Ty);
6403 const uint64_t Alignment = Context.getTypeAlign(Ty);
6404 const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
6405 llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div);
6406 const uint64_t NumElements = (Size + Div - 1) / Div;
6407 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
6408 }
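// Illustrative note (added by the editor; not in the original source): with
// MaxSize == 64, a 128-bit float type with 128-bit alignment gives
// Div = min(64, 128) = 64 and NumElements = (128 + 63) / 64 = 2, i.e. the
// value is coerced to [2 x i64].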
6410 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
6411 if (RetTy->isVoidType())
6412 return ABIArgInfo::getIgnore();
6414 if (getContext().getLangOpts().OpenMP &&
6415 getContext().getLangOpts().OpenMPIsDevice &&
6416 isUnsupportedType(getContext(), RetTy))
6417 return coerceToIntArrayWithLimit(RetTy, getContext(), getVMContext(), 64);
6419 // note: this is different from default ABI
6420 if (!RetTy->isScalarType())
6421 return ABIArgInfo::getDirect();
6423 // Treat an enum type as its underlying type.
6424 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6425 RetTy = EnumTy->getDecl()->getIntegerType();
6427 return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
6428 : ABIArgInfo::getDirect());
6431 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
6432 // Treat an enum type as its underlying type.
6433 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6434 Ty = EnumTy->getDecl()->getIntegerType();
6436 // Return aggregates type as indirect by value
6437 if (isAggregateTypeForABI(Ty))
6438 return getNaturalAlignIndirect(Ty, /* byval */ true);
6440 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
6441 : ABIArgInfo::getDirect());
6444 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
6445 if (!getCXXABI().classifyReturnType(FI))
6446 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6447 for (auto &I : FI.arguments())
6448 I.info = classifyArgumentType(I.type);
6450 // Always honor user-specified calling convention.
6451 if (FI.getCallingConvention() != llvm::CallingConv::C)
6452 return;
6454 FI.setEffectiveCallingConvention(getRuntimeCC());
6457 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6458 QualType Ty) const {
6459 llvm_unreachable("NVPTX does not support varargs");
6462 void NVPTXTargetCodeGenInfo::setTargetAttributes(
6463 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
6464 if (GV->isDeclaration())
6465 return;
6466 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6467 if (!FD) return;
6469 llvm::Function *F = cast<llvm::Function>(GV);
6471 // Perform special handling in OpenCL mode
6472 if (M.getLangOpts().OpenCL) {
6473 // Use OpenCL function attributes to check for kernel functions
6474 // By default, all functions are device functions
6475 if (FD->hasAttr<OpenCLKernelAttr>()) {
6476 // OpenCL __kernel functions get kernel metadata
6477 // Create !{<func-ref>, metadata !"kernel", i32 1} node
6478 addNVVMMetadata(F, "kernel", 1);
6479 // And kernel functions are not subject to inlining
6480 F->addFnAttr(llvm::Attribute::NoInline);
6481 }
6482 }
6484 // Perform special handling in CUDA mode.
6485 if (M.getLangOpts().CUDA) {
6486 // CUDA __global__ functions get a kernel metadata entry. Since
6487 // __global__ functions cannot be called from the device, we do not
6488 // need to set the noinline attribute.
6489 if (FD->hasAttr<CUDAGlobalAttr>()) {
6490 // Create !{<func-ref>, metadata !"kernel", i32 1} node
6491 addNVVMMetadata(F, "kernel", 1);
6493 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
6494 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
6495 llvm::APSInt MaxThreads(32);
6496 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
6497 if (MaxThreads > 0)
6498 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
6500 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
6501 // not specified in __launch_bounds__ or if the user specified a 0 value,
6502 // we don't have to add a PTX directive.
6503 if (Attr->getMinBlocks()) {
6504 llvm::APSInt MinBlocks(32);
6505 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
6507 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
6508 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
6514 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6515 int Operand) {
6516 llvm::Module *M = F->getParent();
6517 llvm::LLVMContext &Ctx = M->getContext();
6519 // Get "nvvm.annotations" metadata node
6520 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
6522 llvm::Metadata *MDVals[] = {
6523 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6524 llvm::ConstantAsMetadata::get(
6525 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6526 // Append metadata to nvvm.annotations
6527 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6528 }
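// Illustrative note (added by the editor; not in the original source): after
// this runs for a CUDA kernel @foo, the module carries metadata along the
// lines of
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @foo, !"kernel", i32 1}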
6530 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
6531 return false;
6532 }
6535 //===----------------------------------------------------------------------===//
6536 // SystemZ ABI Implementation
6537 //===----------------------------------------------------------------------===//
6541 class SystemZABIInfo : public SwiftABIInfo {
6542 bool HasVector;
6544 public:
6545 SystemZABIInfo(CodeGenTypes &CGT, bool HV)
6546 : SwiftABIInfo(CGT), HasVector(HV) {}
6548 bool isPromotableIntegerType(QualType Ty) const;
6549 bool isCompoundType(QualType Ty) const;
6550 bool isVectorArgumentType(QualType Ty) const;
6551 bool isFPArgumentType(QualType Ty) const;
6552 QualType GetSingleElementType(QualType Ty) const;
6554 ABIArgInfo classifyReturnType(QualType RetTy) const;
6555 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
6557 void computeInfo(CGFunctionInfo &FI) const override {
6558 if (!getCXXABI().classifyReturnType(FI))
6559 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6560 for (auto &I : FI.arguments())
6561 I.info = classifyArgumentType(I.type);
6564 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6565 QualType Ty) const override;
6567 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
6568 bool asReturnValue) const override {
6569 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
6571 bool isSwiftErrorInRegister() const override {
6572 return false;
6573 }
6574 };
6576 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
6577 public:
6578 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
6579 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
6584 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
6585 // Treat an enum type as its underlying type.
6586 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6587 Ty = EnumTy->getDecl()->getIntegerType();
6589 // Promotable integer types are required to be promoted by the ABI.
6590 if (Ty->isPromotableIntegerType())
6591 return true;
6593 // 32-bit values must also be promoted.
6594 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
6595 switch (BT->getKind()) {
6596 case BuiltinType::Int:
6597 case BuiltinType::UInt:
6598 return true;
6599 default:
6600 return false;
6601 }
6602 return false;
6603 }
6605 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
6606 return (Ty->isAnyComplexType() ||
6607 Ty->isVectorType() ||
6608 isAggregateTypeForABI(Ty));
6611 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
6612 return (HasVector &&
6613 Ty->isVectorType() &&
6614 getContext().getTypeSize(Ty) <= 128);
6617 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
6618 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
6619 switch (BT->getKind()) {
6620 case BuiltinType::Float:
6621 case BuiltinType::Double:
6622 return true;
6623 default:
6624 return false;
6625 }
6627 return false;
6628 }
6630 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
6631 if (const RecordType *RT = Ty->getAsStructureType()) {
6632 const RecordDecl *RD = RT->getDecl();
6633 QualType Found;
6635 // If this is a C++ record, check the bases first.
6636 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6637 for (const auto &I : CXXRD->bases()) {
6638 QualType Base = I.getType();
6640 // Empty bases don't affect things either way.
6641 if (isEmptyRecord(getContext(), Base, true))
6642 continue;
6644 if (!Found.isNull())
6645 return Ty;
6646 Found = GetSingleElementType(Base);
6647 }
6649 // Check the fields.
6650 for (const auto *FD : RD->fields()) {
6651 // For compatibility with GCC, ignore empty bitfields in C++ mode.
6652 // Unlike isSingleElementStruct(), empty structure and array fields
6653 // do count. So do anonymous bitfields that aren't zero-sized.
6654 if (getContext().getLangOpts().CPlusPlus &&
6655 FD->isZeroLengthBitField(getContext()))
6656 continue;
6658 // Unlike isSingleElementStruct(), arrays do not count.
6659 // Nested structures still do though.
6660 if (!Found.isNull())
6661 return Ty;
6662 Found = GetSingleElementType(FD->getType());
6663 }
6665 // Unlike isSingleElementStruct(), trailing padding is allowed.
6666 // An 8-byte aligned struct s { float f; } is passed as a double.
6667 if (!Found.isNull())
6668 return Found;
6669 }
6671 return Ty;
6672 }
6674 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6675 QualType Ty) const {
6676 // Assume that va_list type is correct; should be pointer to LLVM type:
6677 // struct {
6678 //   i64 __gpr;
6679 //   i64 __fpr;
6680 //   i8 *__overflow_arg_area;
6681 //   i8 *__reg_save_area;
6682 // };
6684 // Every non-vector argument occupies 8 bytes and is passed by preference
6685 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
6686 // always passed on the stack.
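// Illustrative note (added by the editor; not in the original source): an
// int argument is extended to a full 8-byte GPR slot (value in the low bits,
// see RegPadding below), a float occupies the high bits of an FPR slot, and
// a <4 x i32> argument is always taken from the overflow area.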
6687 Ty = getContext().getCanonicalType(Ty);
6688 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6689 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
6690 llvm::Type *DirectTy = ArgTy;
6691 ABIArgInfo AI = classifyArgumentType(Ty);
6692 bool IsIndirect = AI.isIndirect();
6693 bool InFPRs = false;
6694 bool IsVector = false;
6695 CharUnits UnpaddedSize;
6696 CharUnits DirectAlign;
6697 if (IsIndirect) {
6698 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6699 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
6700 } else {
6701 if (AI.getCoerceToType())
6702 ArgTy = AI.getCoerceToType();
6703 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6704 IsVector = ArgTy->isVectorTy();
6705 UnpaddedSize = TyInfo.first;
6706 DirectAlign = TyInfo.second;
6707 }
6708 CharUnits PaddedSize = CharUnits::fromQuantity(8);
6709 if (IsVector && UnpaddedSize > PaddedSize)
6710 PaddedSize = CharUnits::fromQuantity(16);
6711 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
6713 CharUnits Padding = (PaddedSize - UnpaddedSize);
6715 llvm::Type *IndexTy = CGF.Int64Ty;
6716 llvm::Value *PaddedSizeV =
6717 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
6719 if (IsVector) {
6720 // Work out the address of a vector argument on the stack.
6721 // Vector arguments are always passed in the high bits of a
6722 // single (8 byte) or double (16 byte) stack slot.
6723 Address OverflowArgAreaPtr =
6724 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
6725 Address OverflowArgArea =
6726 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6727 TyInfo.second);
6728 Address MemAddr =
6729 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
6731 // Update overflow_arg_area_ptr pointer
6732 llvm::Value *NewOverflowArgArea =
6733 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6734 "overflow_arg_area");
6735 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6736 return MemAddr;
6737 }
6740 assert(PaddedSize.getQuantity() == 8);
6742 unsigned MaxRegs, RegCountField, RegSaveIndex;
6743 CharUnits RegPadding;
6744 if (InFPRs) {
6745 MaxRegs = 4; // Maximum of 4 FPR arguments
6746 RegCountField = 1; // __fpr
6747 RegSaveIndex = 16; // save offset for f0
6748 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
6749 } else {
6750 MaxRegs = 5; // Maximum of 5 GPR arguments
6751 RegCountField = 0; // __gpr
6752 RegSaveIndex = 2; // save offset for r2
6753 RegPadding = Padding; // values are passed in the low bits of a GPR
6754 }
6756 Address RegCountPtr =
6757 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
6758 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
6759 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6760 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
6761 "fits_in_regs");
6763 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
6764 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
6765 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
6766 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6768 // Emit code to load the value if it was passed in registers.
6769 CGF.EmitBlock(InRegBlock);
6771 // Work out the address of an argument register.
6772 llvm::Value *ScaledRegCount =
6773 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
6774 llvm::Value *RegBase =
6775 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
6776 + RegPadding.getQuantity());
6777 llvm::Value *RegOffset =
6778 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
6779 Address RegSaveAreaPtr =
6780 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
6781 llvm::Value *RegSaveArea =
6782 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
6783 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
6787 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
6789 // Update the register count
6790 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6791 llvm::Value *NewRegCount =
6792 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
6793 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
6794 CGF.EmitBranch(ContBlock);
6796 // Emit code to load the value if it was passed in memory.
6797 CGF.EmitBlock(InMemBlock);
6799 // Work out the address of a stack argument.
6800 Address OverflowArgAreaPtr =
6801 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
6802 Address OverflowArgArea =
6803 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6805 Address RawMemAddr =
6806 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
6808 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
6810 // Update overflow_arg_area_ptr pointer
6811 llvm::Value *NewOverflowArgArea =
6812 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6813 "overflow_arg_area");
6814 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6815 CGF.EmitBranch(ContBlock);
6817 // Return the appropriate result.
6818 CGF.EmitBlock(ContBlock);
6819 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6820 MemAddr, InMemBlock, "va_arg.addr");
6823 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isVectorArgumentType(RetTy))
    return ABIArgInfo::getDirect();
  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);
  return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                         : ABIArgInfo::getDirect());
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
  // Handle the generic C++ ABI.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Integers and enums are extended to full register width.
  if (isPromotableIntegerType(Ty))
    return ABIArgInfo::getExtend(Ty);

  // Handle vector types and vector-like structure types.  Note that
  // as opposed to float-like structure types, we do not allow any
  // padding for vector-like structures, so verify the sizes match.
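  //
  // For example (illustrative), a struct whose only member is a 16-byte
  // vector is passed exactly like that vector, whereas a struct whose
  // trailing padding makes it larger than its single vector member fails
  // the size check and is handled by the cases below.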
  uint64_t Size = getContext().getTypeSize(Ty);
  QualType SingleElementTy = GetSingleElementType(Ty);
  if (isVectorArgumentType(SingleElementTy) &&
      getContext().getTypeSize(SingleElementTy) == Size)
    return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));

  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Handle small structures.
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so they really
    // fail the size test above.
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // The structure is passed as an unextended integer, a float, or a double.
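    // For example (illustrative): 'struct { float f; }' is passed as a
    // plain 'float', 'struct { double d; }' as a 'double', and a 4-byte
    // struct with no single float member as an unextended i32.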
    llvm::Type *PassTy;
    if (isFPArgumentType(SingleElementTy)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }

  // Non-structure compounds are passed indirectly.
  if (isCompoundType(Ty))
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  return ABIArgInfo::getDirect(nullptr);
}

//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

} // end anonymous namespace

void MSP430TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
    if (!InterruptAttr)
      return;

    // Handle the 'interrupt' attribute:
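    //
    // Illustrative usage (hypothetical source): a handler declared as
    //   __attribute__((interrupt(2))) void isr(void);
    // receives the MSP430_INTR calling convention and the IR attribute
    // "interrupt"="2" below.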
    llvm::Function *F = cast<llvm::Function>(GV);

    // Step 1: Set ISR calling convention.
    F->setCallingConv(llvm::CallingConv::MSP430_INTR);

    // Step 2: Add the relevant function attributes.
    F->addFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
  }
}

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation.  This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;

public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  ABIArgInfo extendType(QualType Ty) const;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
      : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
        SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);

    if (FD->hasAttr<MipsLongCallAttr>())
      Fn->addFnAttr("long-call");
    else if (FD->hasAttr<MipsShortCallAttr>())
      Fn->addFnAttr("short-call");

    // Other attributes do not have a meaning for declarations.
    if (GV->isDeclaration())
      return;

    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }

    if (FD->hasAttr<MicroMipsAttr>())
      Fn->addFnAttr("micromips");
    else if (FD->hasAttr<NoMicroMipsAttr>())
      Fn->addFnAttr("nomicromips");

    const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case MipsInterruptAttr::eic: Kind = "eic"; break;
    case MipsInterruptAttr::sw0: Kind = "sw0"; break;
    case MipsInterruptAttr::sw1: Kind = "sw1"; break;
    case MipsInterruptAttr::hw0: Kind = "hw0"; break;
    case MipsInterruptAttr::hw1: Kind = "hw1"; break;
    case MipsInterruptAttr::hw2: Kind = "hw2"; break;
    case MipsInterruptAttr::hw3: Kind = "hw3"; break;
    case MipsInterruptAttr::hw4: Kind = "hw4"; break;
    case MipsInterruptAttr::hw5: Kind = "hw5"; break;
    }

    Fn->addFnAttr("interrupt", Kind);
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
} // end anonymous namespace

void MipsABIInfo::CoerceToIntArgs(
    uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
    llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);

  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}

// In N32/64, an aligned double precision floating point field is passed in
// a register.
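//
// For example (illustrative), on N64 'struct { double d; int i; }' is
// coerced to the literal struct type { double, i64 }: the aligned double
// stays eligible for an FPR while the remainder travels as an integer word.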
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any aligned
  // double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add double type.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}

llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
                                        uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}

ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  unsigned CurrOffset = llvm::alignTo(Offset, Align);
  Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();

    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // If we have reached here, aggregates are passed directly by coercing to
    // another structure type. Padding is inserted if the offset of the
    // aggregate is unaligned.
    ABIArgInfo ArgInfo =
        ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                              getPaddingType(OrigOffset, CurrOffset));
    ArgInfo.setInReg(true);
    return ArgInfo;
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // All integral types are promoted to the GPR width.
  if (Ty->isIntegralOrEnumerationType())
    return extendType(Ty);

  return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}

llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
  const RecordType *RT = RetTy->getAs<RecordType>();
  SmallVector<llvm::Type*, 8> RTList;

  if (RT && RT->isStructureOrClassType()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    unsigned FieldCnt = Layout.getFieldCount();

    // N32/64 returns struct/classes in floating point registers if the
    // following conditions are met:
    // 1. The size of the struct/class is no larger than 128-bit.
    // 2. The struct/class has one or two fields all of which are floating
    //    point types.
    // 3. The offset of the first field is zero (this follows what gcc does).
    //
    // Any other composite results are returned in integer registers.
    //
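    // Illustrative example: 'struct { double a; double b; }' meets all three
    // conditions and is returned as { double, double } in FPRs, whereas
    // 'struct { double a; int b; }' fails condition 2 and falls back to
    // integer registers.
    //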
    if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
      RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
      for (; b != e; ++b) {
        const BuiltinType *BT = b->getType()->getAs<BuiltinType>();

        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}

ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // O32 doesn't treat zero-sized structs differently from other structs.
  // However, N32/N64 ignores zero sized return values.
  if (!IsO32 && Size == 0)
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
        return ABIArgInfo::getDirect();

      // O32 returns integer vectors in registers and N32/N64 returns all small
      // aggregates in registers.
      if (!IsO32 ||
          (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
        ABIArgInfo ArgInfo =
            ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
        ArgInfo.setInReg(true);
        return ArgInfo;
      }
    }

    return getNaturalAlignIndirect(RetTy);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (RetTy->isPromotableIntegerType())
    return ABIArgInfo::getExtend(RetTy);

  if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
       RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
    return ABIArgInfo::getSignExtend(RetTy);

  return ABIArgInfo::getDirect();
}

void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  ABIArgInfo &RetInfo = FI.getReturnInfo();
  if (!getCXXABI().classifyReturnType(FI))
    RetInfo = classifyReturnType(FI.getReturnType());

  // Check if a pointer to an aggregate is passed as a hidden argument.
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, Offset);
}

Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType OrigTy) const {
  QualType Ty = OrigTy;

  // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
  // Pointers are also promoted in the same way but this only matters for N32.
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  bool DidPromote = false;
  if ((Ty->isIntegerType() &&
       getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    DidPromote = true;
    Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                            Ty->isSignedIntegerType());
  }

  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // The alignment of things in the argument area is never larger than
  // StackAlignInBytes.
  TyInfo.second =
    std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));

  // MinABIStackAlignInBytes is the size of argument slots on the stack.
  CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);

  Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                                  TyInfo, ArgSlotSize,
                                  /*AllowHigherAlign*/ true);

  // If there was a promotion, "unpromote" into a temporary.
  // TODO: can we just use a pointer into a subset of the original slot?
  if (DidPromote) {
    Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
    llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);

    // Truncate down to the right width.
    llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
                                                 : CGF.IntPtrTy);
    llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
    if (OrigTy->isPointerType())
      V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());

    CGF.Builder.CreateStore(V, Temp);
    Addr = Temp;
  }

  return Addr;
}

ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);

  // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
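  // For example, an 'unsigned int' argument on N64 is sign-extended to the
  // full 64-bit GPR even though the C type is unsigned; everything else gets
  // its natural extension from getExtend().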
  if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);

  return ABIArgInfo::getExtend(Ty);
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.

  // Everything on MIPS is 4 bytes.  Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}

//===----------------------------------------------------------------------===//
// AVR ABI Implementation.
//===----------------------------------------------------------------------===//

namespace {
class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AVRTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    auto *Fn = cast<llvm::Function>(GV);

    if (FD->getAttr<AVRInterruptAttr>())
      Fn->addFnAttr("interrupt");

    if (FD->getAttr<AVRSignalAttr>())
      Fn->addFnAttr("signal");
  }
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

void TCETargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata(
                "opencl.kernel_wg_size_info");

        SmallVector<llvm::Metadata *, 5> Operands;
        Operands.push_back(llvm::ConstantAsMetadata::get(F));

        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }
};

} // end anonymous namespace

void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
  // Pass in the smallest viable integer type.
  else if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  else if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  else if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  else
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}

ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                             : ABIArgInfo::getDirect());
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in r0; other aggregates
  // are returned indirectly.
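  //
  // Illustrative example: a 3-byte struct (24 bits) is widened and returned
  // in r0 as an i32, while a 12-byte struct is returned through memory.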
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}

Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // FIXME: Someone needs to audit that this handles alignment correctly.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}

//===----------------------------------------------------------------------===//
// Lanai ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class LanaiABIInfo : public DefaultABIInfo {
public:
  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool shouldUseInReg(QualType Ty, CCState &State) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI.getCallingConvention());
    // Lanai uses 4 registers to pass arguments unless the function has the
    // regparm attribute set.
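    //
    // For example (hypothetical source): __attribute__((regparm(2))) caps
    // this function at two 32-bit argument registers; without the attribute
    // the default of four is used below.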
    if (FI.getHasRegParm()) {
      State.FreeRegs = FI.getRegParm();
    } else {
      State.FreeRegs = 4;
    }

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type, State);
  }

  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
};
} // end anonymous namespace

bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  return true;
}

ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                           CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}

ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
                                              CCState &State) const {
  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, /*ByVal=*/true, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    if (SizeInRegs <= State.FreeRegs) {
      llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      State.FreeRegs -= SizeInRegs;
      return ABIArgInfo::getDirectInReg(Result);
    } else {
      State.FreeRegs = 0;
    }
    return getIndirectResult(Ty, true, State);
  }

  // Treat an enum type as its underlying type.
  if (const auto *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, State);
  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getDirectInReg();
    return ABIArgInfo::getExtend(Ty);
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
private:
  static const unsigned MaxNumRegsForArgsRet = 16;

  unsigned numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;

public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
    DefaultABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;
};

bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous Aggregates may occupy at most 16 registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}

/// Estimate number of registers the type will use when passed in registers.
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  unsigned NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Compute from the number of elements. The reported size is based on the
    // in-memory size, which includes the padding 4th element for 3-vectors.
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed as packed.
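    // Illustrative examples: a <4 x half> vector packs into two 32-bit
    // registers, while a <3 x float> counts as three registers even though
    // its in-memory size equals that of a 4-element vector.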
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }

    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}

void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
    }
  }
}

ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();

      // Lower single-element structs to just return a regular value.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      if (const RecordType *RT = RetTy->getAs<RecordType>()) {
        const RecordDecl *RD = RT->getDecl();
        if (RD->hasFlexibleArrayMember())
          return DefaultABIInfo::classifyReturnType(RetTy);
      }

      // Pack aggregates <= 8 bytes into a single VGPR or pair.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      if (Size <= 64) {
        llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
        return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
      }

      if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

/// For kernels all parameters are really passed in a special buffer. It doesn't
/// make sense to pass anything byval, so everything must be direct.
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // TODO: Can we omit empty structs?

  // Coerce single element structs to their element type.
  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

  // If we set CanBeFlattened to true, CodeGen will expand the struct to its
  // individual elements, which confuses the Clover OpenCL backend; therefore we
  // have to set it to false here. Other args of getDirect() are just defaults.
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}

ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      const RecordDecl *RD = RT->getDecl();
      if (RD->hasFlexibleArrayMember())
        return DefaultABIInfo::classifyArgumentType(Ty);
    }

    // Pack aggregates <= 8 bytes into a single VGPR or pair.
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64) {
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      // XXX: Should this be i64 instead, and should the limit increase?
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }

    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }
  }

  // Otherwise just do the default thing.
  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
  if (!ArgInfo.isIndirect()) {
    unsigned NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
  }

  return ArgInfo;
}

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T, QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  llvm::Function *
  createEnqueuedBlockKernel(CodeGenFunction &CGF,
                            llvm::Function *BlockInvokeFunc,
                            llvm::Value *BlockLiteral) const override;
  bool shouldEmitStaticExternCAliases() const override;
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
} // end anonymous namespace

static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return D->hasAttr<OpenCLKernelAttr>() ||
         (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
         (isa<VarDecl>(D) &&
          (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
           D->hasAttr<HIPPinnedShadowAttr>()));
}

static bool requiresAMDGPUDefaultVisibility(const Decl *D,
                                            llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return isa<VarDecl>(D) && D->hasAttr<HIPPinnedShadowAttr>();
}

void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUDefaultVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
    GV->setDSOLocal(false);
  } else if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  const auto *ReqdWGS = M.getLangOpts().OpenCL ?
    FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;

  const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
                              FD->hasAttr<OpenCLKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP &&
                           FD->hasAttr<CUDAGlobalAttr>();
  if ((IsOpenCLKernel || IsHIPKernel) &&
      (M.getTriple().getOS() == llvm::Triple::AMDHSA))
    F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    unsigned Min = 0;
    unsigned Max = 0;
    if (FlatWGS) {
      Min = FlatWGS->getMin()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
      Max = FlatWGS->getMax()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
    }
    if (ReqdWGS && Min == 0 && Max == 0)
      Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

    if (Min != 0) {
      assert(Min <= Max && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // By default, restrict the maximum size to 256.
    F->addFnAttr("amdgpu-flat-work-group-size", "1,256");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
    unsigned Min =
        Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
    unsigned Max = Attr->getMax() ? Attr->getMax()
                                        ->EvaluateKnownConstInt(M.getContext())
                                        .getExtValue()
                                  : 0;

    if (Min != 0) {
      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }
}

unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

// Currently LLVM assumes null pointers always have value 0, which results in
// incorrectly transformed IR. Therefore, instead of emitting null pointers in
// the private and local address spaces, a null pointer in the generic address
// space is emitted and then cast to a pointer in the local or private address
// space.
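//
// For example (illustrative), a null 'local int *' is lowered roughly as
//   addrspacecast (i32* null to i32 addrspace(3)*)
// rather than as a literal null constant in addrspace(3).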
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(PT->getElementType(),
      Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}

LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  if (CGM.isTypeConstant(D->getType(), false)) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return ConstAS.getValue();
  }
  return DefaultGlobalAS;
}

llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::OpenCLWorkGroup:
    Name = "workgroup";
    break;
  case SyncScope::OpenCLDevice:
    Name = "agent";
    break;
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  case SyncScope::OpenCLSubGroup:
    Name = "wavefront";
  }

  if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();
    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}

bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
}

//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Ensures that complex values are passed in registers.
//
namespace {
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo
SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType()) {
    return ABIArgInfo::getDirect();
  }
  else {
    return DefaultABIInfo::classifyReturnType(Ty);
  }
}

void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array, structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
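  // Illustrative example: for 'struct { int i; float f; }' the builder
  // yields the coercion type { i32, float } and sets InReg, because the
  // 32-bit float must be visible to be passed in a floating point register.
  //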
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();

      return llvm::StructType::get(Context, Elems);
    }
  };
} // end anonymous namespace

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}

Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  CGBuilderTy &Builder = CGF.Builder;
  Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.first;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }

  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }

  case ABIArgInfo::Indirect:
    Stride = SlotSize;
    ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.second);
    break;

  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
  }

  // Update VAList.
  Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
  Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}

void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace

bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output.  AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  //   Y   = 64
  //   PSR = 65
  //   WIM = 66
  //   TBR = 67
  //   PC  = 68
  //   NPC = 69
  //   FSR = 70
  //   CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}

// ARC ABI implementation.
namespace {

class ARCABIInfo : public DefaultABIInfo {
public:
  using DefaultABIInfo::DefaultABIInfo;

private:
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
    if (!State.FreeRegs)
      return;
    if (Info.isIndirect() && Info.getInReg())
      State.FreeRegs--;
    else if (Info.isDirect() && Info.getInReg()) {
      unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
      if (sz < State.FreeRegs)
        State.FreeRegs -= sz;
      else
        State.FreeRegs = 0;
    }
  }

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI.getCallingConvention());
    // ARC uses 8 registers to pass arguments.
    State.FreeRegs = 8;

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    updateState(FI.getReturnInfo(), FI.getReturnType(), State);
    for (auto &I : FI.arguments()) {
      I.info = classifyArgumentType(I.type, State.FreeRegs);
      updateState(I.info, I.type, State);
    }
  }

  ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
  ABIArgInfo getIndirectByValue(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;
};

class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARCTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new ARCABIInfo(CGT)) {}
};

ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
  return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
                       getNaturalAlignIndirect(Ty, false);
}

ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 TypeAlign > MinABIStackAlignInBytes);
}

Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), true);
}

ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
                                            uint8_t FreeRegs) const {
  // Handle the generic C++ ABI.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect)
      return getIndirectByRef(Ty, FreeRegs > 0);

    if (RAA == CGCXXABI::RAA_DirectInMemory)
      return getIndirectByValue(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectByValue(Ty);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();

    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
    llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);

    return FreeRegs >= SizeInRegs ?
        ABIArgInfo::getDirectInReg(Result) :
        ABIArgInfo::getDirect(Result, 0, nullptr, false);
  }

  return Ty->isPromotableIntegerType() ?
      (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) :
                                ABIArgInfo::getExtend(Ty)) :
      (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() :
                                ABIArgInfo::getDirect());
}
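
// Illustrative example (not from the ARC spec): a 12-byte struct has
// SizeInRegs == 3; with at least 3 registers free it is passed DirectInReg
// with coerced type {i32, i32, i32}, otherwise Direct on the stack with the
// same coerced type.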

ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirectInReg();

  // Return values larger than 4 registers are returned indirectly.
  auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
  if (RetSize > 4)
    return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);

  return DefaultABIInfo::classifyReturnType(RetTy);
}

} // End anonymous namespace.

//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

8596 /// TypeStringCache caches the meta encodings of Types.
8598 /// The reason for caching TypeStrings is two fold:
8599 /// 1. To cache a type's encoding for later uses;
8600 /// 2. As a means to break recursive member type inclusion.
8602 /// A cache Entry can have a Status of:
8603 /// NonRecursive: The type encoding is not recursive;
8604 /// Recursive: The type encoding is recursive;
8605 /// Incomplete: An incomplete TypeString;
8606 /// IncompleteUsed: An incomplete TypeString that has been used in a
8607 /// Recursive type encoding.
8609 /// A NonRecursive entry will have all of its sub-members expanded as fully
8610 /// as possible. Whilst it may contain types which are recursive, the type
8611 /// itself is not recursive and thus its encoding may be safely used whenever
8612 /// the type is encountered.
8614 /// A Recursive entry will have all of its sub-members expanded as fully as
8615 /// possible. The type itself is recursive and it may contain other types which
8616 /// are recursive. The Recursive encoding must not be used during the expansion
8617 /// of a recursive type's recursive branch. For simplicity the code uses
8618 /// IncompleteCount to reject all usage of Recursive encodings for member types.
8620 /// An Incomplete entry is always a RecordType and only encodes its
8621 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
8622 /// are placed into the cache during type expansion as a means to identify and
8623 /// handle recursive inclusion of types as sub-members. If there is recursion
8624 /// the entry becomes IncompleteUsed.
8626 /// During the expansion of a RecordType's members:
8628 /// If the cache contains a NonRecursive encoding for the member type, the
8629 /// cached encoding is used;
8631 /// If the cache contains a Recursive encoding for the member type, the
8632 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
8634 /// If the member is a RecordType, an Incomplete encoding is placed into the
8635 /// cache to break potential recursive inclusion of itself as a sub-member;
8637 /// Once a member RecordType has been expanded, its temporary incomplete
8638 /// entry is removed from the cache. If a Recursive encoding was swapped out
8639 /// it is swapped back in;
8641 /// If an incomplete entry is used to expand a sub-member, the incomplete
8642 /// entry is marked as IncompleteUsed. The cache keeps count of how many
8643 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
8645 /// If a member's encoding is found to be a NonRecursive or Recursive viz:
8646 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
8647 /// Else the member is part of a recursive type and thus the recursion has
8648 /// been exited too soon for the encoding to be correct for the member.
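///
/// Illustrative walk-through (an assumed example following the scheme above,
/// not taken from the XMOS documentation): expanding
///   struct S { struct S *next; };
/// first caches the Incomplete stub "s(S){}" for S; the member expansion then
/// reaches S again and uses the stub, marking it IncompleteUsed; the completed
/// encoding "s(S){m(next){p(s(S){})}}" is finally cached for S as Recursive.
///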
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};

/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

} // End anonymous namespace.

// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
    Builder.CreateStore(APN.getPointer(), VAListAddr);
  }

  return Val;
}

/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount != 0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef();   // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef();   // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef();   // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// the safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
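///
/// Illustrative example (an assumed sketch; see xcore-stringtype.c for the
/// authoritative cases): a global 'int g;' would yield metadata roughly like
///   !xcore.typestrings = !{!0}
///   !0 = !{i32* @g, !"si"}
///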
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}

//===----------------------------------------------------------------------===//
// SPIR ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  unsigned getOpenCLKernelCallingConv() const override;
};

} // End anonymous namespace.

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  DefaultABIInfo SPIRABI(CGM.getTypes());
  SPIRABI.computeInfo(FI);
}
} // namespace CodeGen
} // namespace clang

unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}

/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType() ? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for sort algorithm.
    if (RT->isUnionType())
      llvm::sort(FE);
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}

/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    llvm::sort(FE);
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
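
// Illustrative example (assumed, following the encoding scheme above):
// 'enum E { A = 1, B = 0 };' encodes, after the alphanumeric sort of its
// members, as "e(E){m(A){1},m(B){0}}".
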
/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
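
// e.g. for a 'const volatile int' the Lookup value is 1 + 4 == 5, so "cv:"
// is appended ahead of the base encoding, giving "cv:si".
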
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
  case BuiltinType::Void:       EncType = "0";   break;
  case BuiltinType::Bool:       EncType = "b";   break;
  case BuiltinType::Char_U:     EncType = "uc";  break;
  case BuiltinType::UChar:      EncType = "uc";  break;
  case BuiltinType::SChar:      EncType = "sc";  break;
  case BuiltinType::UShort:     EncType = "us";  break;
  case BuiltinType::Short:      EncType = "ss";  break;
  case BuiltinType::UInt:       EncType = "ui";  break;
  case BuiltinType::Int:        EncType = "si";  break;
  case BuiltinType::ULong:      EncType = "ul";  break;
  case BuiltinType::Long:       EncType = "sl";  break;
  case BuiltinType::ULongLong:  EncType = "ull"; break;
  case BuiltinType::LongLong:   EncType = "sll"; break;
  case BuiltinType::Float:      EncType = "ft";  break;
  case BuiltinType::Double:     EncType = "d";   break;
  case BuiltinType::LongDouble: EncType = "ld";  break;
  default:
    return false;
  }
  Enc += EncType;
  return true;
}

/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += "0";
    }
  }
  Enc += ')';
  return true;
}
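
// Illustrative examples (assumed, per the scheme above): 'int f(int)'
// encodes as "f{si}(si)", a variadic 'int f(int, ...)' as "f{si}(si,va)",
// and a prototyped no-argument 'int f(void)' as "f{si}(0)".
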
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}

//===----------------------------------------------------------------------===//
// RISCV ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;
};
} // end anonymous namespace

void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirectly, or if the type size is greater than 2*XLen. e.g.
  // fp128 is passed direct in LLVM IR, relying on the backend lowering code
  // to rewrite the argument list and pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect ||
                       getContext().getTypeSize(RetTy) > (2 * XLen);

  // We must track the number of GPRs used in order to conform to the RISC-V
  // ABI, as integer scalars passed in registers should have signext/zeroext
  // when promoted, but are anyext if passed on the stack. As GPR usage is
  // different for variadic arguments, we must also track whether we are
  // examining a vararg or not.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}

// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Half precision isn't
    // currently supported on RISC-V and the ABI hasn't been confirmed, so
    // default to the integer ABI in that case.
    if (IsFloat && (Size > FLen || Size < 32))
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    assert(CurOff.isZero() && "Unexpected offset for first field");
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}
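
// Illustrative example (assumed RV32 'ilp32d' configuration: XLen == 32,
// FLen == 64): 'struct { double d; int i; }' flattens to Field1Ty = double
// at offset 0 and Field2Ty = i32 at offset 8, so it remains a candidate,
// whereas 'struct { int a; int b; }' is rejected by the int+int check above.
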
// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return IsCandidate;
}

// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
  CharUnits Field1Size =
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1Size)
    Padding = Field2Off - Field1Size;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
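
// Illustrative example (assumed): for 'struct { double d; float f; }' with
// Field1Off == 0 and Field2Off == 8, no explicit padding is required and the
// result coerces to { double, float } with an identical unpadded type; a
// padding byte array is only inserted when the C field offset exceeds what
// the natural (or packed) LLVM struct layout would already provide.
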
ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs;
    int NeededArgFPRs;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  bool MustUseStack = false;
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    MustUseStack = true;
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width, unless passed on the
    // stack.
    if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
      return extendType(Ty);
    }

    return ABIArgInfo::getDirect();
  }

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
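
// Illustrative example (assumed RV32 'ilp32'): 'struct { int a; int b; }'
// has Size == 64 <= 2*XLen and only XLen alignment, so it coerces to
// [2 x i32]; a 24-byte struct exceeds 2*XLen and is passed indirectly.
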
ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}

Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  std::pair<CharUnits, CharUnits> SizeAndAlign =
      getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*XLen bytes are passed indirectly.
  bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
                          SlotSize, /*AllowHigherAlign=*/true);
}

ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // RV64 ABI requires unsigned 32 bit integers to be sign extended.
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}
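
// e.g. (assumed RV64 'lp64' case) a 'uint32_t' argument becomes signext via
// the special case above, while a 'uint16_t' argument becomes zeroext through
// getExtend's signedness-based choice.
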
namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::user: Kind = "user"; break;
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace
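
// Illustrative usage (assumed):
//   __attribute__((interrupt("machine"))) void isr(void) {}
// lowers to an IR function carrying the attribute "interrupt"="machine".
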
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::avr:
    return SetCGInfo(new AVRTargetCodeGenInfo(Types));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;
    else if (Triple.isOSWindows())
      return SetCGInfo(
          new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft" ||
                                              getTarget().hasFeature("spe")));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64: {
    StringRef ABIStr = getTarget().getABI();
    unsigned XLen = getTarget().getPointerWidth(0);
    unsigned ABIFLen = 0;
    if (ABIStr.endswith("f"))
      ABIFLen = 32;
    else if (ABIStr.endswith("d"))
      ABIFLen = 64;
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
  }

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::arc:
    return SetCGInfo(new ARCTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}

/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                             llvm::Function *Invoke,
                                             llvm::Value *BlockLiteral) const {
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  for (auto &P : InvokeFT->params())
    ArgTys.push_back(P);
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args;
  for (auto &A : F->args())
    Args.push_back(&A);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}
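
// Illustrative result (an assumed sketch with hypothetical names and address
// spaces): given an invoke function
//   define internal void @__f_block_invoke(i8 addrspace(4)* %b)
// this would emit roughly
//   define internal void @__f_block_invoke_kernel(i8 addrspace(4)* %0) {
//   entry:
//     call void @__f_block_invoke(i8 addrspace(4)* %0)
//     ret void
//   }
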
/// Create an OpenCL kernel for an enqueued block.
///
/// The type of the first argument (the block literal) is the struct type
/// of the block literal instead of a pointer type. The first argument
/// (block literal) is passed directly by value to the kernel. The kernel
/// allocates the same type of struct on stack and stores the block literal
/// to it and passes its pointer to the block invoke function. The kernel
/// has "enqueued-block" function attribute and kernel argument metadata.
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke,
    llvm::Value *BlockLiteral) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->addFnAttr("enqueued-block");
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(llvm::MaybeAlign(BlockAlign));
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
    Args.push_back(I);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}
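
// Illustrative metadata (an assumed sketch): a kernel wrapping a block that
// takes one additional local pointer might carry, e.g.,
//   !kernel_arg_addr_space !{i32 0, i32 3}
//   !kernel_arg_type !{!"__block_literal", !"void*"}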