//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

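// Illustrative example, not part of the original file: for a C type such as
//   struct S { char c; int i; };   // Size = 64 bits, Alignment = 32 bits
// this helper computes NumElements = (64 + 31) / 32 = 2 and coerces S to the
// IR type [2 x i32], preserving the source-level size (padding included) and
// alignment, instead of a canonical coercion that might widen the value.
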
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee.  But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

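// Illustrative example, not part of the original file: on a target with
// 32-bit pointers, the scalar sequence { i8*, i64, float } costs one integer
// register for the pointer, two for the i64 ((64 + 31) / 32), and one fp
// register for the float; with maxAllRegisters == 4 the total of exactly four
// does not occupy "more than" the limit, so the value can stay in registers.
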
bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                               const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

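// Illustrative example, not part of the original file: given the C code
//   typedef union {
//     int *ip;
//     float *fp;
//   } U __attribute__((transparent_union));
// an argument declared as U is classified exactly as if it had been declared
// with the type of its first field, int *.
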
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend: OS << "Extend"; break;
  case Ignore: OS << "Ignore"; break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand: OS << "Expand"; break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

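// Illustrative example, not part of the original file: with Align == 8, a
// pointer value of 0x1003 becomes (0x1003 + 7) & -8 == 0x1008, the next
// 8-byte boundary, while a pointer already at 0x1008 is left unchanged.
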
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.  If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary.  Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  Address NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
  CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

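// Illustrative sketch, not part of the original file: for a 4-byte, 4-byte
// aligned argument with 4-byte stack slots on a little-endian target, the
// code above emits IR along these lines (names abbreviated):
//   %argp.cur  = load i8*, i8** %va_list
//   %argp.next = getelementptr inbounds i8, i8* %argp.cur, i64 4
//   store i8* %argp.next, i8** %va_list
// and returns %argp.cur bitcast to a pointer to the argument type.
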
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                      const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   HiPE
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list, which makes a feasible implementation of clSetKernelArg() with
  // aggregates etc. possible. If we used the default C calling convention
  // here, clSetKernelArg() might break depending on the target-specific
  // conventions; different targets might split structs passed as values
  // into multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  // Try to preserve the source's name to make IR more readable.
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                      SyncScope Scope,
                                      llvm::AtomicOrdering Ordering,
                                      llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

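// Illustrative example, not part of the original file: in C,
//   struct Empty { };
//   struct StillEmpty { struct Empty e[4]; };
// both count as empty records here (the array of empty records is stripped
// when AllowArrays is set), whereas a structure with a flexible array member
// (e.g. struct F { int len; char data[]; }) is never considered empty.
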
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

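// Illustrative example, not part of the original file:
//   struct Inner { double d; };
//   struct Outer { struct Inner i; };   // single-element struct: double
//   struct Arr1  { double d[1]; };      // single-element struct: double
//   struct Pair  { double a, b; };      // not single-element
// The final size comparison rejects structs whose tail padding makes them
// larger than the lone element they contain.
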
namespace {
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)
  //
  // Only a few cases are covered here at the moment -- those needed
  // by the default ABI.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};
} // end anonymous namespace

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                        : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

namespace {
class WebAssemblyABIInfo final : public SwiftABIInfo {
  DefaultABIInfo defaultInfo;

public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : SwiftABIInfo(CGT), defaultInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-module", Attr->getImportModule());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-name", Attr->getImportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
    }

    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
        Fn->addFnAttr("no-prototype");
    }
  }
};
} // end anonymous namespace

/// Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/ true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

namespace {
class PNaClABIInfo : public ABIInfo {
 public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};
} // end anonymous namespace

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCl ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                        : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                     .Cases("y", "&y", "^Ym", true)
                     .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;

  /// Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                             bool &UsedInAlloca) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
    : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers.  Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('v' << 16) |
                   ('2' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};

} // end anonymous namespace

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // 64.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, type must be register sized.
  // For the MCU ABI, it only needs to be <= 8-byte
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding.  (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}

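// Illustrative example, not part of the original file:
//   struct P { int x; int y; };    // two 32-bit fields, no padding: expands
//   struct Q { char c; int x; };   // char forces alignment padding: does not
// Expanding P into two separate i32 arguments reproduces its stack layout
// exactly, so the byval attribute (and the optimization barriers it implies)
// can be avoided.
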
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogenous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
        Ty->isReferenceType());
  }

  return true;
}

1629 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1630 CCState &State) const {
1631 // FIXME: Set alignment on indirect arguments.
1633 Ty = useFirstFieldIfTransparentUnion(Ty);
1635 // Check with the C++ ABI first.
1636 const RecordType *RT = Ty->getAs<RecordType>();
1638 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1639 if (RAA == CGCXXABI::RAA_Indirect) {
1640 return getIndirectResult(Ty, false, State);
1641 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1642 // The field index doesn't matter, we'll fix it up later.
1643 return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1647 // Regcall uses the concept of a homogenous vector aggregate, similar
1648 // to other targets.
1649 const Type *Base = nullptr;
1650 uint64_t NumElts = 0;
1651 if (State.CC == llvm::CallingConv::X86_RegCall &&
1652 isHomogeneousAggregate(Ty, Base, NumElts)) {
1654 if (State.FreeSSERegs >= NumElts) {
1655 State.FreeSSERegs -= NumElts;
1656 if (Ty->isBuiltinType() || Ty->isVectorType())
1657 return ABIArgInfo::getDirect();
1658 return ABIArgInfo::getExpand();
1660 return getIndirectResult(Ty, /*ByVal=*/false, State);
1663 if (isAggregateTypeForABI(Ty)) {
1664 // Structures with flexible arrays are always indirect.
1665 // FIXME: This should not be byval!
1666 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1667 return getIndirectResult(Ty, true, State);
1669 // Ignore empty structs/unions on non-Windows.
1670 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1671 return ABIArgInfo::getIgnore();
1673 llvm::LLVMContext &LLVMContext = getVMContext();
1674 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1675 bool NeedsPadding = false;
1677 if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1678 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
1679 SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1680 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1682 return ABIArgInfo::getDirectInReg(Result);
1684 return ABIArgInfo::getDirect(Result);
1686 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1688 // Expand small (<= 128-bit) record types when we know that the stack layout
1689 // of those arguments will match the struct. This is important because the
1690 // LLVM backend isn't smart enough to remove byval, which inhibits many
1692 // Don't do this for the MCU if there are still free integer registers
1693 // (see X86_64 ABI for full explanation).
1694 if (getContext().getTypeSize(Ty) <= 4 * 32 &&
1695 (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
1696 return ABIArgInfo::getExpandWithPadding(
1697 State.CC == llvm::CallingConv::X86_FastCall ||
1698 State.CC == llvm::CallingConv::X86_VectorCall ||
1699 State.CC == llvm::CallingConv::X86_RegCall,
1702 return getIndirectResult(Ty, true, State);
1705 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1706 // On Darwin, some vectors are passed in memory, we handle this by passing
1707 // it as an i8/i16/i32/i64.
1708 if (IsDarwinVectorABI) {
1709 uint64_t Size = getContext().getTypeSize(Ty);
1710 if ((Size == 8 || Size == 16 || Size == 32) ||
1711 (Size == 64 && VT->getNumElements() == 1))
1712 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1716 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1717 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1719 return ABIArgInfo::getDirect();
1720 }
1723 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1724 Ty = EnumTy->getDecl()->getIntegerType();
1726 bool InReg = shouldPrimitiveUseInReg(Ty, State);
1728 if (Ty->isPromotableIntegerType()) {
1729 if (InReg)
1730 return ABIArgInfo::getExtendInReg(Ty);
1731 return ABIArgInfo::getExtend(Ty);
1732 }
1734 if (InReg)
1735 return ABIArgInfo::getDirectInReg();
1736 return ABIArgInfo::getDirect();
1737 }
1739 void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
1740 bool &UsedInAlloca) const {
1741 // Vectorcall x86 works subtly differently than it does on x64, so the format
1742 // is a bit different from the x64 version. First, all vector types (not HVAs)
1743 // are assigned, with the first 6 ending up in the YMM0-5 or XMM0-5 registers.
1744 // This differs from the x64 implementation, where the first 6 by INDEX get
1745 // registers.
1746 // After that, integers AND HVAs are assigned Left to Right in the same pass.
1747 // Integers are passed as ECX/EDX if one is available (in order). HVAs will
1748 // first take up the remaining YMM/XMM registers. If insufficient registers
1749 // remain but an integer register (ECX/EDX) is available, it will be passed
1750 // in that, else, on the stack.
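// Editor's worked example (hypothetical declarations, hedged sketch): for
//   void __vectorcall f(__m128 v, HVA2 h, int n);  // HVA2 = two __m128 fields
// the first pass assigns 'v' to XMM0 and skips 'h' and 'n'; the second pass
// then gives 'n' the first free integer register (ECX) and 'h' the next two
// free XMM registers, falling back to memory if fewer than two remain.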
1751 for (auto &I : FI.arguments()) {
1752 // First pass do all the vector types.
1753 const Type *Base = nullptr;
1754 uint64_t NumElts = 0;
1755 const QualType& Ty = I.type;
1756 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1757 isHomogeneousAggregate(Ty, Base, NumElts)) {
1758 if (State.FreeSSERegs >= NumElts) {
1759 State.FreeSSERegs -= NumElts;
1760 I.info = ABIArgInfo::getDirect();
1761 } else {
1762 I.info = classifyArgumentType(Ty, State);
1763 }
1764 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1765 }
1768 for (auto &I : FI.arguments()) {
1769 // Second pass, do the rest!
1770 const Type *Base = nullptr;
1771 uint64_t NumElts = 0;
1772 const QualType& Ty = I.type;
1773 bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts);
1775 if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) {
1776 // Assign true HVAs (non vector/native FP types).
1777 if (State.FreeSSERegs >= NumElts) {
1778 State.FreeSSERegs -= NumElts;
1779 I.info = getDirectX86Hva();
1780 } else {
1781 I.info = getIndirectResult(Ty, /*ByVal=*/false, State);
1782 }
1783 } else if (!IsHva) {
1784 // Assign all Non-HVAs, so this will exclude Vector/FP args.
1785 I.info = classifyArgumentType(Ty, State);
1786 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1787 }
1788 }
1789 }
1791 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1792 CCState State(FI.getCallingConvention());
1793 if (IsMCUABI)
1794 State.FreeRegs = 3;
1795 else if (State.CC == llvm::CallingConv::X86_FastCall)
1796 State.FreeRegs = 2;
1797 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1798 State.FreeRegs = 2;
1799 State.FreeSSERegs = 6;
1800 } else if (FI.getHasRegParm())
1801 State.FreeRegs = FI.getRegParm();
1802 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1803 State.FreeRegs = 5;
1804 State.FreeSSERegs = 8;
1805 } else
1806 State.FreeRegs = DefaultNumRegisterParameters;
1808 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1809 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1810 } else if (FI.getReturnInfo().isIndirect()) {
1811 // The C++ ABI is not aware of register usage, so we have to check if the
1812 // return value was sret and put it in a register ourselves if appropriate.
1813 if (State.FreeRegs) {
1814 --State.FreeRegs; // The sret parameter consumes a register.
1815 if (!IsMCUABI)
1816 FI.getReturnInfo().setInReg(true);
1817 }
1818 }
1820 // The chain argument effectively gives us another free register.
1821 if (FI.isChainCall())
1822 ++State.FreeRegs;
1824 bool UsedInAlloca = false;
1825 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1826 computeVectorCallArgs(FI, State, UsedInAlloca);
1827 } else {
1828 // If not vectorcall, revert to normal behavior.
1829 for (auto &I : FI.arguments()) {
1830 I.info = classifyArgumentType(I.type, State);
1831 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1832 }
1833 }
1835 // If we needed to use inalloca for any argument, do a second pass and rewrite
1836 // all the memory arguments to use inalloca.
1837 if (UsedInAlloca)
1838 rewriteWithInAlloca(FI);
1839 }
1841 void
1842 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1843 CharUnits &StackOffset, ABIArgInfo &Info,
1844 QualType Type) const {
1845 // Arguments are always 4-byte-aligned.
1846 CharUnits FieldAlign = CharUnits::fromQuantity(4);
1848 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
1849 Info = ABIArgInfo::getInAlloca(FrameFields.size());
1850 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1851 StackOffset += getContext().getTypeSizeInChars(Type);
1853 // Insert padding bytes to respect alignment.
1854 CharUnits FieldEnd = StackOffset;
1855 StackOffset = FieldEnd.alignTo(FieldAlign);
1856 if (StackOffset != FieldEnd) {
1857 CharUnits NumBytes = StackOffset - FieldEnd;
1858 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1859 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1860 FrameFields.push_back(Ty);
1861 }
1862 }
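// Editor's worked example (hedged sketch): appending a hypothetical 6-byte
// field at StackOffset 0 records the field type, advances StackOffset to 6,
// rounds it up to the next 4-byte boundary (8), and appends a [2 x i8]
// padding member so the following field starts aligned.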
1864 static bool isArgInAlloca(const ABIArgInfo &Info) {
1865 // Leave ignored and inreg arguments alone.
1866 switch (Info.getKind()) {
1867 case ABIArgInfo::InAlloca:
1868 return true;
1869 case ABIArgInfo::Indirect:
1870 assert(Info.getIndirectByVal());
1871 return true;
1872 case ABIArgInfo::Ignore:
1873 return false;
1874 case ABIArgInfo::Direct:
1875 case ABIArgInfo::Extend:
1876 if (Info.getInReg())
1877 return false;
1878 return true;
1879 case ABIArgInfo::Expand:
1880 case ABIArgInfo::CoerceAndExpand:
1881 // These are aggregate types which are never passed in registers when
1882 // inalloca is involved.
1883 return true;
1884 }
1885 llvm_unreachable("invalid enum");
1886 }
1888 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1889 assert(IsWin32StructABI && "inalloca only supported on win32");
1891 // Build a packed struct type for all of the arguments in memory.
1892 SmallVector<llvm::Type *, 6> FrameFields;
1894 // The stack alignment is always 4.
1895 CharUnits StackAlign = CharUnits::fromQuantity(4);
1897 CharUnits StackOffset;
1898 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1900 // Put 'this' into the struct before 'sret', if necessary.
1901 bool IsThisCall =
1902 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
1903 ABIArgInfo &Ret = FI.getReturnInfo();
1904 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
1905 isArgInAlloca(I->info)) {
1906 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1907 ++I;
1908 }
1910 // Put the sret parameter into the inalloca struct if it's in memory.
1911 if (Ret.isIndirect() && !Ret.getInReg()) {
1912 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
1913 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1914 // On Windows, the hidden sret parameter is always returned in eax.
1915 Ret.setInAllocaSRet(IsWin32StructABI);
1916 }
1918 // Skip the 'this' parameter in ecx.
1919 if (IsThisCall)
1920 ++I;
1922 // Put arguments passed in memory into the struct.
1923 for (; I != E; ++I) {
1924 if (isArgInAlloca(I->info))
1925 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1928 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1929 /*isPacked=*/true),
1930 StackAlign);
1931 }
1933 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
1934 Address VAListAddr, QualType Ty) const {
1936 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1938 // x86-32 changes the alignment of certain arguments on the stack.
1940 // Just messing with TypeInfo like this works because we never pass
1941 // anything indirectly.
1942 TypeInfo.second = CharUnits::fromQuantity(
1943 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
1945 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
1946 TypeInfo, CharUnits::fromQuantity(4),
1947 /*AllowHigherAlign*/ true);
1948 }
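// Editor's sketch of the resulting lowering (hedged): va_arg(ap, double)
// loads 8 bytes from the current 4-byte-aligned slot and advances the list
// pointer by 8; AllowHigherAlign means a type whose stack alignment was
// raised by getTypeStackAlignInBytes first rounds the slot address up to
// that alignment.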
1950 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1951 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1952 assert(Triple.getArch() == llvm::Triple::x86);
1954 switch (Opts.getStructReturnConvention()) {
1955 case CodeGenOptions::SRCK_Default:
1956 break;
1957 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
1958 return false;
1959 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
1960 return true;
1961 }
1963 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1964 return true;
1966 switch (Triple.getOS()) {
1967 case llvm::Triple::DragonFly:
1968 case llvm::Triple::FreeBSD:
1969 case llvm::Triple::OpenBSD:
1970 case llvm::Triple::Win32:
1971 return true;
1972 default:
1973 return false;
1974 }
1975 }
1977 void X86_32TargetCodeGenInfo::setTargetAttributes(
1978 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
1979 if (GV->isDeclaration())
1980 return;
1981 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1982 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1983 llvm::Function *Fn = cast<llvm::Function>(GV);
1984 Fn->addFnAttr("stackrealign");
1985 }
1986 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1987 llvm::Function *Fn = cast<llvm::Function>(GV);
1988 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1989 }
1990 }
1991 }
1993 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1994 CodeGen::CodeGenFunction &CGF,
1995 llvm::Value *Address) const {
1996 CodeGen::CGBuilderTy &Builder = CGF.Builder;
1998 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2000 // 0-7 are the eight integer registers; the order is different
2001 // on Darwin (for EH), but the range is the same.
2002 // 8 is %eip.
2003 AssignToArrayRange(Builder, Address, Four8, 0, 8);
2005 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2006 // 12-16 are st(0..4). Not sure why we stop at 4.
2007 // These have size 16, which is sizeof(long double) on
2008 // platforms with 8-byte alignment for that type.
2009 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2010 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2012 } else {
2013 // 9 is %eflags, which doesn't get a size on Darwin for some
2014 // reason.
2015 Builder.CreateAlignedStore(
2016 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2017 CharUnits::One());
2019 // 11-16 are st(0..5). Not sure why we stop at 5.
2020 // These have size 12, which is sizeof(long double) on
2021 // platforms with 4-byte alignment for that type.
2022 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2023 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2024 }
2026 return false;
2027 }
2029 //===----------------------------------------------------------------------===//
2030 // X86-64 ABI Implementation
2031 //===----------------------------------------------------------------------===//
2035 /// The AVX ABI level for X86 targets.
2036 enum class X86AVXABILevel {
2037 None,
2038 AVX,
2039 AVX512
2040 };
2042 /// \returns the size in bits of the largest (native) vector for \p AVXLevel.
2043 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2044 switch (AVXLevel) {
2045 case X86AVXABILevel::AVX512:
2046 return 512;
2047 case X86AVXABILevel::AVX:
2048 return 256;
2049 case X86AVXABILevel::None:
2050 return 128;
2051 }
2052 llvm_unreachable("Unknown AVXLevel");
2053 }
2055 /// X86_64ABIInfo - The X86_64 ABI information.
2056 class X86_64ABIInfo : public SwiftABIInfo {
2057 enum Class {
2058 Integer = 0,
2059 SSE,
2060 SSEUp,
2061 X87,
2062 X87Up,
2063 ComplexX87,
2064 NoClass,
2065 Memory
2066 };
2068 /// merge - Implement the X86_64 ABI merging algorithm.
2070 /// Merge an accumulating classification \arg Accum with a field
2071 /// classification \arg Field.
2073 /// \param Accum - The accumulating classification. This should
2074 /// always be either NoClass or the result of a previous merge
2075 /// call. In addition, this should never be Memory (the caller
2076 /// should just return Memory for the aggregate).
2077 static Class merge(Class Accum, Class Field);
2079 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2081 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2082 /// final MEMORY or SSE classes when necessary.
2084 /// \param AggregateSize - The size of the current aggregate in
2085 /// the classification process.
2087 /// \param Lo - The classification for the parts of the type
2088 /// residing in the low word of the containing object.
2090 /// \param Hi - The classification for the parts of the type
2091 /// residing in the higher words of the containing object.
2093 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2095 /// classify - Determine the x86_64 register classes in which the
2096 /// given type T should be passed.
2098 /// \param Lo - The classification for the parts of the type
2099 /// residing in the low word of the containing object.
2101 /// \param Hi - The classification for the parts of the type
2102 /// residing in the high word of the containing object.
2104 /// \param OffsetBase - The bit offset of this type in the
2105 /// containing object. Some parameters are classified different
2106 /// depending on whether they straddle an eightbyte boundary.
2108 /// \param isNamedArg - Whether the argument in question is a "named"
2109 /// argument, as used in AMD64-ABI 3.5.7.
2111 /// If a word is unused its result will be NoClass; if a type should
2112 /// be passed in Memory then at least the classification of \arg Lo
2113 /// will be Memory.
2115 /// The \arg Lo class will be NoClass iff the argument is ignored.
2117 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2118 /// also be ComplexX87.
2119 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2120 bool isNamedArg) const;
2122 llvm::Type *GetByteVectorType(QualType Ty) const;
2123 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2124 unsigned IROffset, QualType SourceTy,
2125 unsigned SourceOffset) const;
2126 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2127 unsigned IROffset, QualType SourceTy,
2128 unsigned SourceOffset) const;
2130 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2131 /// result such that the argument will be returned in memory.
2132 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2134 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2135 /// such that the argument will be passed in memory.
2137 /// \param freeIntRegs - The number of free integer registers remaining
2138 /// available.
2139 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2141 ABIArgInfo classifyReturnType(QualType RetTy) const;
2143 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2144 unsigned &neededInt, unsigned &neededSSE,
2145 bool isNamedArg) const;
2147 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2148 unsigned &NeededSSE) const;
2150 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2151 unsigned &NeededSSE) const;
2153 bool IsIllegalVectorType(QualType Ty) const;
2155 /// The 0.98 ABI revision clarified a lot of ambiguities,
2156 /// unfortunately in ways that were not always consistent with
2157 /// certain previous compilers. In particular, platforms which
2158 /// required strict binary compatibility with older versions of GCC
2159 /// may need to exempt themselves.
2160 bool honorsRevision0_98() const {
2161 return !getTarget().getTriple().isOSDarwin();
2164 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2165 /// classify it as INTEGER (for compatibility with older clang compilers).
2166 bool classifyIntegerMMXAsSSE() const {
2167 // Clang <= 3.8 did not do this.
2168 if (getContext().getLangOpts().getClangABICompat() <=
2169 LangOptions::ClangABI::Ver3_8)
2170 return false;
2172 const llvm::Triple &Triple = getTarget().getTriple();
2173 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2174 return false;
2175 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2176 return false;
2177 return true;
2178 }
2180 X86AVXABILevel AVXLevel;
2181 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
2182 // x86-64.
2183 bool Has64BitPointers;
2186 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2187 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2188 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2189 }
2191 bool isPassedUsingAVXType(QualType type) const {
2192 unsigned neededInt, neededSSE;
2193 // The freeIntRegs argument doesn't matter here.
2194 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2195 /*isNamedArg*/true);
2196 if (info.isDirect()) {
2197 llvm::Type *ty = info.getCoerceToType();
2198 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2199 return (vectorTy->getBitWidth() > 128);
2200 }
2201 return false;
2202 }
2204 void computeInfo(CGFunctionInfo &FI) const override;
2206 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2207 QualType Ty) const override;
2208 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2209 QualType Ty) const override;
2211 bool has64BitPointers() const {
2212 return Has64BitPointers;
2213 }
2215 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
2216 bool asReturnValue) const override {
2217 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2218 }
2219 bool isSwiftErrorInRegister() const override {
2220 return true;
2221 }
2222 };
2224 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2225 class WinX86_64ABIInfo : public SwiftABIInfo {
2227 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2228 : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2229 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2231 void computeInfo(CGFunctionInfo &FI) const override;
2233 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2234 QualType Ty) const override;
2236 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2237 // FIXME: Assumes vectorcall is in use.
2238 return isX86VectorTypeForVectorCall(getContext(), Ty);
2241 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2242 uint64_t NumMembers) const override {
2243 // FIXME: Assumes vectorcall is in use.
2244 return isX86VectorCallAggregateSmallEnough(NumMembers);
2247 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2248 bool asReturnValue) const override {
2249 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2250 }
2252 bool isSwiftErrorInRegister() const override {
2253 return true;
2254 }
2256 private:
2257 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2258 bool IsVectorCall, bool IsRegCall) const;
2259 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2260 const ABIArgInfo &current) const;
2261 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2262 bool IsVectorCall, bool IsRegCall) const;
2264 X86AVXABILevel AVXLevel;
2265 bool IsMingw64;
2266 };
2269 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2271 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2272 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2274 const X86_64ABIInfo &getABIInfo() const {
2275 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2278 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2279 /// the autoreleaseRV/retainRV optimization.
2280 bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const override {
2281 return true;
2282 }
2284 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2285 return 7;
2286 }
2288 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2289 llvm::Value *Address) const override {
2290 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2292 // 0-15 are the 16 integer registers.
2293 // 16 is %rip.
2294 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2295 return false;
2296 }
2298 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2299 StringRef Constraint,
2300 llvm::Type* Ty) const override {
2301 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2304 bool isNoProtoCallVariadic(const CallArgList &args,
2305 const FunctionNoProtoType *fnType) const override {
2306 // The default CC on x86-64 sets %al to the number of SSE
2307 // registers used, and GCC sets this when calling an unprototyped
2308 // function, so we override the default behavior. However, don't do
2309 // that when AVX types are involved: the ABI explicitly states it is
2310 // undefined, and it doesn't work in practice because of how the ABI
2311 // defines varargs anyway.
2312 if (fnType->getCallConv() == CC_C) {
2313 bool HasAVXType = false;
2314 for (CallArgList::const_iterator
2315 it = args.begin(), ie = args.end(); it != ie; ++it) {
2316 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2317 HasAVXType = true;
2318 break;
2319 }
2320 }
2322 if (!HasAVXType)
2323 return true;
2324 }
2326 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2327 }
2329 llvm::Constant *
2330 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2331 unsigned Sig = (0xeb << 0) | // jmp rel8
2332 (0x06 << 8) | // .+0x08
2333 ('v' << 16) |
2334 ('2' << 24);
2335 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2336 }
2338 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2339 CodeGen::CodeGenModule &CGM) const override {
2340 if (GV->isDeclaration())
2341 return;
2342 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2343 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2344 llvm::Function *Fn = cast<llvm::Function>(GV);
2345 Fn->addFnAttr("stackrealign");
2346 }
2347 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2348 llvm::Function *Fn = cast<llvm::Function>(GV);
2349 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2350 }
2351 }
2352 }
2353 };
2355 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2356 // If the argument does not end in .lib, automatically add the suffix.
2357 // If the argument contains a space, enclose it in quotes.
2358 // This matches the behavior of MSVC.
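// Editor's examples of the behavior described above: "msvcrt" becomes
// "msvcrt.lib", "foo.LIB" keeps its existing suffix, and a name containing a
// space such as "my lib" becomes "\"my lib.lib\"".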
2359 bool Quote = (Lib.find(" ") != StringRef::npos);
2360 std::string ArgStr = Quote ? "\"" : "";
2361 ArgStr += Lib;
2362 if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
2363 ArgStr += ".lib";
2364 ArgStr += Quote ? "\"" : "";
2365 return ArgStr;
2366 }
2368 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2369 public:
2370 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2371 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2372 unsigned NumRegisterParameters)
2373 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2374 Win32StructABI, NumRegisterParameters, false) {}
2376 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2377 CodeGen::CodeGenModule &CGM) const override;
2379 void getDependentLibraryOption(llvm::StringRef Lib,
2380 llvm::SmallString<24> &Opt) const override {
2381 Opt = "/DEFAULTLIB:";
2382 Opt += qualifyWindowsLibrary(Lib);
2383 }
2385 void getDetectMismatchOption(llvm::StringRef Name,
2386 llvm::StringRef Value,
2387 llvm::SmallString<32> &Opt) const override {
2388 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2389 }
2390 };
2392 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2393 CodeGen::CodeGenModule &CGM) {
2394 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2396 if (CGM.getCodeGenOpts().StackProbeSize != 4096)
2397 Fn->addFnAttr("stack-probe-size",
2398 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2399 if (CGM.getCodeGenOpts().NoStackArgProbe)
2400 Fn->addFnAttr("no-stack-arg-probe");
2401 }
2402 }
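// Editor's sketch of the observable effect (hedged): compiling with a probe
// size of 8192 attaches the IR attribute "stack-probe-size"="8192" to each
// function, and the no-stack-arg-probe option attaches "no-stack-arg-probe";
// both are consumed by the backend when it emits __chkstk-style probes.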
2404 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2405 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2406 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2407 if (GV->isDeclaration())
2408 return;
2409 addStackProbeTargetAttributes(D, GV, CGM);
2410 }
2412 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2413 public:
2414 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2415 X86AVXABILevel AVXLevel)
2416 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT, AVXLevel)) {}
2418 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2419 CodeGen::CodeGenModule &CGM) const override;
2421 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2422 return 7;
2423 }
2425 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2426 llvm::Value *Address) const override {
2427 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2429 // 0-15 are the 16 integer registers.
2430 // 16 is %rip.
2431 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2432 return false;
2433 }
2435 void getDependentLibraryOption(llvm::StringRef Lib,
2436 llvm::SmallString<24> &Opt) const override {
2437 Opt = "/DEFAULTLIB:";
2438 Opt += qualifyWindowsLibrary(Lib);
2439 }
2441 void getDetectMismatchOption(llvm::StringRef Name,
2442 llvm::StringRef Value,
2443 llvm::SmallString<32> &Opt) const override {
2444 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2445 }
2446 };
2448 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2449 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2450 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2451 if (GV->isDeclaration())
2452 return;
2453 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2454 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2455 llvm::Function *Fn = cast<llvm::Function>(GV);
2456 Fn->addFnAttr("stackrealign");
2457 }
2458 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2459 llvm::Function *Fn = cast<llvm::Function>(GV);
2460 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2461 }
2462 }
2464 addStackProbeTargetAttributes(D, GV, CGM);
2465 }
2468 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2470 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2472 // (a) If one of the classes is Memory, the whole argument is passed in
2473 // memory.
2475 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2476 // memory.
2478 // (c) If the size of the aggregate exceeds two eightbytes and the first
2479 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2480 // argument is passed in memory. NOTE: This is necessary to keep the
2481 // ABI working for processors that don't support the __m256 type.
2483 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2485 // Some of these are enforced by the merging logic. Others can arise
2486 // only with unions; for example:
2487 // union { _Complex double; unsigned; }
2489 // Note that clauses (b) and (c) were added in 0.98.
2491 if (Hi == Memory)
2492 Lo = Memory;
2493 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2494 Lo = Memory;
2495 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2496 Lo = Memory;
2497 if (Hi == SSEUp && Lo != SSE)
2498 Hi = SSE;
2499 }
2501 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2502 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2503 // classified recursively so that always two fields are
2504 // considered. The resulting class is calculated according to
2505 // the classes of the fields in the eightbyte:
2507 // (a) If both classes are equal, this is the resulting class.
2509 // (b) If one of the classes is NO_CLASS, the resulting class is
2510 // the other class.
2512 // (c) If one of the classes is MEMORY, the result is the MEMORY
2513 // class.
2515 // (d) If one of the classes is INTEGER, the result is the
2516 // INTEGER.
2518 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2519 // MEMORY is used as class.
2521 // (f) Otherwise class SSE is used.
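// Editor's worked instance of the rules above (hypothetical type): for
//   struct S { int a; float b; };
// both fields occupy eightbyte 0, so the merge sees INTEGER ('a') and SSE
// ('b'); rule (d) makes the result INTEGER and S is passed in a single GPR.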
2523 // Accum should never be memory (we should have returned) or
2524 // ComplexX87 (because this cannot be passed in a structure).
2525 assert((Accum != Memory && Accum != ComplexX87) &&
2526 "Invalid accumulated classification during merge.");
2527 if (Accum == Field || Field == NoClass)
2528 return Accum;
2529 if (Field == Memory)
2530 return Memory;
2531 if (Accum == NoClass)
2532 return Field;
2533 if (Accum == Integer || Field == Integer)
2534 return Integer;
2535 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2536 Accum == X87 || Accum == X87Up)
2537 return Memory;
2538 return SSE;
2539 }
2541 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2542 Class &Lo, Class &Hi, bool isNamedArg) const {
2543 // FIXME: This code can be simplified by introducing a simple value class for
2544 // Class pairs with appropriate constructor methods for the various
2545 // situations.
2547 // FIXME: Some of the split computations are wrong; unaligned vectors
2548 // shouldn't be passed in registers for example, so there is no chance they
2549 // can straddle an eightbyte. Verify & simplify.
2551 Lo = Hi = NoClass;
2553 Class &Current = OffsetBase < 64 ? Lo : Hi;
2554 Current = Memory;
2556 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2557 BuiltinType::Kind k = BT->getKind();
2559 if (k == BuiltinType::Void) {
2560 Current = NoClass;
2561 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2562 Lo = Integer;
2563 Hi = Integer;
2564 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2565 Current = Integer;
2566 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2567 Current = SSE;
2568 } else if (k == BuiltinType::LongDouble) {
2569 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2570 if (LDF == &llvm::APFloat::IEEEquad()) {
2571 Lo = SSE;
2572 Hi = SSEUp;
2573 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2574 Lo = X87;
2575 Hi = X87Up;
2576 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2577 Current = SSE;
2578 } else
2579 llvm_unreachable("unexpected long double representation!");
2580 }
2581 // FIXME: _Decimal32 and _Decimal64 are SSE.
2582 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2583 return;
2584 }
2586 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2587 // Classify the underlying integer type.
2588 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2589 return;
2590 }
2592 if (Ty->hasPointerRepresentation()) {
2593 Current = Integer;
2594 return;
2595 }
2597 if (Ty->isMemberPointerType()) {
2598 if (Ty->isMemberFunctionPointerType()) {
2599 if (Has64BitPointers) {
2600 // If Has64BitPointers, this is an {i64, i64}, so classify both
2601 // halves as integer.
2602 Lo = Hi = Integer;
2603 } else {
2604 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2605 // straddles an eightbyte boundary, Hi should be classified as well.
2606 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2607 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2608 if (EB_FuncPtr != EB_ThisAdj) {
2609 Lo = Hi = Integer;
2610 } else {
2611 Current = Integer;
2612 }
2613 }
2614 } else {
2615 Current = Integer;
2616 }
2617 return;
2618 }
2620 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2621 uint64_t Size = getContext().getTypeSize(VT);
2622 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2623 // gcc passes the following as integer:
2624 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2625 // 2 bytes - <2 x char>, <1 x short>
2626 // 1 byte - <1 x char>
2627 Current = Integer;
2629 // If this type crosses an eightbyte boundary, it should be
2630 // split.
2631 uint64_t EB_Lo = (OffsetBase) / 64;
2632 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2633 if (EB_Lo != EB_Hi)
2634 Hi = Lo;
2635 } else if (Size == 64) {
2636 QualType ElementType = VT->getElementType();
2638 // gcc passes <1 x double> in memory. :(
2639 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2640 return;
2642 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2643 // pass them as integer. For platforms where clang is the de facto
2644 // platform compiler, we must continue to use integer.
2645 if (!classifyIntegerMMXAsSSE() &&
2646 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2647 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2648 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2649 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2650 Current = Integer;
2651 else
2652 Current = SSE;
2654 // If this type crosses an eightbyte boundary, it should be
2655 // split.
2656 if (OffsetBase && OffsetBase != 64)
2657 Hi = Lo;
2658 } else if (Size == 128 ||
2659 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2660 // Arguments of 256-bits are split into four eightbyte chunks. The
2661 // least significant one belongs to class SSE and all the others to class
2662 // SSEUP. The original Lo and Hi design considers that types can't be
2663 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2664 // This design isn't correct for 256-bits, but since there are no cases
2665 // where the upper parts would need to be inspected, avoid adding
2666 // complexity and just consider Hi to match the 64-256 part.
2668 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2669 // registers if they are "named", i.e. not part of the "..." of a
2670 // variadic function.
2672 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2673 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2674 Lo = SSE;
2675 Hi = SSEUp;
2676 }
2677 return;
2678 }
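// Editor's example: a named __m256 argument thus classifies as Lo = SSE,
// Hi = SSEUp and travels in a single YMM register, while the same argument
// in the variadic "..." position fails the isNamedArg test above and goes
// to memory.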
2680 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2681 QualType ET = getContext().getCanonicalType(CT->getElementType());
2683 uint64_t Size = getContext().getTypeSize(Ty);
2684 if (ET->isIntegralOrEnumerationType()) {
2685 if (Size <= 64)
2686 Current = Integer;
2687 else if (Size <= 128)
2688 Lo = Hi = Integer;
2689 } else if (ET == getContext().FloatTy) {
2690 Current = SSE;
2691 } else if (ET == getContext().DoubleTy) {
2692 Lo = Hi = SSE;
2693 } else if (ET == getContext().LongDoubleTy) {
2694 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2695 if (LDF == &llvm::APFloat::IEEEquad())
2696 Current = Memory;
2697 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2698 Current = ComplexX87;
2699 else if (LDF == &llvm::APFloat::IEEEdouble())
2700 Lo = Hi = SSE;
2701 else
2702 llvm_unreachable("unexpected long double representation!");
2703 }
2705 // If this complex type crosses an eightbyte boundary then it
2706 // should be split.
2707 uint64_t EB_Real = (OffsetBase) / 64;
2708 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2709 if (Hi == NoClass && EB_Real != EB_Imag)
2710 Hi = Lo;
2712 return;
2713 }
2715 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2716 // Arrays are treated like structures.
2718 uint64_t Size = getContext().getTypeSize(Ty);
2720 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2721 // than eight eightbytes, ..., it has class MEMORY.
2722 if (Size > 512)
2723 return;
2725 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2726 // fields, it has class MEMORY.
2728 // Only need to check alignment of array base.
2729 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2730 return;
2732 // Otherwise implement simplified merge. We could be smarter about
2733 // this, but it isn't worth it and would be harder to verify.
2734 Current = NoClass;
2735 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2736 uint64_t ArraySize = AT->getSize().getZExtValue();
2738 // The only case a 256-bit wide vector could be used is when the array
2739 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2740 // to work for sizes wider than 128, early check and fallback to memory.
2742 if (Size > 128 &&
2743 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2744 return;
2746 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2747 Class FieldLo, FieldHi;
2748 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2749 Lo = merge(Lo, FieldLo);
2750 Hi = merge(Hi, FieldHi);
2751 if (Lo == Memory || Hi == Memory)
2752 break;
2753 }
2755 postMerge(Size, Lo, Hi);
2756 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2757 return;
2758 }
2760 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2761 uint64_t Size = getContext().getTypeSize(Ty);
2763 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2764 // than eight eightbytes, ..., it has class MEMORY.
2765 if (Size > 512)
2766 return;
2768 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
2769 // copy constructor or a non-trivial destructor, it is passed by invisible
2770 // reference.
2771 if (getRecordArgABI(RT, getCXXABI()))
2772 return;
2774 const RecordDecl *RD = RT->getDecl();
2776 // Assume variable sized types are passed in memory.
2777 if (RD->hasFlexibleArrayMember())
2778 return;
2780 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2782 // Reset Lo class, this will be recomputed.
2783 Current = NoClass;
2785 // If this is a C++ record, classify the bases first.
2786 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2787 for (const auto &I : CXXRD->bases()) {
2788 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2789 "Unexpected base class!");
2790 const CXXRecordDecl *Base =
2791 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2793 // Classify this field.
2795 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
2796 // single eightbyte, each is classified separately. Each eightbyte gets
2797 // initialized to class NO_CLASS.
2798 Class FieldLo, FieldHi;
2799 uint64_t Offset =
2800 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
2801 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2802 Lo = merge(Lo, FieldLo);
2803 Hi = merge(Hi, FieldHi);
2804 if (Lo == Memory || Hi == Memory) {
2805 postMerge(Size, Lo, Hi);
2806 return;
2807 }
2808 }
2809 }
2811 // Classify the fields one at a time, merging the results.
2812 unsigned idx = 0;
2813 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2814 i != e; ++i, ++idx) {
2815 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2816 bool BitField = i->isBitField();
2818 // Ignore padding bit-fields.
2819 if (BitField && i->isUnnamedBitfield())
2820 continue;
2822 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
2823 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
2825 // The only case a 256-bit wide vector could be used is when the struct
2826 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2827 // to work for sizes wider than 128, early check and fallback to memory.
2829 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
2830 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2831 Lo = Memory;
2832 postMerge(Size, Lo, Hi);
2833 return;
2834 }
2835 // Note, skip this test for bit-fields, see below.
2836 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2837 Lo = Memory;
2838 postMerge(Size, Lo, Hi);
2839 return;
2840 }
2842 // Classify this field.
2844 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2845 // exceeds a single eightbyte, each is classified
2846 // separately. Each eightbyte gets initialized to class
2848 Class FieldLo, FieldHi;
2850 // Bit-fields require special handling, they do not force the
2851 // structure to be passed in memory even if unaligned, and
2852 // therefore they can straddle an eightbyte.
2853 if (BitField) {
2854 assert(!i->isUnnamedBitfield());
2855 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2856 uint64_t Size = i->getBitWidthValue(getContext());
2858 uint64_t EB_Lo = Offset / 64;
2859 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2861 if (EB_Lo) {
2862 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2863 FieldLo = NoClass;
2864 FieldHi = Integer;
2865 } else {
2866 FieldLo = Integer;
2867 FieldHi = EB_Hi ? Integer : NoClass;
2868 }
2869 } else
2870 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2871 Lo = merge(Lo, FieldLo);
2872 Hi = merge(Hi, FieldHi);
2873 if (Lo == Memory || Hi == Memory)
2874 break;
2875 }
2877 postMerge(Size, Lo, Hi);
2878 }
2879 }
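// Editor's worked example for the record path above (hypothetical type):
//   struct S { double d; long l; };
// classifies 'd' into eightbyte 0 (SSE) and 'l' into eightbyte 1 (INTEGER),
// so classify() produces Lo = SSE, Hi = Integer: one XMM register plus one
// GPR when registers are available.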
2881 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2882 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2883 // place naturally.
2884 if (!isAggregateTypeForABI(Ty)) {
2885 // Treat an enum type as its underlying type.
2886 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2887 Ty = EnumTy->getDecl()->getIntegerType();
2889 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
2890 : ABIArgInfo::getDirect());
2893 return getNaturalAlignIndirect(Ty);
2894 }
2896 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2897 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2898 uint64_t Size = getContext().getTypeSize(VecTy);
2899 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2900 if (Size <= 64 || Size > LargestVector)
2901 return true;
2902 }
2904 return false;
2905 }
2907 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2908 unsigned freeIntRegs) const {
2909 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2910 // place naturally.
2912 // This assumption is optimistic, as there could be free registers available
2913 // when we need to pass this argument in memory, and LLVM could try to pass
2914 // the argument in the free register. This does not seem to happen currently,
2915 // but this code would be much safer if we could mark the argument with
2916 // 'onstack'. See PR12193.
2917 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2918 // Treat an enum type as its underlying type.
2919 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2920 Ty = EnumTy->getDecl()->getIntegerType();
2922 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
2923 : ABIArgInfo::getDirect());
2926 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2927 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
2929 // Compute the byval alignment. We specify the alignment of the byval in all
2930 // cases so that the mid-level optimizer knows the alignment of the byval.
2931 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2933 // Attempt to avoid passing indirect results using byval when possible. This
2934 // is important for good codegen.
2936 // We do this by coercing the value into a scalar type which the backend can
2937 // handle naturally (i.e., without using byval).
2939 // For simplicity, we currently only do this when we have exhausted all of the
2940 // free integer registers. Doing this when there are free integer registers
2941 // would require more care, as we would have to ensure that the coerced value
2942 // did not claim the unused register. That would require either reordering the
2943 // arguments to the function (so that any subsequent inreg values came first),
2944 // or only doing this optimization when there were no following arguments that
2945 // might be inreg.
2947 // We currently expect it to be rare (particularly in well written code) for
2948 // arguments to be passed on the stack when there are still free integer
2949 // registers available (this would typically imply large structs being passed
2950 // by value), so this seems like a fair tradeoff for now.
2952 // We can revisit this if the backend grows support for 'onstack' parameter
2953 // attributes. See PR12193.
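// Editor's sketch: once freeIntRegs reaches 0, a hypothetical
//   struct Tag { long v; };  // 8 bytes, 8-byte aligned
// is coerced below to a bare i64 that lands on the stack without byval,
// while larger or more strictly aligned aggregates still take the indirect
// path with an explicit alignment.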
2954 if (freeIntRegs == 0) {
2955 uint64_t Size = getContext().getTypeSize(Ty);
2957 // If this type fits in an eightbyte, coerce it into the matching integral
2958 // type, which will end up on the stack (with alignment 8).
2959 if (Align == 8 && Size <= 64)
2960 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2961 Size));
2962 }
2964 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2965 }
2967 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2968 /// register. Pick an LLVM IR type that will be passed as a vector register.
2969 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2970 // Wrapper structs/arrays that only contain vectors are passed just like
2971 // vectors; strip them off if present.
2972 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2973 Ty = QualType(InnerTy, 0);
2975 llvm::Type *IRType = CGT.ConvertType(Ty);
2976 if (isa<llvm::VectorType>(IRType) ||
2977 IRType->getTypeID() == llvm::Type::FP128TyID)
2978 return IRType;
2980 // We couldn't find the preferred IR vector type for 'Ty'.
2981 uint64_t Size = getContext().getTypeSize(Ty);
2982 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
2984 // Return a LLVM IR vector type based on the size of 'Ty'.
2985 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2986 Size / 64);
2987 }
2989 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2990 /// is known to either be off the end of the specified type or being in
2991 /// alignment padding. The user type specified is known to be at most 128 bits
2992 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2993 /// classification that put one of the two halves in the INTEGER class.
2995 /// It is conservatively correct to return false.
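/// Editor's example: for a hypothetical struct { float a, b, c; } (96 bits),
/// BitsContainNoUserData(Ty, 96, 128, Ctx) is true because the type ends
/// before the queried range begins; that is exactly what lets the second
/// eightbyte of such a struct be passed as a single float.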
2996 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2997 unsigned EndBit, ASTContext &Context) {
2998 // If the bytes being queried are off the end of the type, there is no user
2999 // data hiding here. This handles analysis of builtins, vectors and other
3000 // types that don't contain interesting padding.
3001 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
3002 if (TySize <= StartBit)
3003 return true;
3005 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3006 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
3007 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3009 // Check each element to see if the element overlaps with the queried range.
3010 for (unsigned i = 0; i != NumElts; ++i) {
3011 // If the element is after the span we care about, then we're done.
3012 unsigned EltOffset = i*EltSize;
3013 if (EltOffset >= EndBit) break;
3015 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3016 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
3017 EndBit-EltOffset, Context))
3018 return false;
3019 }
3020 // If it overlaps no elements, then it is safe to process as padding.
3021 return true;
3022 }
3024 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3025 const RecordDecl *RD = RT->getDecl();
3026 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3028 // If this is a C++ record, check the bases first.
3029 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3030 for (const auto &I : CXXRD->bases()) {
3031 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3032 "Unexpected base class!");
3033 const CXXRecordDecl *Base =
3034 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
3036 // If the base is after the span we care about, ignore it.
3037 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
3038 if (BaseOffset >= EndBit) continue;
3040 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3041 if (!BitsContainNoUserData(I.getType(), BaseStart,
3042 EndBit-BaseOffset, Context))
3043 return false;
3044 }
3045 }
3047 // Verify that no field has data that overlaps the region of interest. Yes
3048 // this could be sped up a lot by being smarter about queried fields,
3049 // however we're only looking at structs up to 16 bytes, so we don't care
3050 // about excess runtime.
3051 unsigned idx = 0;
3052 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3053 i != e; ++i, ++idx) {
3054 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
3056 // If we found a field after the region we care about, then we're done.
3057 if (FieldOffset >= EndBit) break;
3059 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3060 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
3061 Context))
3062 return false;
3063 }
3065 // If nothing in this record overlapped the area of interest, then we're
3066 // done.
3067 return true;
3068 }
3070 return false;
3071 }
3073 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
3074 /// float member at the specified offset. For example, {int,{float}} has a
3075 /// float at offset 4. It is conservatively correct for this routine to return
3076 /// false.
3077 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
3078 const llvm::DataLayout &TD) {
3079 // Base case if we find a float.
3080 if (IROffset == 0 && IRType->isFloatTy())
3081 return true;
3083 // If this is a struct, recurse into the field at the specified offset.
3084 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3085 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3086 unsigned Elt = SL->getElementContainingOffset(IROffset);
3087 IROffset -= SL->getElementOffset(Elt);
3088 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3091 // If this is an array, recurse into the field at the specified offset.
3092 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3093 llvm::Type *EltTy = ATy->getElementType();
3094 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3095 IROffset -= IROffset/EltSize*EltSize;
3096 return ContainsFloatAtOffset(EltTy, IROffset, TD);
3097 }
3099 return false;
3100 }
3103 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3104 /// low 8 bytes of an XMM register, corresponding to the SSE class.
3105 llvm::Type *X86_64ABIInfo::
3106 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3107 QualType SourceTy, unsigned SourceOffset) const {
3108 // The only three choices we have are double, <2 x float>, or float. We pass
3109 // as float if the last 4 bytes are just padding. This happens for structs
3110 // that contain 3 floats.
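// Editor's example: for struct { float x, y, z; } the first eightbyte has
// floats at offsets 0 and 4 and becomes <2 x float>, while the second
// eightbyte holds only 'z' plus tail padding and becomes a scalar float
// via the padding check directly below.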
3111 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3112 SourceOffset*8+64, getContext()))
3113 return llvm::Type::getFloatTy(getVMContext());
3115 // We want to pass as <2 x float> if the LLVM IR type contains a float at
3116 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
3117 // case.
3118 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3119 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3120 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
3122 return llvm::Type::getDoubleTy(getVMContext());
3123 }
3126 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3127 /// an 8-byte GPR. This means that we either have a scalar or we are talking
3128 /// about the high or low part of an up-to-16-byte struct. This routine picks
3129 /// the best LLVM IR type to represent this, which may be i64 or may be anything
3130 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3131 /// etc).
3133 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3134 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
3135 /// the 8-byte value references. PrefType may be null.
3137 /// SourceTy is the source-level type for the entire argument. SourceOffset is
3138 /// an offset into this that we're processing (which is always either 0 or 8).
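/// Editor's example: for struct { double d; int i; } the high eightbyte is
/// requested with IROffset == 8; the i32 field passes the tail-padding test
/// against the source type, so the pair is lowered as {double, i32} instead
/// of widening the integer half to i64.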
3140 llvm::Type *X86_64ABIInfo::
3141 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3142 QualType SourceTy, unsigned SourceOffset) const {
3143 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3144 // returning an 8-byte unit starting with it. See if we can safely use it.
3145 if (IROffset == 0) {
3146 // Pointers and int64's always fill the 8-byte unit.
3147 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3148 IRType->isIntegerTy(64))
3149 return IRType;
3151 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3152 // goodness in the source type is just tail padding. This is allowed to
3153 // kick in for struct {double,int} on the int, but not on
3154 // struct{double,int,int} because we wouldn't return the second int. We
3155 // have to do this analysis on the source type because we can't depend on
3156 // unions being lowered a specific way etc.
3157 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3158 IRType->isIntegerTy(32) ||
3159 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3160 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3161 cast<llvm::IntegerType>(IRType)->getBitWidth();
3163 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3164 SourceOffset*8+64, getContext()))
3165 return IRType;
3166 }
3167 }
3169 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3170 // If this is a struct, recurse into the field at the specified offset.
3171 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3172 if (IROffset < SL->getSizeInBytes()) {
3173 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3174 IROffset -= SL->getElementOffset(FieldIdx);
3176 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3177 SourceTy, SourceOffset);
3178 }
3179 }
3181 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3182 llvm::Type *EltTy = ATy->getElementType();
3183 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3184 unsigned EltOffset = IROffset/EltSize*EltSize;
3185 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3186 SourceOffset);
3187 }
3189 // Okay, we don't have any better idea of what to pass, so we pass this in an
3190 // integer register that isn't too big to fit the rest of the struct.
3191 unsigned TySizeInBytes =
3192 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3194 assert(TySizeInBytes != SourceOffset && "Empty field?");
3196 // It is always safe to classify this as an integer type up to i64 that
3197 // isn't larger than the structure.
3198 return llvm::IntegerType::get(getVMContext(),
3199 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3200 }
3203 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3204 /// be used as elements of a two register pair to pass or return, return a
3205 /// first class aggregate to represent them. For example, if the low part of
3206 /// a by-value argument should be passed as i32* and the high part as float,
3207 /// return {i32*, float}.
3208 static llvm::Type *
3209 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3210 const llvm::DataLayout &TD) {
3211 // In order to correctly satisfy the ABI, we need the high part to start
3212 // at offset 8. If the high and low parts we inferred are both 4-byte types
3213 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3214 // the second element at offset 8. Check for this:
3215 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3216 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3217 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3218 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3220 // To handle this, we have to increase the size of the low part so that the
3221 // second element will start at an 8 byte offset. We can't increase the size
3222 // of the second element because it might make us access off the end of the
3223 // struct.
3225 // There are usually two sorts of types the ABI generation code can produce
3226 // for the low part of a pair that aren't 8 bytes in size: float or
3227 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3228 // NaCl).
3229 // Promote these to a larger type.
3230 if (Lo->isFloatTy())
3231 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3232 else {
3233 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3234 && "Invalid/unknown lo type");
3235 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3236 }
3237 }
3239 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3241 // Verify that the second element is at an 8-byte offset.
3242 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3243 "Invalid x86-64 argument pair!");
3244 return Result;
3245 }
3247 ABIArgInfo X86_64ABIInfo::
3248 classifyReturnType(QualType RetTy) const {
3249 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3250 // classification algorithm.
3251 X86_64ABIInfo::Class Lo, Hi;
3252 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3254 // Check some invariants.
3255 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3256 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3258 llvm::Type *ResType = nullptr;
3259 switch (Lo) {
3260 case NoClass:
3261 if (Hi == NoClass)
3262 return ABIArgInfo::getIgnore();
3263 // If the low part is just padding, it takes no register, leave ResType
3264 // null.
3265 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3266 "Unknown missing lo part");
3267 break;
3269 case SSEUp:
3270 case X87Up:
3271 llvm_unreachable("Invalid classification for lo word.");
3273 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3274 // hidden argument.
3275 case Memory:
3276 return getIndirectReturnResult(RetTy);
3278 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3279 // available register of the sequence %rax, %rdx is used.
3280 case Integer:
3281 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3283 // If we have a sign or zero extended integer, make sure to return Extend
3284 // so that the parameter gets the right LLVM IR attributes.
3285 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3286 // Treat an enum type as its underlying type.
3287 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3288 RetTy = EnumTy->getDecl()->getIntegerType();
3290 if (RetTy->isIntegralOrEnumerationType() &&
3291 RetTy->isPromotableIntegerType())
3292 return ABIArgInfo::getExtend(RetTy);
3293 }
3294 break;
3296 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3297 // available SSE register of the sequence %xmm0, %xmm1 is used.
3298 case SSE:
3299 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3300 break;
3302 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3303 // returned on the X87 stack in %st0 as 80-bit x87 number.
3304 case X87:
3305 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3306 break;
3308 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3309 // part of the value is returned in %st0 and the imaginary part in
3310 // %st1.
3311 case ComplexX87:
3312 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3313 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3314 llvm::Type::getX86_FP80Ty(getVMContext()));
3315 break;
3316 }
3318 llvm::Type *HighPart = nullptr;
3319 switch (Hi) {
3320 // Memory was handled previously and X87 should
3321 // never occur as a hi class.
3322 case Memory:
3323 case X87:
3324 llvm_unreachable("Invalid classification for hi word.");
3326 case ComplexX87: // Previously handled.
3327 case NoClass:
3328 break;
3330 case Integer:
3331 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3332 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3333 return ABIArgInfo::getDirect(HighPart, 8);
3334 break;
3335 case SSE:
3336 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3337 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3338 return ABIArgInfo::getDirect(HighPart, 8);
3339 break;
3341 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3342 // is passed in the upper half of the last used vector register.
3345 // SSEUP should always be preceded by SSE, just widen.
3346 case SSEUp:
3347 assert(Lo == SSE && "Unexpected SSEUp classification.");
3348 ResType = GetByteVectorType(RetTy);
3349 break;
3351 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3352 // returned together with the previous X87 value in %st0.
3354 // If X87Up is preceded by X87, we don't need to do
3355 // anything. However, in some cases with unions it may not be
3356 // preceded by X87. In such situations we follow gcc and pass the
3357 // extra bits in an SSE reg.
3358 case X87Up:
3359 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3360 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3361 return ABIArgInfo::getDirect(HighPart, 8);
3362 break;
3363 }
3366 // If a high part was specified, merge it together with the low part. It is
3367 // known to pass in the high eightbyte of the result. We do this by forming a
3368 // first class struct aggregate with the high and low part: {low, high}
3369 if (HighPart)
3370 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3372 return ABIArgInfo::getDirect(ResType);
3373 }
3375 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3376 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3377 bool isNamedArg) const {
3380 Ty = useFirstFieldIfTransparentUnion(Ty);
3382 X86_64ABIInfo::Class Lo, Hi;
3383 classify(Ty, 0, Lo, Hi, isNamedArg);
3385 // Check some invariants.
3386 // FIXME: Enforce these by construction.
3387 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3388 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3392 llvm::Type *ResType = nullptr;
3393 switch (Lo) {
3394 case NoClass:
3395 if (Hi == NoClass)
3396 return ABIArgInfo::getIgnore();
3397 // If the low part is just padding, it takes no register, leave ResType
3398 // null.
3399 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3400 "Unknown missing lo part");
3401 break;
3403 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3404 // on the stack.
3405 case Memory:
3407 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3408 // COMPLEX_X87, it is passed in memory.
3409 case X87:
3410 case ComplexX87:
3411 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3412 ++neededInt;
3413 return getIndirectResult(Ty, freeIntRegs);
3415 case SSEUp:
3416 case X87Up:
3417 llvm_unreachable("Invalid classification for lo word.");
3419 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3420 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3421 // and %r9 is used.
3422 case Integer:
3423 ++neededInt;
3425 // Pick an 8-byte type based on the preferred type.
3426 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3428 // If we have a sign or zero extended integer, make sure to return Extend
3429 // so that the parameter gets the right LLVM IR attributes.
3430 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3431 // Treat an enum type as its underlying type.
3432 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3433 Ty = EnumTy->getDecl()->getIntegerType();
3435 if (Ty->isIntegralOrEnumerationType() &&
3436 Ty->isPromotableIntegerType())
3437 return ABIArgInfo::getExtend(Ty);
3438 }
3440 break;
3442 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3443 // available SSE register is used, the registers are taken in the
3444 // order from %xmm0 to %xmm7.
3445 case SSE: {
3446 llvm::Type *IRType = CGT.ConvertType(Ty);
3447 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3448 ++neededSSE;
3449 break;
3450 }
3451 }
3453 llvm::Type *HighPart = nullptr;
3454 switch (Hi) {
3455 // Memory was handled previously, ComplexX87 and X87 should
3456 // never occur as hi classes, and X87Up must be preceded by X87,
3457 // which is passed in memory.
3458 case Memory:
3459 case X87:
3460 case ComplexX87:
3461 llvm_unreachable("Invalid classification for hi word.");
3463 case NoClass: break;
3465 case Integer:
3466 ++neededInt;
3467 // Pick an 8-byte type based on the preferred type.
3468 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3470 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3471 return ABIArgInfo::getDirect(HighPart, 8);
3472 break;
3474 // X87Up generally doesn't occur here (long double is passed in
3475 // memory), except in situations involving unions.
3476 case X87Up:
3477 case SSE:
3478 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3480 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3481 return ABIArgInfo::getDirect(HighPart, 8);
3483 ++neededSSE;
3484 break;
3486 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3487 // eightbyte is passed in the upper half of the last used SSE
3488 // register. This only happens when 128-bit vectors are passed.
3489 case SSEUp:
3490 assert(Lo == SSE && "Unexpected SSEUp classification");
3491 ResType = GetByteVectorType(Ty);
3492 break;
3493 }
3495 // If a high part was specified, merge it together with the low part. It is
3496 // known to pass in the high eightbyte of the result. We do this by forming a
3497 // first class struct aggregate with the high and low part: {low, high}
3498 if (HighPart)
3499 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3501 return ABIArgInfo::getDirect(ResType);
3502 }
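// Illustrative example (annotation, not part of the upstream source): a
// named __m128 argument classifies as Lo = SSE, Hi = SSEUp; GetByteVectorType
// keeps it a single 16-byte vector, so it occupies one XMM register and
// neededSSE is bumped once, not twice.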
3504 ABIArgInfo
3505 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3506 unsigned &NeededSSE) const {
3507 auto RT = Ty->getAs<RecordType>();
3508 assert(RT && "classifyRegCallStructType only valid with struct types");
3510 if (RT->getDecl()->hasFlexibleArrayMember())
3511 return getIndirectReturnResult(Ty);
3513 // Sum up bases.
3514 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3515 if (CXXRD->isDynamicClass()) {
3516 NeededInt = NeededSSE = 0;
3517 return getIndirectReturnResult(Ty);
3518 }
3520 for (const auto &I : CXXRD->bases())
3521 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3522 .isIndirect()) {
3523 NeededInt = NeededSSE = 0;
3524 return getIndirectReturnResult(Ty);
3525 }
3526 }
3528 // Sum up members.
3529 for (const auto *FD : RT->getDecl()->fields()) {
3530 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3531 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3532 .isIndirect()) {
3533 NeededInt = NeededSSE = 0;
3534 return getIndirectReturnResult(Ty);
3535 }
3536 } else {
3537 unsigned LocalNeededInt, LocalNeededSSE;
3538 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3539 LocalNeededSSE, true)
3540 .isIndirect()) {
3541 NeededInt = NeededSSE = 0;
3542 return getIndirectReturnResult(Ty);
3543 }
3544 NeededInt += LocalNeededInt;
3545 NeededSSE += LocalNeededSSE;
3546 }
3547 }
3549 return ABIArgInfo::getDirect();
3550 }
3552 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3553 unsigned &NeededInt,
3554 unsigned &NeededSSE) const {
3555 NeededInt = 0;
3556 NeededSSE = 0;
3559 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3560 }
3562 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3564 const unsigned CallingConv = FI.getCallingConvention();
3565 // It is possible to force the Win64 calling convention on any x86_64 target
3566 // by using __attribute__((ms_abi)). In such a case, to emit Win64-compatible
3567 // code correctly, delegate this call to WinX86_64ABIInfo::computeInfo.
3568 if (CallingConv == llvm::CallingConv::Win64) {
3569 WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
3570 Win64ABIInfo.computeInfo(FI);
3571 return;
3572 }
3574 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3576 // Keep track of the number of assigned registers.
3577 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3578 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3579 unsigned NeededInt, NeededSSE;
3581 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
3582 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3583 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3584 FI.getReturnInfo() =
3585 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3586 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3587 FreeIntRegs -= NeededInt;
3588 FreeSSERegs -= NeededSSE;
3589 } else {
3590 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3591 }
3592 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) {
3593 // A complex long double is returned in memory when the regcall
3594 // calling convention is used.
3595 const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>();
3596 if (getContext().getCanonicalType(CT->getElementType()) ==
3597 getContext().LongDoubleTy)
3598 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3599 else
3600 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3601 } else
3602 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
}
3603 // If the return value is indirect, then the hidden argument is consuming one
3604 // integer register.
3605 if (FI.getReturnInfo().isIndirect())
3606 --FreeIntRegs;
3608 // The chain argument effectively gives us another free register.
3609 if (FI.isChainCall())
3610 ++FreeIntRegs;
3612 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3613 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3614 // get assigned (in left-to-right order) for passing as follows...
3615 unsigned ArgNo = 0;
3616 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3617 it != ie; ++it, ++ArgNo) {
3618 bool IsNamedArg = ArgNo < NumRequiredArgs;
3620 if (IsRegCall && it->type->isStructureOrClassType())
3621 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3622 else
3623 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
3624 NeededSSE, IsNamedArg);
3626 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3627 // eightbyte of an argument, the whole argument is passed on the
3628 // stack. If registers have already been assigned for some
3629 // eightbytes of such an argument, the assignments get reverted.
3630 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3631 FreeIntRegs -= NeededInt;
3632 FreeSSERegs -= NeededSSE;
3633 } else {
3634 it->info = getIndirectResult(it->type, FreeIntRegs);
3635 }
3636 }
3637 }
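// Illustrative example (annotation, not part of the upstream source): for
//   void f(int, int, int, int, int, int, int);
// the first six int arguments consume %rdi, %rsi, %rdx, %rcx, %r8 and %r9;
// when the seventh is classified, FreeIntRegs is 0 < NeededInt, so the else
// branch above reverts the assignment and the argument is passed on the
// stack.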
3639 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3640 Address VAListAddr, QualType Ty) {
3641 Address overflow_arg_area_p =
3642 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
3643 llvm::Value *overflow_arg_area =
3644 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3646 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3647 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3648 // It isn't stated explicitly in the standard, but in practice we use
3649 // alignment greater than 16 where necessary.
3650 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3651 if (Align > CharUnits::fromQuantity(8)) {
3652 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3653 Align);
3654 }
3656 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3657 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3658 llvm::Value *Res =
3659 CGF.Builder.CreateBitCast(overflow_arg_area,
3660 llvm::PointerType::getUnqual(LTy));
3662 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3663 // l->overflow_arg_area + sizeof(type).
3664 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3665 // an 8 byte boundary.
3667 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3668 llvm::Value *Offset =
3669 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3670 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3671 "overflow_arg_area.next");
3672 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3674 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3675 return Address(Res, Align);
3678 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3679 QualType Ty) const {
3680 // Assume that va_list type is correct; should be pointer to LLVM type:
3681 // struct {
3682 // i32 gp_offset;
3683 // i32 fp_offset;
3684 // i8* overflow_arg_area;
3685 // i8* reg_save_area;
3686 // };
3687 unsigned neededInt, neededSSE;
3689 Ty = getContext().getCanonicalType(Ty);
3690 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3691 /*isNamedArg*/false);
3693 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3694 // in the registers. If not go to step 7.
3695 if (!neededInt && !neededSSE)
3696 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3698 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3699 // general purpose registers needed to pass type and num_fp to hold
3700 // the number of floating point registers needed.
3702 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3703 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3704 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3706 // NOTE: 304 is a typo in the spec; there are (6 * 8 + 8 * 16) = 176 bytes
3707 // of register save space.
3709 llvm::Value *InRegs = nullptr;
3710 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3711 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3712 if (neededInt) {
3713 gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
3714 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3715 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3716 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3717 }
3719 if (neededSSE) {
3720 fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
3721 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3722 llvm::Value *FitsInFP =
3723 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3724 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3725 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3726 }
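// Worked example (annotation, not part of the upstream source): for a
// 'double' vararg, neededSSE == 1, so the test is fp_offset <= 176 - 16 =
// 160. fp_offset starts at 48 (past the 6 * 8 bytes of GPR save slots) and
// advances by 16 per FP register, so exactly the first eight FP varargs are
// fetched from the register save area; later ones fall through to memory.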
3728 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3729 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3730 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3731 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3733 // Emit code to load the value if it was passed in registers.
3735 CGF.EmitBlock(InRegBlock);
3737 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3738 // an offset of l->gp_offset and/or l->fp_offset. This may require
3739 // copying to a temporary location in case the parameter is passed
3740 // in different register classes or requires an alignment greater
3741 // than 8 for general purpose registers and 16 for XMM registers.
3743 // FIXME: This really results in shameful code when we end up needing to
3744 // collect arguments from different places; often what should result in a
3745 // simple assembling of a structure from scattered addresses has many more
3746 // loads than necessary. Can we clean this up?
3747 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3748 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3749 CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
3751 Address RegAddr = Address::invalid();
3752 if (neededInt && neededSSE) {
3754 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3755 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3756 Address Tmp = CGF.CreateMemTemp(Ty);
3757 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3758 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3759 llvm::Type *TyLo = ST->getElementType(0);
3760 llvm::Type *TyHi = ST->getElementType(1);
3761 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3762 "Unexpected ABI info for mixed regs");
3763 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3764 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3765 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
3766 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
3767 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3768 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3770 // Copy the first element.
3771 // FIXME: Our choice of alignment here and below is probably pessimistic.
3772 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
3773 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
3774 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
3775 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
3777 // Copy the second element.
3778 V = CGF.Builder.CreateAlignedLoad(
3779 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
3780 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
3781 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
3783 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3784 } else if (neededInt) {
3785 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
3786 CharUnits::fromQuantity(8));
3787 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3789 // Copy to a temporary if necessary to ensure the appropriate alignment.
3790 std::pair<CharUnits, CharUnits> SizeAlign =
3791 getContext().getTypeInfoInChars(Ty);
3792 uint64_t TySize = SizeAlign.first.getQuantity();
3793 CharUnits TyAlign = SizeAlign.second;
3795 // Copy into a temporary if the type is more aligned than the
3796 // register save area.
3797 if (TyAlign.getQuantity() > 8) {
3798 Address Tmp = CGF.CreateMemTemp(Ty);
3799 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3800 RegAddr = Tmp;
3801 }
3803 } else if (neededSSE == 1) {
3804 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3805 CharUnits::fromQuantity(16));
3806 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3808 assert(neededSSE == 2 && "Invalid number of needed registers!");
3809 // SSE registers are spaced 16 bytes apart in the register save
3810 // area, we need to collect the two eightbytes together.
3811 // The ABI isn't explicit about this, but it seems reasonable
3812 // to assume that the slots are 16-byte aligned, since the stack is
3813 // naturally 16-byte aligned and the prologue is expected to store
3814 // all the SSE registers to the RSA.
3815 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3816 CharUnits::fromQuantity(16));
3817 Address RegAddrHi =
3818 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3819 CharUnits::fromQuantity(16));
3820 llvm::Type *ST = AI.canHaveCoerceToType()
3821 ? AI.getCoerceToType()
3822 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
3823 llvm::Value *V;
3824 Address Tmp = CGF.CreateMemTemp(Ty);
3825 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3826 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
3827 RegAddrLo, ST->getStructElementType(0)));
3828 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
3829 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
3830 RegAddrHi, ST->getStructElementType(1)));
3831 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
3833 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3834 }
3836 // AMD64-ABI 3.5.7p5: Step 5. Set:
3837 // l->gp_offset = l->gp_offset + num_gp * 8
3838 // l->fp_offset = l->fp_offset + num_fp * 16.
3839 if (neededInt) {
3840 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3841 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3842 gp_offset_p);
3843 }
3844 if (neededSSE) {
3845 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3846 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3847 fp_offset_p);
3848 }
3849 CGF.EmitBranch(ContBlock);
3851 // Emit code to load the value if it was passed in memory.
3853 CGF.EmitBlock(InMemBlock);
3854 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3856 // Return the appropriate result.
3858 CGF.EmitBlock(ContBlock);
3859 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3860 "vaarg.addr");
3861 return ResAddr;
3862 }
3864 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3865 QualType Ty) const {
3866 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3867 CGF.getContext().getTypeInfoInChars(Ty),
3868 CharUnits::fromQuantity(8),
3869 /*allowHigherAlign*/ false);
3870 }
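// Illustrative note (annotation, not part of the upstream source): under the
// MS calling convention every vararg occupies one 8-byte, 8-aligned stack
// slot and there is no gp/fp register save area to consult, so a single
// emitVoidPtrVAArg pointer bump is all that is needed here.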
3872 ABIArgInfo
3873 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
3874 const ABIArgInfo &current) const {
3875 // Assumes vectorCall calling convention.
3876 const Type *Base = nullptr;
3877 uint64_t NumElts = 0;
3879 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3880 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3881 FreeSSERegs -= NumElts;
3882 return getDirectX86Hva();
3883 }
3884 return current;
3885 }
3887 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3888 bool IsReturnType, bool IsVectorCall,
3889 bool IsRegCall) const {
3891 if (Ty->isVoidType())
3892 return ABIArgInfo::getIgnore();
3894 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3895 Ty = EnumTy->getDecl()->getIntegerType();
3897 TypeInfo Info = getContext().getTypeInfo(Ty);
3898 uint64_t Width = Info.Width;
3899 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3901 const RecordType *RT = Ty->getAs<RecordType>();
3902 if (RT) {
3903 if (!IsReturnType) {
3904 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3905 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3906 }
3908 if (RT->getDecl()->hasFlexibleArrayMember())
3909 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3910 }
3913 const Type *Base = nullptr;
3914 uint64_t NumElts = 0;
3915 // vectorcall adds the concept of a homogeneous vector aggregate, similar
3916 // to other targets.
3917 if ((IsVectorCall || IsRegCall) &&
3918 isHomogeneousAggregate(Ty, Base, NumElts)) {
3919 if (IsRegCall) {
3920 if (FreeSSERegs >= NumElts) {
3921 FreeSSERegs -= NumElts;
3922 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3923 return ABIArgInfo::getDirect();
3924 return ABIArgInfo::getExpand();
3925 }
3926 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3927 } else if (IsVectorCall) {
3928 if (FreeSSERegs >= NumElts &&
3929 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
3930 FreeSSERegs -= NumElts;
3931 return ABIArgInfo::getDirect();
3932 } else if (IsReturnType) {
3933 return ABIArgInfo::getExpand();
3934 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
3935 // HVAs are delayed and reclassified in the 2nd step.
3936 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3937 }
3938 }
3939 }
3941 if (Ty->isMemberPointerType()) {
3942 // If the member pointer is represented by an LLVM int or ptr, pass it
3943 // directly.
3944 llvm::Type *LLTy = CGT.ConvertType(Ty);
3945 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3946 return ABIArgInfo::getDirect();
3947 }
3949 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3950 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3951 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3952 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3953 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3955 // Otherwise, coerce it to a small integer.
3956 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3957 }
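// Illustrative example (annotation, not part of the upstream source): a
// 12-byte struct is neither 1, 2, 4, nor 8 bytes, so it is passed by
// reference, while an 8-byte struct such as
//   struct C { int x, y; };
// is coerced to a single i64 by the getDirect call above.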
3959 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3960 switch (BT->getKind()) {
3961 case BuiltinType::Bool:
3962 // Bool type is always extended to the ABI; other builtin types are not
3963 // extended.
3964 return ABIArgInfo::getExtend(Ty);
3966 case BuiltinType::LongDouble:
3967 // Mingw64 GCC uses the old 80 bit extended precision floating point
3968 // unit. Such values are passed indirectly through memory.
3969 if (IsMingw64) {
3970 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3971 if (LDF == &llvm::APFloat::x87DoubleExtended())
3972 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3973 }
3974 break;
3976 case BuiltinType::Int128:
3977 case BuiltinType::UInt128:
3978 // If it's a parameter type, the normal ABI rule is that arguments larger
3979 // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
3980 // even though it isn't particularly efficient.
3981 if (!IsReturnType)
3982 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3984 // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
3985 // Clang matches them for compatibility.
3986 return ABIArgInfo::getDirect(
3987 llvm::VectorType::get(llvm::Type::getInt64Ty(getVMContext()), 2));
3989 default:
3990 break;
3991 }
3992 }
3994 return ABIArgInfo::getDirect();
3995 }
3997 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
3998 unsigned FreeSSERegs,
3999 bool IsVectorCall,
4000 bool IsRegCall) const {
4001 unsigned Count = 0;
4002 for (auto &I : FI.arguments()) {
4003 // Vectorcall in x64 only permits the first 6 arguments to be passed
4004 // in XMM/YMM registers.
4005 if (Count < VectorcallMaxParamNumAsReg)
4006 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4007 else {
4008 // Since these cannot be passed in registers, pretend no registers
4009 // were used.
4010 unsigned ZeroSSERegsAvail = 0;
4011 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
4012 IsVectorCall, IsRegCall);
4013 }
4014 ++Count;
4015 }
4017 for (auto &I : FI.arguments()) {
4018 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
4019 }
4020 }
4022 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4023 const unsigned CC = FI.getCallingConvention();
4024 bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
4025 bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
4027 // If __attribute__((sysv_abi)) is in use, use the SysV argument
4028 // classification rules.
4029 if (CC == llvm::CallingConv::X86_64_SysV) {
4030 X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
4031 SysVABIInfo.computeInfo(FI);
4032 return;
4033 }
4035 unsigned FreeSSERegs = 0;
4036 if (IsVectorCall) {
4037 // We can use up to 4 SSE return registers with vectorcall.
4038 FreeSSERegs = 4;
4039 } else if (IsRegCall) {
4040 // RegCall gives us 16 SSE registers.
4041 FreeSSERegs = 16;
4042 }
4044 if (!getCXXABI().classifyReturnType(FI))
4045 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
4046 IsVectorCall, IsRegCall);
4048 if (IsVectorCall) {
4049 // We can use up to 6 SSE register parameters with vectorcall.
4050 FreeSSERegs = 6;
4051 } else if (IsRegCall) {
4052 // RegCall gives us 16 SSE registers, we can reuse the return registers.
4053 FreeSSERegs = 16;
4054 }
4056 if (IsVectorCall) {
4057 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4058 } else {
4059 for (auto &I : FI.arguments())
4060 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4061 }
4063 }
4065 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4066 QualType Ty) const {
4068 bool IsIndirect = false;
4070 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4071 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4072 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
4073 uint64_t Width = getContext().getTypeSize(Ty);
4074 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4077 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4078 CGF.getContext().getTypeInfoInChars(Ty),
4079 CharUnits::fromQuantity(8),
4080 /*allowHigherAlign*/ false);
4081 }
4085 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4086 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4087 bool IsSoftFloatABI;
4089 CharUnits getParamTypeAlignment(QualType Ty) const;
4091 public:
4092 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
4093 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4095 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4096 QualType Ty) const override;
4097 };
4099 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4100 public:
4101 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
4102 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
4104 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4105 // This is recovered from gcc output.
4106 return 1; // r1 is the dedicated stack pointer
4107 }
4109 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4110 llvm::Value *Address) const override;
4111 };
4114 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4115 // Complex types are passed just like their elements
4116 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4117 Ty = CTy->getElementType();
4119 if (Ty->isVectorType())
4120 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
4123 // For single-element float/vector structs, we consider the whole type
4124 // to have the same alignment requirements as its single element.
4125 const Type *AlignTy = nullptr;
4126 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
4127 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4128 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
4129 (BT && BT->isFloatingPoint()))
4130 AlignTy = EltType;
4131 }
4133 if (AlignTy)
4134 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
4135 return CharUnits::fromQuantity(4);
4136 }
4138 // TODO: this implementation is now likely redundant with
4139 // DefaultABIInfo::EmitVAArg.
4140 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4141 QualType Ty) const {
4142 if (getTarget().getTriple().isOSDarwin()) {
4143 auto TI = getContext().getTypeInfoInChars(Ty);
4144 TI.second = getParamTypeAlignment(Ty);
4146 CharUnits SlotSize = CharUnits::fromQuantity(4);
4147 return emitVoidPtrVAArg(CGF, VAList, Ty,
4148 classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
4149 /*AllowHigherAlign=*/true);
4150 }
4152 const unsigned OverflowLimit = 8;
4153 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4154 // TODO: Implement this. For now ignore.
4155 (void)CTy;
4156 return Address::invalid(); // FIXME?
4157 }
4159 // struct __va_list_tag {
4160 // unsigned char gpr;
4161 // unsigned char fpr;
4162 // unsigned short reserved;
4163 // void *overflow_arg_area;
4164 // void *reg_save_area;
4165 // };
4167 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4168 bool isInt =
4169 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
4170 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4172 // All aggregates are passed indirectly? That doesn't seem consistent
4173 // with the argument-lowering code.
4174 bool isIndirect = Ty->isAggregateType();
4176 CGBuilderTy &Builder = CGF.Builder;
4178 // The calling convention either uses 1-2 GPRs or 1 FPR.
4179 Address NumRegsAddr = Address::invalid();
4180 if (isInt || IsSoftFloatABI) {
4181 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
4182 } else {
4183 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
4184 }
4186 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4188 // "Align" the register count when TY is i64.
4189 if (isI64 || (isF64 && IsSoftFloatABI)) {
4190 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4191 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4192 }
4194 llvm::Value *CC =
4195 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4197 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4198 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4199 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4201 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4203 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4204 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4206 // Case 1: consume registers.
4207 Address RegAddr = Address::invalid();
4208 {
4209 CGF.EmitBlock(UsingRegs);
4211 Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4212 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4213 CharUnits::fromQuantity(8));
4214 assert(RegAddr.getElementType() == CGF.Int8Ty);
4216 // Floating-point registers start after the general-purpose registers.
4217 if (!(isInt || IsSoftFloatABI)) {
4218 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4219 CharUnits::fromQuantity(32));
4220 }
4222 // Get the address of the saved value by scaling the number of
4223 // registers we've used by the size of each register.
4224 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4225 llvm::Value *RegOffset =
4226 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4227 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4228 RegAddr.getPointer(), RegOffset),
4229 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4230 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4232 // Increase the used-register count.
4233 NumRegs =
4234 Builder.CreateAdd(NumRegs,
4235 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4236 Builder.CreateStore(NumRegs, NumRegsAddr);
4238 CGF.EmitBranch(Cont);
4239 }
4241 // Case 2: consume space in the overflow area.
4242 Address MemAddr = Address::invalid();
4243 {
4244 CGF.EmitBlock(UsingOverflow);
4246 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4248 // Everything in the overflow area is rounded up to a size of at least 4.
4249 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4251 CharUnits Size;
4252 if (!isIndirect) {
4253 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4254 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
4255 } else {
4256 Size = CGF.getPointerSize();
4257 }
4259 Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
4260 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4261 OverflowAreaAlign);
4262 // Round up the address of the argument to the required alignment.
4263 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4264 if (Align > OverflowAreaAlign) {
4265 llvm::Value *Ptr = OverflowArea.getPointer();
4266 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4267 Align);
4268 }
4270 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4272 // Increase the overflow area.
4273 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4274 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4275 CGF.EmitBranch(Cont);
4276 }
4278 CGF.EmitBlock(Cont);
4280 // Merge the cases with a phi.
4281 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4282 "vaarg.addr");
4284 // Load the pointer if the argument was passed indirectly.
4285 if (isIndirect) {
4286 Result = Address(Builder.CreateLoad(Result, "aggr"),
4287 getContext().getTypeAlignInChars(Ty));
4288 }
4290 return Result;
4291 }
4294 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4295 llvm::Value *Address) const {
4296 // This is calculated from the LLVM and GCC tables and verified
4297 // against gcc output. AFAIK all ABIs use the same encoding.
4299 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4301 llvm::IntegerType *i8 = CGF.Int8Ty;
4302 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4303 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4304 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4306 // 0-31: r0-31, the 4-byte general-purpose registers
4307 AssignToArrayRange(Builder, Address, Four8, 0, 31);
4309 // 32-63: fp0-31, the 8-byte floating-point registers
4310 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4312 // 64-76 are various 4-byte special-purpose registers:
4319 AssignToArrayRange(Builder, Address, Four8, 64, 76);
4321 // 77-108: v0-31, the 16-byte vector registers
4322 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4329 AssignToArrayRange(Builder, Address, Four8, 109, 113);
4337 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4338 class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
4339 public:
4340 enum ABIKind {
4341 ELFv1 = 0,
4342 ELFv2
4343 };
4345 private:
4346 static const unsigned GPRBits = 64;
4347 ABIKind Kind;
4348 bool HasQPX;
4349 bool IsSoftFloatABI;
4351 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
4352 // will be passed in a QPX register.
4353 bool IsQPXVectorTy(const Type *Ty) const {
4354 if (!HasQPX)
4355 return false;
4357 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4358 unsigned NumElements = VT->getNumElements();
4359 if (NumElements == 1)
4360 return false;
4362 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4363 if (getContext().getTypeSize(Ty) <= 256)
4364 return true;
4365 } else if (VT->getElementType()->
4366 isSpecificBuiltinType(BuiltinType::Float)) {
4367 if (getContext().getTypeSize(Ty) <= 128)
4368 return true;
4369 }
4370 }
4372 return false;
4373 }
4375 bool IsQPXVectorTy(QualType Ty) const {
4376 return IsQPXVectorTy(Ty.getTypePtr());
4380 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
4381 bool SoftFloatABI)
4382 : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
4383 IsSoftFloatABI(SoftFloatABI) {}
4385 bool isPromotableTypeForABI(QualType Ty) const;
4386 CharUnits getParamTypeAlignment(QualType Ty) const;
4388 ABIArgInfo classifyReturnType(QualType RetTy) const;
4389 ABIArgInfo classifyArgumentType(QualType Ty) const;
4391 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4392 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4393 uint64_t Members) const override;
4395 // TODO: We can add more logic to computeInfo to improve performance.
4396 // Example: For aggregate arguments that fit in a register, we could
4397 // use getDirectInReg (as is done below for structs containing a single
4398 // floating-point value) to avoid pushing them to memory on function
4399 // entry. This would require changing the logic in PPCISelLowering
4400 // when lowering the parameters in the caller and args in the callee.
4401 void computeInfo(CGFunctionInfo &FI) const override {
4402 if (!getCXXABI().classifyReturnType(FI))
4403 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4404 for (auto &I : FI.arguments()) {
4405 // We rely on the default argument classification for the most part.
4406 // One exception: An aggregate containing a single floating-point
4407 // or vector item must be passed in a register if one is available.
4408 const Type *T = isSingleElementStruct(I.type, getContext());
4409 if (T) {
4410 const BuiltinType *BT = T->getAs<BuiltinType>();
4411 if (IsQPXVectorTy(T) ||
4412 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4413 (BT && BT->isFloatingPoint())) {
4414 QualType QT(T, 0);
4415 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4416 continue;
4417 }
4418 }
4419 I.info = classifyArgumentType(I.type);
4420 }
4421 }
4423 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4424 QualType Ty) const override;
4426 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
4427 bool asReturnValue) const override {
4428 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4431 bool isSwiftErrorInRegister() const override {
4432 return false;
4433 }
4434 };
4436 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4438 public:
4439 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4440 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
4441 bool SoftFloatABI)
4442 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
4443 SoftFloatABI)) {}
4445 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4446 // This is recovered from gcc output.
4447 return 1; // r1 is the dedicated stack pointer
4448 }
4450 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4451 llvm::Value *Address) const override;
4452 };
4454 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4456 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4458 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4459 // This is recovered from gcc output.
4460 return 1; // r1 is the dedicated stack pointer
4461 }
4463 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4464 llvm::Value *Address) const override;
4465 };
4469 // Return true if the ABI requires Ty to be passed sign- or zero-
4470 // extended to 64 bits.
4472 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4473 // Treat an enum type as its underlying type.
4474 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4475 Ty = EnumTy->getDecl()->getIntegerType();
4477 // Promotable integer types are required to be promoted by the ABI.
4478 if (Ty->isPromotableIntegerType())
4479 return true;
4481 // In addition to the usual promotable integer types, we also need to
4482 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4483 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4484 switch (BT->getKind()) {
4485 case BuiltinType::Int:
4486 case BuiltinType::UInt:
4487 return true;
4488 default:
4489 break;
4490 }
4492 return false;
4493 }
4495 /// isAlignedParamType - Determine whether a type requires 16-byte or
4496 /// higher alignment in the parameter area. Always returns at least 8.
4497 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4498 // Complex types are passed just like their elements.
4499 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4500 Ty = CTy->getElementType();
4502 // Only vector types of size 16 bytes need alignment (larger types are
4503 // passed via reference, smaller types are not aligned).
4504 if (IsQPXVectorTy(Ty)) {
4505 if (getContext().getTypeSize(Ty) > 128)
4506 return CharUnits::fromQuantity(32);
4508 return CharUnits::fromQuantity(16);
4509 } else if (Ty->isVectorType()) {
4510 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
4511 }
4513 // For single-element float/vector structs, we consider the whole type
4514 // to have the same alignment requirements as its single element.
4515 const Type *AlignAsType = nullptr;
4516 const Type *EltType = isSingleElementStruct(Ty, getContext());
4517 if (EltType) {
4518 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4519 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
4520 getContext().getTypeSize(EltType) == 128) ||
4521 (BT && BT->isFloatingPoint()))
4522 AlignAsType = EltType;
4523 }
4525 // Likewise for ELFv2 homogeneous aggregates.
4526 const Type *Base = nullptr;
4527 uint64_t Members = 0;
4528 if (!AlignAsType && Kind == ELFv2 &&
4529 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
4530 AlignAsType = Base;
4532 // With special case aggregates, only vector base types need alignment.
4533 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4534 if (getContext().getTypeSize(AlignAsType) > 128)
4535 return CharUnits::fromQuantity(32);
4537 return CharUnits::fromQuantity(16);
4538 } else if (AlignAsType) {
4539 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
4542 // Otherwise, we only need alignment for any aggregate type that
4543 // has an alignment requirement of >= 16 bytes.
4544 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
4545 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
4546 return CharUnits::fromQuantity(32);
4547 return CharUnits::fromQuantity(16);
4550 return CharUnits::fromQuantity(8);
4553 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
4554 /// aggregate. Base is set to the base element type, and Members is set
4555 /// to the number of base elements.
4556 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
4557 uint64_t &Members) const {
4558 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
4559 uint64_t NElements = AT->getSize().getZExtValue();
4560 if (NElements == 0)
4561 return false;
4562 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
4563 return false;
4564 Members *= NElements;
4565 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
4566 const RecordDecl *RD = RT->getDecl();
4567 if (RD->hasFlexibleArrayMember())
4568 return false;
4570 Members = 0;
4572 // If this is a C++ record, check the bases first.
4573 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4574 for (const auto &I : CXXRD->bases()) {
4575 // Ignore empty records.
4576 if (isEmptyRecord(getContext(), I.getType(), true))
4577 continue;
4579 uint64_t FldMembers;
4580 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
4581 return false;
4583 Members += FldMembers;
4584 }
4585 }
4587 for (const auto *FD : RD->fields()) {
4588 // Ignore (non-zero arrays of) empty records.
4589 QualType FT = FD->getType();
4590 while (const ConstantArrayType *AT =
4591 getContext().getAsConstantArrayType(FT)) {
4592 if (AT->getSize().getZExtValue() == 0)
4593 return false;
4594 FT = AT->getElementType();
4595 }
4596 if (isEmptyRecord(getContext(), FT, true))
4597 continue;
4599 // For compatibility with GCC, ignore empty bitfields in C++ mode.
4600 if (getContext().getLangOpts().CPlusPlus &&
4601 FD->isZeroLengthBitField(getContext()))
4602 continue;
4604 uint64_t FldMembers;
4605 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
4606 return false;
4608 Members = (RD->isUnion() ?
4609 std::max(Members, FldMembers) : Members + FldMembers);
4610 }
4612 if (!Base)
4613 return false;
4615 // Ensure there is no padding.
4616 if (getContext().getTypeSize(Base) * Members !=
4617 getContext().getTypeSize(Ty))
4618 return false;
4619 } else {
4620 Members = 1;
4621 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4622 Members = 2;
4623 Ty = CT->getElementType();
4624 }
4626 // Most ABIs only support float, double, and some vector type widths.
4627 if (!isHomogeneousAggregateBaseType(Ty))
4628 return false;
4630 // The base type must be the same for all members. Types that
4631 // agree in both total size and mode (float vs. vector) are
4632 // treated as being equivalent here.
4633 const Type *TyPtr = Ty.getTypePtr();
4634 if (!Base) {
4635 Base = TyPtr;
4636 // If it's a non-power-of-2 vector, its size is already a power-of-2,
4637 // so make sure to widen it explicitly.
4638 if (const VectorType *VT = Base->getAs<VectorType>()) {
4639 QualType EltTy = VT->getElementType();
4640 unsigned NumElements =
4641 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
4642 Base = getContext()
4643 .getVectorType(EltTy, NumElements, VT->getVectorKind())
4644 .getTypePtr();
4645 }
4646 }
4648 if (Base->isVectorType() != TyPtr->isVectorType() ||
4649 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
4650 return false;
4651 }
4652 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
4653 }
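// Worked example (annotation, not part of the upstream source):
//   struct V { double x, y; };
// walks the field loop above with Base = double and Members = 2, has no
// padding (2 * 64 bits == the struct size), and so qualifies as a
// homogeneous aggregate; under ELFv2 it is passed and returned as the array
// type [2 x double].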
4655 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4656 // Homogeneous aggregates for ELFv2 must have base types of float,
4657 // double, long double, or 128-bit vectors.
4658 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4659 if (BT->getKind() == BuiltinType::Float ||
4660 BT->getKind() == BuiltinType::Double ||
4661 BT->getKind() == BuiltinType::LongDouble ||
4662 (getContext().getTargetInfo().hasFloat128Type() &&
4663 (BT->getKind() == BuiltinType::Float128))) {
4664 return true;
4665 }
4666 }
4669 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4670 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4671 return true;
4672 }
4674 return false;
4675 }
4676 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4677 const Type *Base, uint64_t Members) const {
4678 // Vector and fp128 types require one register, other floating point types
4679 // require one or two registers depending on their size.
4680 uint32_t NumRegs =
4681 ((getContext().getTargetInfo().hasFloat128Type() &&
4682 Base->isFloat128Type()) ||
4683 Base->isVectorType()) ? 1
4684 : (getContext().getTypeSize(Base) + 63) / 64;
4686 // Homogeneous Aggregates may occupy at most 8 registers.
4687 return Members * NumRegs <= 8;
4690 ABIArgInfo
4691 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
4692 Ty = useFirstFieldIfTransparentUnion(Ty);
4694 if (Ty->isAnyComplexType())
4695 return ABIArgInfo::getDirect();
4697 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
4698 // or via reference (larger than 16 bytes).
4699 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
4700 uint64_t Size = getContext().getTypeSize(Ty);
4701 if (Size > 128)
4702 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4703 else if (Size < 128) {
4704 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4705 return ABIArgInfo::getDirect(CoerceTy);
4706 }
4707 }
4709 if (isAggregateTypeForABI(Ty)) {
4710 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4711 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4713 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4714 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
4716 // ELFv2 homogeneous aggregates are passed as array types.
4717 const Type *Base = nullptr;
4718 uint64_t Members = 0;
4719 if (Kind == ELFv2 &&
4720 isHomogeneousAggregate(Ty, Base, Members)) {
4721 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4722 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4723 return ABIArgInfo::getDirect(CoerceTy);
4726 // If an aggregate may end up fully in registers, we do not
4727 // use the ByVal method, but pass the aggregate as array.
4728 // This is usually beneficial since we avoid forcing the
4729 // back-end to store the argument to memory.
4730 uint64_t Bits = getContext().getTypeSize(Ty);
4731 if (Bits > 0 && Bits <= 8 * GPRBits) {
4732 llvm::Type *CoerceTy;
4734 // Types up to 8 bytes are passed as integer type (which will be
4735 // properly aligned in the argument save area doubleword).
4736 if (Bits <= GPRBits)
4737 CoerceTy =
4738 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4739 // Larger types are passed as arrays, with the base type selected
4740 // according to the required alignment in the save area.
4741 else {
4742 uint64_t RegBits = ABIAlign * 8;
4743 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4744 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4745 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4746 }
4748 return ABIArgInfo::getDirect(CoerceTy);
4749 }
4751 // All other aggregates are passed ByVal.
4752 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
4753 /*ByVal=*/true,
4754 /*Realign=*/TyAlign > ABIAlign);
4755 }
4757 return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
4758 : ABIArgInfo::getDirect());
4759 }
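// Worked example (annotation, not part of the upstream source): a 12-byte
// argument such as
//   struct S { int a, b, c; };
// has Bits == 96 > GPRBits with ABIAlign == 8, so RegBits == 64 and
// NumRegs == alignTo(96, 64) / 64 == 2; it is passed directly as [2 x i64]
// rather than byval, letting the backend keep it in two GPRs.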
4761 ABIArgInfo
4762 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4763 if (RetTy->isVoidType())
4764 return ABIArgInfo::getIgnore();
4766 if (RetTy->isAnyComplexType())
4767 return ABIArgInfo::getDirect();
4769 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
4770 // or via reference (larger than 16 bytes).
4771 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
4772 uint64_t Size = getContext().getTypeSize(RetTy);
4773 if (Size > 128)
4774 return getNaturalAlignIndirect(RetTy);
4775 else if (Size < 128) {
4776 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4777 return ABIArgInfo::getDirect(CoerceTy);
4778 }
4779 }
4781 if (isAggregateTypeForABI(RetTy)) {
4782 // ELFv2 homogeneous aggregates are returned as array types.
4783 const Type *Base = nullptr;
4784 uint64_t Members = 0;
4785 if (Kind == ELFv2 &&
4786 isHomogeneousAggregate(RetTy, Base, Members)) {
4787 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4788 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4789 return ABIArgInfo::getDirect(CoerceTy);
4792 // ELFv2 small aggregates are returned in up to two registers.
4793 uint64_t Bits = getContext().getTypeSize(RetTy);
4794 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
4795 if (Bits == 0)
4796 return ABIArgInfo::getIgnore();
4798 llvm::Type *CoerceTy;
4799 if (Bits > GPRBits) {
4800 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4801 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4802 } else
4803 CoerceTy =
4804 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4805 return ABIArgInfo::getDirect(CoerceTy);
4806 }
4808 // All other aggregates are returned indirectly.
4809 return getNaturalAlignIndirect(RetTy);
4810 }
4812 return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
4813 : ABIArgInfo::getDirect());
4814 }
4816 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
4817 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4818 QualType Ty) const {
4819 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4820 TypeInfo.second = getParamTypeAlignment(Ty);
4822 CharUnits SlotSize = CharUnits::fromQuantity(8);
4824 // If we have a complex type and the base type is smaller than 8 bytes,
4825 // the ABI calls for the real and imaginary parts to be right-adjusted
4826 // in separate doublewords. However, Clang expects us to produce a
4827 // pointer to a structure with the two parts packed tightly. So generate
4828 // loads of the real and imaginary parts relative to the va_list pointer,
4829 // and store them to a temporary structure.
4830 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4831 CharUnits EltSize = TypeInfo.first / 2;
4832 if (EltSize < SlotSize) {
4833 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
4834 SlotSize * 2, SlotSize,
4835 SlotSize, /*AllowHigher*/ true);
4837 Address RealAddr = Addr;
4838 Address ImagAddr = RealAddr;
4839 if (CGF.CGM.getDataLayout().isBigEndian()) {
4840 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
4841 SlotSize - EltSize);
4842 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
4843 2 * SlotSize - EltSize);
4844 } else {
4845 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
4846 }
4848 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
4849 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
4850 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
4851 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
4852 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
4854 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
4855 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
4856 /*init*/ true);
4857 return Temp;
4858 }
4859 }
4861 // Otherwise, just use the general rule.
4862 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
4863 TypeInfo, SlotSize, /*AllowHigher*/ true);
4864 }
4866 static bool
4867 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4868 llvm::Value *Address) {
4869 // This is calculated from the LLVM and GCC tables and verified
4870 // against gcc output. AFAIK all ABIs use the same encoding.
4872 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4874 llvm::IntegerType *i8 = CGF.Int8Ty;
4875 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4876 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4877 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4879 // 0-31: r0-31, the 8-byte general-purpose registers
4880 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
4882 // 32-63: fp0-31, the 8-byte floating-point registers
4883 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4885 // 64-67 are various 8-byte special-purpose registers:
4890 AssignToArrayRange(Builder, Address, Eight8, 64, 67);
4892 // 68-76 are various 4-byte special-purpose registers:
4895 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4897 // 77-108: v0-31, the 16-byte vector registers
4898 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4908 AssignToArrayRange(Builder, Address, Eight8, 109, 116);
4910 return false;
4911 }
4913 bool
4914 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4915 CodeGen::CodeGenFunction &CGF,
4916 llvm::Value *Address) const {
4918 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4919 }
4921 bool
4922 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4923 llvm::Value *Address) const {
4925 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4926 }
4928 //===----------------------------------------------------------------------===//
4929 // AArch64 ABI Implementation
4930 //===----------------------------------------------------------------------===//
4934 class AArch64ABIInfo : public SwiftABIInfo {
4935 public:
4936 enum ABIKind {
4937 AAPCS = 0,
4938 DarwinPCS,
4939 Win64
4940 };
4942 private:
4943 ABIKind Kind;
4945 public:
4946 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
4947 : SwiftABIInfo(CGT), Kind(Kind) {}
4950 ABIKind getABIKind() const { return Kind; }
4951 bool isDarwinPCS() const { return Kind == DarwinPCS; }
4953 ABIArgInfo classifyReturnType(QualType RetTy) const;
4954 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4955 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4956 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4957 uint64_t Members) const override;
4959 bool isIllegalVectorType(QualType Ty) const;
4961 void computeInfo(CGFunctionInfo &FI) const override {
4962 if (!::classifyReturnType(getCXXABI(), FI, *this))
4963 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4965 for (auto &it : FI.arguments())
4966 it.info = classifyArgumentType(it.type);
4969 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4970 CodeGenFunction &CGF) const;
4972 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
4973 CodeGenFunction &CGF) const;
4975 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4976 QualType Ty) const override {
4977 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
4978 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4979 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4982 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4983 QualType Ty) const override;
4985 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
4986 bool asReturnValue) const override {
4987 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4989 bool isSwiftErrorInRegister() const override {
4990 return true;
4991 }
4993 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
4994 unsigned elts) const override;
4995 };
4997 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4999 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
5000 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
5002 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5003 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5006 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5007 return 31;
5008 }
5010 bool doesReturnSlotInterfereWithArgs() const override { return false; }
5012 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5013 CodeGen::CodeGenModule &CGM) const override {
5014 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5015 if (!FD)
5016 return;
5017 llvm::Function *Fn = cast<llvm::Function>(GV);
5019 auto Kind = CGM.getCodeGenOpts().getSignReturnAddress();
5020 if (Kind != CodeGenOptions::SignReturnAddressScope::None) {
5021 Fn->addFnAttr("sign-return-address",
5022 Kind == CodeGenOptions::SignReturnAddressScope::All
5023 ? "all"
5024 : "non-leaf");
5026 auto Key = CGM.getCodeGenOpts().getSignReturnAddressKey();
5027 Fn->addFnAttr("sign-return-address-key",
5028 Key == CodeGenOptions::SignReturnAddressKeyValue::AKey
5029 ? "a_key"
5030 : "b_key");
5031 }
5033 if (CGM.getCodeGenOpts().BranchTargetEnforcement)
5034 Fn->addFnAttr("branch-target-enforcement");
5038 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
5040 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
5041 : AArch64TargetCodeGenInfo(CGT, K) {}
5043 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5044 CodeGen::CodeGenModule &CGM) const override;
5046 void getDependentLibraryOption(llvm::StringRef Lib,
5047 llvm::SmallString<24> &Opt) const override {
5048 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5051 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5052 llvm::SmallString<32> &Opt) const override {
5053 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5057 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5058 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5059 AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5060 if (GV->isDeclaration())
5061 return;
5062 addStackProbeTargetAttributes(D, GV, CGM);
5063 }
5066 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
5067 Ty = useFirstFieldIfTransparentUnion(Ty);
5069 // Handle illegal vector types here.
5070 if (isIllegalVectorType(Ty)) {
5071 uint64_t Size = getContext().getTypeSize(Ty);
5072 // Android promotes <2 x i8> to i16, not i32
5073 if (isAndroid() && (Size <= 16)) {
5074 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
5075 return ABIArgInfo::getDirect(ResType);
5076 }
5077 if (Size <= 32) {
5078 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
5079 return ABIArgInfo::getDirect(ResType);
5080 }
5081 if (Size == 64) {
5082 llvm::Type *ResType =
5083 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
5084 return ABIArgInfo::getDirect(ResType);
5085 }
5086 if (Size == 128) {
5087 llvm::Type *ResType =
5088 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
5089 return ABIArgInfo::getDirect(ResType);
5090 }
5091 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5092 }
5094 if (!isAggregateTypeForABI(Ty)) {
5095 // Treat an enum type as its underlying type.
5096 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5097 Ty = EnumTy->getDecl()->getIntegerType();
5099 return (Ty->isPromotableIntegerType() && isDarwinPCS()
5100 ? ABIArgInfo::getExtend(Ty)
5101 : ABIArgInfo::getDirect());
5104 // Structures with either a non-trivial destructor or a non-trivial
5105 // copy constructor are always indirect.
5106 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5107 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
5108 CGCXXABI::RAA_DirectInMemory);
5111 // Empty records are always ignored on Darwin, but actually passed in C++ mode
5112 // elsewhere for GNU compatibility.
5113 uint64_t Size = getContext().getTypeSize(Ty);
5114 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
5115 if (IsEmpty || Size == 0) {
5116 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
5117 return ABIArgInfo::getIgnore();
5119 // GNU C mode. The only argument that gets ignored is an empty one with size
5121 if (IsEmpty && Size == 0)
5122 return ABIArgInfo::getIgnore();
5123 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5126 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
5127 const Type *Base = nullptr;
5128 uint64_t Members = 0;
5129 if (isHomogeneousAggregate(Ty, Base, Members)) {
5130 return ABIArgInfo::getDirect(
5131 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
5134 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
5136 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
5137 // same size and alignment.
5138 if (getTarget().isRenderScriptTarget()) {
5139 return coerceToIntArray(Ty, getContext(), getVMContext());
5142 if (Kind == AArch64ABIInfo::AAPCS) {
5143 Alignment = getContext().getTypeUnadjustedAlign(Ty);
5144 Alignment = Alignment < 128 ? 64 : 128;
5146 Alignment = getContext().getTypeAlign(Ty);
5148 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5150 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5151 // For aggregates with 16-byte alignment, we use i128.
5152 if (Alignment < 128 && Size == 128) {
5153 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5154 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5156 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5159 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
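// Worked example (illustrative only): "struct S { int a, b, c; };" is a
// 12-byte aggregate with 4-byte alignment. It is not an HFA, so it reaches
// the small-aggregate path above: Size rounds up from 96 to 128 bits, the
// AAPCS alignment category is 64, and the struct is passed directly as
// [2 x i64].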
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}
/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;
    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
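// For example (illustrative): a 3-element float vector (declared with
// ext_vector_type(3)) occupies 128 bits because ASTContext pads vector sizes
// to a power of two, but its element count is not a power of two, so it is
// illegal here and classifyArgumentType() above coerces it to <4 x i32>.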
bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
                                               llvm::Type *eltTy,
                                               unsigned elts) const {
  if (!llvm::isPowerOf2_32(elts))
    return false;
  if (totalSize.getQuantity() != 8 &&
      (totalSize.getQuantity() != 16 || elts == 1))
    return false;
  return true;
}
bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}
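// Example (illustrative): "struct Vec4 { float x, y, z, w; };" is a
// homogeneous aggregate of four floats, so classifyArgumentType() passes it
// directly as [4 x float], one VFP register per member. A fifth float member
// would exceed the four-member limit, and the struct would fall through to
// the generic size-based rules instead.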
Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
                                       QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need register alignment correction (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
  // This is done unconditionally because allocating an argument to the stack
  // also uses up all the remaining registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
                   CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.second));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.first.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.first.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr(OnStackPtr,
                      std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 OnStackAddr, OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
                   TyAlign);

  return ResAddr;
}
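// The emitted control flow mirrors the PCS algorithm; roughly (sketch only,
// using the value names produced above, for a GPR-class argument):
//
//     %gr_offs = load i32, i32* %gr_offs_p
//     %0 = icmp sge i32 %gr_offs, 0
//     br i1 %0, label %vaarg.on_stack, label %vaarg.maybe_reg
//   vaarg.maybe_reg:
//     %new_reg_offs = add i32 %gr_offs, <RegSize>
//     store i32 %new_reg_offs, i32* %gr_offs_p
//     %inreg = icmp sle i32 %new_reg_offs, 0
//     br i1 %inreg, label %vaarg.in_reg, label %vaarg.on_stack
//   vaarg.end:
//     %vaargs.addr = phi ... [ <reg addr>, %vaarg.in_reg ],
//                            [ <stack addr>, %vaarg.on_stack ]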
Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types. Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.first.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}
Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class ARMABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP = 2,
    AAPCS16_VFP = 3,
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
      : SwiftABIInfo(CGT), Kind(_Kind) {
    setCCs();
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ABIKind getABIKind() const { return Kind; }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }
  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;
};
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
    if (ABI == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
  }
};
class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
} // end anonymous namespace

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
                                            FI.getCallingConvention());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                  FI.getCallingConvention());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}
/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case APCS: return llvm::CallingConv::ARM_APCS;
  case AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}
void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}
ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType =
        llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    llvm::Type *ResType = llvm::VectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
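// For example (illustrative, on most non-Android targets): a <2 x i8> vector
// is 16 bits and gets widened to a single i32; a 3-element i16 vector is 48
// bits, padded to 64, and is coerced to <2 x i32>; anything larger than 128
// bits is passed indirectly.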
ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors.
    if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
      uint64_t Size = getContext().getTypeSize(VT);
      llvm::Type *NewVecTy = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            unsigned functionCallConv) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  // _Float16 and __fp16 get passed as if it were an int or float, but with
  // the top 16 bits unspecified. This is not done for OpenCL as it handles the
  // half type natively, and does not need to interwork with AAPCS code.
  if ((Ty->isFloat16Type() || Ty->isHalfType()) &&
      !getContext().getLangOpts().NativeHalfArgsAndReturns) {
    llvm::Type *ResType = IsAAPCS_VFP ?
      llvm::Type::getFloatTy(getVMContext()) :
      llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128-bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
  // same size and alignment.
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (TyAlign <= 4) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
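// For example (illustrative): "struct S { int a, b, c; };" (12 bytes, 4-byte
// aligned) is not an HFA and fits in registers, so it is coerced to
// [3 x i32] and passed in r0-r3 or on the stack per the AAPCS; a 72-byte
// struct would instead be passed byval with 4- or 8-byte alignment.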
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
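// Examples (illustrative): "struct { char c; }" and
// "union { int i; char c; }" are integer-like (at most one word, all fields
// at offset 0), so under APCS they are returned directly in r0;
// "struct { int a; int b; }" is larger than a word and is returned in
// memory instead.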
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
                                          unsigned functionCallConv) const {

  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // FP16 vectors should be converted to integer vectors.
    if (!getTarget().hasLegalHalfType() &&
        (VT->getElementType()->isFloat16Type() ||
         VT->getElementType()->isHalfType()))
      return coerceIllegalVector(RetTy);
  }

  // _Float16 and __fp16 get returned as if it were an int or float, but with
  // the top 16 bits unspecified. This is not done for OpenCL as it handles the
  // half type natively, and does not need to interwork with AAPCS code.
  if ((RetTy->isFloat16Type() || RetTy->isHalfType()) &&
      !getContext().getLangOpts().NativeHalfArgsAndReturns) {
    llvm::Type *ResType = IsAAPCS_VFP ?
      llvm::Type::getFloatTy(getVMContext()) :
      llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                            : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
      llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}
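// For example (illustrative): under AAPCS, "struct { char c; }" is returned
// directly, as i8 on little-endian targets but widened to i32 on big-endian
// ones so the value sits where an LDR of r0 expects it.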
/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType> ()) {
    // On targets that don't support FP16, FP16 is expanded into float, and we
    // don't want the ABI to depend on whether or not FP16 is supported in
    // hardware. Thus return true so that FP16 vectors get coerced to integer
    // vectors.
    if (!getTarget().hasLegalHalfType() &&
        (VT->getElementType()->isFloat16Type() ||
         VT->getElementType()->isHalfType()))
      return true;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be a power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be a power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}
/// Return true if a type contains any 16-bit floating point vectors.
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}
bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                           llvm::Type *eltTy,
                                           unsigned numElts) const {
  if (!llvm::isPowerOf2_32(numElts))
    return false;
  unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
  if (size > 32)
    return false;
  if (vectorSize.getQuantity() != 8 &&
      (vectorSize.getQuantity() != 16 || numElts == 1))
    return false;
  return true;
}
bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}
bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
                                        bool acceptHalf) const {
  // Give precedence to user-specified calling conventions.
  if (callConvention != llvm::CallingConv::C)
    return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
  else
    return (getABIKind() == AAPCS_VFP) ||
           (acceptHalf && (getABIKind() == AAPCS16_VFP));
}
Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);

  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

  // ARMv7k passes structs bigger than 16 bytes indirectly, in space
  // allocated by the caller.
  } else if (TySize > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIInfo::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;

  // Otherwise, bound the type's ABI alignment.
  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  // Our callers should be prepared to handle an under-aligned address.
  } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
             getABIKind() == ARMABIInfo::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // ARMv7k allows type alignment up to 16 bytes.
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }

  std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI };
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
                          SlotSize, /*AllowHigherAlign*/ true);
}
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  bool shouldEmitStaticExternCAliases() const override;

private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
/// Checks if the type is unsupported directly by the current target.
static bool isUnsupportedType(ASTContext &Context, QualType T) {
  if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
    return true;
  if (!Context.getTargetInfo().hasFloat128Type() &&
      (T->isFloat128Type() ||
       (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
    return true;
  if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
      Context.getTypeSize(T) > 64)
    return true;
  if (const auto *AT = T->getAsArrayTypeUnsafe())
    return isUnsupportedType(Context, AT->getElementType());
  const auto *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const CXXBaseSpecifier &I : CXXRD->bases())
      if (isUnsupportedType(Context, I.getType()))
        return true;

  for (const FieldDecl *I : RD->fields())
    if (isUnsupportedType(Context, I->getType()))
      return true;

  return false;
}
/// Coerce the given type into an array with maximum allowed size of elements.
static ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, ASTContext &Context,
                                            llvm::LLVMContext &LLVMContext,
                                            unsigned MaxSize) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Div);
  const uint64_t NumElements = (Size + Div - 1) / Div;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}
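// For instance (illustrative): a 128-bit floating-point value with 128-bit
// alignment, coerced with MaxSize = 64, uses Div = min(64, 128) = 64 and
// NumElements = ceil(128 / 64) = 2, so it is passed as [2 x i64].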
ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (getContext().getLangOpts().OpenMP &&
      getContext().getLangOpts().OpenMPIsDevice &&
      isUnsupportedType(getContext(), RetTy))
    return coerceToIntArrayWithLimit(RetTy, getContext(), getVMContext(), 64);

  // Note: this is different from the default ABI.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                           : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Return aggregate types as indirect by value.
  if (isAggregateTypeForABI(Ty))
    return getNaturalAlignIndirect(Ty, /* byval */ true);

  return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                        : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  llvm_unreachable("NVPTX does not support varargs");
}
void NVPTXTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata.
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
    }
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
      llvm::APSInt MaxThreads(32);
      MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
      if (MaxThreads > 0)
        addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());

      // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
      // not specified in __launch_bounds__ or if the user specified a 0 value,
      // we don't have to add a PTX directive.
      if (Attr->getMinBlocks()) {
        llvm::APSInt MinBlocks(32);
        MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
        if (MinBlocks > 0)
          // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node.
          addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
      }
    }
  }
}
void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
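// The resulting module-level IR looks roughly like this (illustrative, for a
// kernel named "foo"):
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @foo, !"kernel", i32 1}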
bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public SwiftABIInfo {
  bool HasVector;

public:
  SystemZABIInfo(CodeGenTypes &CGT, bool HV)
      : SwiftABIInfo(CGT), HasVector(HV) {}

  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isVectorArgumentType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;
  QualType GetSingleElementType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
      : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};

} // end anonymous namespace
bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // 32-bit values must also be promoted.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  return (Ty->isAnyComplexType() ||
          Ty->isVectorType() ||
          isAggregateTypeForABI(Ty));
}

bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
  return (HasVector &&
          Ty->isVectorType() &&
          getContext().getTypeSize(Ty) <= 128);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }

  return false;
}
QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
  if (const RecordType *RT = Ty->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    QualType Found;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();

        // Empty bases don't affect things either way.
        if (isEmptyRecord(getContext(), Base, true))
          continue;

        if (!Found.isNull())
          return Ty;
        Found = GetSingleElementType(Base);
      }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      // For compatibility with GCC, ignore empty bitfields in C++ mode.
      // Unlike isSingleElementStruct(), empty structure and array fields
      // do count. So do anonymous bitfields that aren't zero-sized.
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isZeroLengthBitField(getContext()))
        continue;

      // Unlike isSingleElementStruct(), arrays do not count.
      // Nested structures still do though.
      if (!Found.isNull())
        return Ty;
      Found = GetSingleElementType(FD->getType());
    }

    // Unlike isSingleElementStruct(), trailing padding is allowed.
    // An 8-byte aligned struct s { float f; } is passed as a double.
    if (!Found.isNull())
      return Found;
  }
  return Ty;
}
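// Example (illustrative): for "struct Outer { struct Inner { float f; } i; }"
// the recursion above finds the single float, so classifyArgumentType()
// below passes the whole struct as a float in an FPR rather than in a GPR.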
Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };

  // Every non-vector argument occupies 8 bytes and is passed by preference
  // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
  // always passed on the stack.
  Ty = getContext().getCanonicalType(Ty);
  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *DirectTy = ArgTy;
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();
  bool InFPRs = false;
  bool IsVector = false;
  CharUnits UnpaddedSize;
  CharUnits DirectAlign;
  if (IsIndirect) {
    DirectTy = llvm::PointerType::getUnqual(DirectTy);
    UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
  } else {
    if (AI.getCoerceToType())
      ArgTy = AI.getCoerceToType();
    InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
    IsVector = ArgTy->isVectorTy();
    UnpaddedSize = TyInfo.first;
    DirectAlign = TyInfo.second;
  }
  CharUnits PaddedSize = CharUnits::fromQuantity(8);
  if (IsVector && UnpaddedSize > PaddedSize)
    PaddedSize = CharUnits::fromQuantity(16);
  assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");

  CharUnits Padding = (PaddedSize - UnpaddedSize);

  llvm::Type *IndexTy = CGF.Int64Ty;
  llvm::Value *PaddedSizeV =
      llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());

  if (IsVector) {
    // Work out the address of a vector argument on the stack.
    // Vector arguments are always passed in the high bits of a
    // single (8 byte) or double (16 byte) stack slot.
    Address OverflowArgAreaPtr =
        CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
    Address OverflowArgArea =
        Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
                TyInfo.second);
    Address MemAddr =
        CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");

    // Update overflow_arg_area_ptr pointer.
    llvm::Value *NewOverflowArgArea =
        CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
                              "overflow_arg_area");
    CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);

    return MemAddr;
  }

  assert(PaddedSize.getQuantity() == 8);

  unsigned MaxRegs, RegCountField, RegSaveIndex;
  CharUnits RegPadding;
  if (InFPRs) {
    MaxRegs = 4;              // Maximum of 4 FPR arguments
    RegCountField = 1;        // __fpr
    RegSaveIndex = 16;        // save offset for f0
    RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5;              // Maximum of 5 GPR arguments
    RegCountField = 0;        // __gpr
    RegSaveIndex = 2;         // save offset for r2
    RegPadding = Padding;     // values are passed in the low bits of a GPR
  }

  Address RegCountPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                  "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // Work out the address of an argument register.
  llvm::Value *ScaledRegCount =
      CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
      llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
                                          + RegPadding.getQuantity());
  llvm::Value *RegOffset =
      CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  Address RegSaveAreaPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
  llvm::Value *RegSaveArea =
      CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
                                           "raw_reg_addr"),
                     PaddedSize);
  Address RegAddr =
      CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");

  // Update the register count.
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
      CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);

  // Work out the address of a stack argument.
  Address OverflowArgAreaPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
  Address OverflowArgArea =
      Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
              PaddedSize);
  Address RawMemAddr =
      CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
  Address MemAddr =
      CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");

  // Update overflow_arg_area_ptr pointer.
  llvm::Value *NewOverflowArgArea =
      CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
                            "overflow_arg_area");
  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
  CGF.EmitBranch(ContBlock);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 MemAddr, InMemBlock, "va_arg.addr");

  if (IsIndirect)
    ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
                      TyInfo.second);

  return ResAddr;
}
ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isVectorArgumentType(RetTy))
    return ABIArgInfo::getDirect();
  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);
  return (isPromotableIntegerType(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                         : ABIArgInfo::getDirect());
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
  // Handle the generic C++ ABI.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Integers and enums are extended to full register width.
  if (isPromotableIntegerType(Ty))
    return ABIArgInfo::getExtend(Ty);

  // Handle vector types and vector-like structure types.  Note that
  // as opposed to float-like structure types, we do not allow any
  // padding for vector-like structures, so verify the sizes match.
  uint64_t Size = getContext().getTypeSize(Ty);
  QualType SingleElementTy = GetSingleElementType(Ty);
  if (isVectorArgumentType(SingleElementTy) &&
      getContext().getTypeSize(SingleElementTy) == Size)
    return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));

  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Handle small structures.
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so they really
    // fail the size test above.
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // The structure is passed as an unextended integer, a float, or a double.
    llvm::Type *PassTy;
    if (isFPArgumentType(SingleElementTy)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }

  // Non-structure compounds are passed indirectly.
  if (isCompoundType(Ty))
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  return ABIArgInfo::getDirect(nullptr);
}
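// Illustrative examples of the classification above (not exhaustive): a
// C struct such as 'struct { float f; }' is passed directly as an LLVM
// 'float', 'struct { double d; }' as 'double', and a 6-byte struct is
// passed indirectly because its size is not 1, 2, 4 or 8 bytes.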
//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

} // end anonymous namespace
void MSP430TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
    if (!InterruptAttr)
      return;

    // Handle 'interrupt' attribute:
    llvm::Function *F = cast<llvm::Function>(GV);

    // Step 1: Set ISR calling convention.
    F->setCallingConv(llvm::CallingConv::MSP430_INTR);

    // Step 2: Add attributes goodness.
    F->addFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
  }
}
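// For illustration: a handler written as
//   void __attribute__((interrupt(4))) isr(void) { ... }
// would get the MSP430_INTR calling convention plus the IR attributes
// 'noinline' and '"interrupt"="4"' on the emitted function.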
//===----------------------------------------------------------------------===//
// MIPS ABI Implementation.  This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;

public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  ABIArgInfo extendType(QualType Ty) const;
};
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
      : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
        SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);

    if (FD->hasAttr<MipsLongCallAttr>())
      Fn->addFnAttr("long-call");
    else if (FD->hasAttr<MipsShortCallAttr>())
      Fn->addFnAttr("short-call");

    // Other attributes do not have a meaning for declarations.
    if (GV->isDeclaration())
      return;

    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }

    if (FD->hasAttr<MicroMipsAttr>())
      Fn->addFnAttr("micromips");
    else if (FD->hasAttr<NoMicroMipsAttr>())
      Fn->addFnAttr("nomicromips");

    const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case MipsInterruptAttr::eic: Kind = "eic"; break;
    case MipsInterruptAttr::sw0: Kind = "sw0"; break;
    case MipsInterruptAttr::sw1: Kind = "sw1"; break;
    case MipsInterruptAttr::hw0: Kind = "hw0"; break;
    case MipsInterruptAttr::hw1: Kind = "hw1"; break;
    case MipsInterruptAttr::hw2: Kind = "hw2"; break;
    case MipsInterruptAttr::hw3: Kind = "hw3"; break;
    case MipsInterruptAttr::hw4: Kind = "hw4"; break;
    case MipsInterruptAttr::hw5: Kind = "hw5"; break;
    }

    Fn->addFnAttr("interrupt", Kind);
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
} // end anonymous namespace
void MipsABIInfo::CoerceToIntArgs(
    uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
      llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);

  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}
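// Worked example (illustrative): on O32, MinABIStackAlignInBytes is 4, so a
// 72-bit (9-byte) aggregate coerces to { i32, i32, i8 }; on N32/N64 the slot
// type is i64, so the same aggregate coerces to { i64, i8 }.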
// In N32/64, an aligned double precision floating point field is passed in
// a register.
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any aligned
  // double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add double type.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}
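// Illustrative example: on N64, 'struct { int i; double d; }' (128 bits, with
// the double at offset 64) coerces to { i64, double }, letting the code
// generator place the double field in a floating-point register.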
llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
                                        uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}
ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  uint64_t OrigOffset = Offset;
  uint64_t TySize = getContext().getTypeSize(Ty);
  uint64_t Align = getContext().getTypeAlign(Ty) / 8;

  Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
                   (uint64_t)StackAlignInBytes);
  unsigned CurrOffset = llvm::alignTo(Offset, Align);
  Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;

  if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();

    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
      Offset = OrigOffset + MinABIStackAlignInBytes;
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    // If we have reached here, aggregates are passed directly by coercing to
    // another structure type. Padding is inserted if the offset of the
    // aggregate is unaligned.
    ABIArgInfo ArgInfo =
        ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                              getPaddingType(OrigOffset, CurrOffset));
    ArgInfo.setInReg(true);
    return ArgInfo;
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // All integral types are promoted to the GPR width.
  if (Ty->isIntegralOrEnumerationType())
    return extendType(Ty);

  return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}
llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
  const RecordType *RT = RetTy->getAs<RecordType>();
  SmallVector<llvm::Type*, 8> RTList;

  if (RT && RT->isStructureOrClassType()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    unsigned FieldCnt = Layout.getFieldCount();

    // N32/64 returns struct/classes in floating point registers if the
    // following conditions are met:
    // 1. The size of the struct/class is no larger than 128-bit.
    // 2. The struct/class has one or two fields all of which are floating
    //    point types.
    // 3. The offset of the first field is zero (this follows what gcc does).
    //
    // Any other composite results are returned in integer registers.
    //
    if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
      RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
      for (; b != e; ++b) {
        const BuiltinType *BT = b->getType()->getAs<BuiltinType>();

        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}
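// Illustrative example: on N64, 'struct { float f; double d; }' satisfies all
// three conditions above and is returned as { float, double } in FP
// registers, whereas 'struct { float f; int i; }' falls through to integer
// registers because not all fields are floating point.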
ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size = getContext().getTypeSize(RetTy);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // O32 doesn't treat zero-sized structs differently from other structs.
  // However, N32/N64 ignores zero sized return values.
  if (!IsO32 && Size == 0)
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
        return ABIArgInfo::getDirect();

      // O32 returns integer vectors in registers and N32/N64 returns all small
      // aggregates in registers.
      if (!IsO32 ||
          (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
        ABIArgInfo ArgInfo =
            ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
        ArgInfo.setInReg(true);
        return ArgInfo;
      }
    }

    return getNaturalAlignIndirect(RetTy);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (RetTy->isPromotableIntegerType())
    return ABIArgInfo::getExtend(RetTy);

  if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
       RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
    return ABIArgInfo::getSignExtend(RetTy);

  return ABIArgInfo::getDirect();
}
void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  ABIArgInfo &RetInfo = FI.getReturnInfo();
  if (!getCXXABI().classifyReturnType(FI))
    RetInfo = classifyReturnType(FI.getReturnType());

  // Check if a pointer to an aggregate is passed as a hidden argument.
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, Offset);
}
Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType OrigTy) const {
  QualType Ty = OrigTy;

  // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
  // Pointers are also promoted in the same way but this only matters for N32.
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  bool DidPromote = false;
  if ((Ty->isIntegerType() &&
          getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    DidPromote = true;
    Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                            Ty->isSignedIntegerType());
  }

  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // The alignment of things in the argument area is never larger than
  // StackAlignInBytes.
  TyInfo.second =
      std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));

  // MinABIStackAlignInBytes is the size of argument slots on the stack.
  CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);

  Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                                  TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);

  // If there was a promotion, "unpromote" into a temporary.
  // TODO: can we just use a pointer into a subset of the original slot?
  if (DidPromote) {
    Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
    llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);

    // Truncate down to the right width.
    llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
                                                 : CGF.IntPtrTy);
    llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
    if (OrigTy->isPointerType())
      V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());

    CGF.Builder.CreateStore(V, Temp);
    Addr = Temp;
  }

  return Addr;
}
ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);

  // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
  if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);

  return ABIArgInfo::getExtend(Ty);
}
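// Illustrative consequence: on N64, an 'unsigned int' parameter is marked
// 'signext' rather than 'zeroext' in the IR, e.g. roughly
//   declare void @f(i32 signext %x)
// matching the MIPS64 convention of keeping 32-bit values sign-extended in
// 64-bit registers.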
bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.

  // Everything on MIPS is 4 bytes.  Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}
//===----------------------------------------------------------------------===//
// AVR ABI Implementation.
//===----------------------------------------------------------------------===//

namespace {
class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AVRTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    auto *Fn = cast<llvm::Function>(GV);

    if (FD->getAttr<AVRInterruptAttr>())
      Fn->addFnAttr("interrupt");

    if (FD->getAttr<AVRSignalAttr>())
      Fn->addFnAttr("signal");
  }
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
      : DefaultTargetCodeGenInfo(CGT) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
void TCETargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C Kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata(
                "opencl.kernel_wg_size_info");

        SmallVector<llvm::Metadata *, 5> Operands;
        Operands.push_back(llvm::ConstantAsMetadata::get(F));

        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

} // end anonymous namespace
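// For illustration: a kernel declared as
//   __kernel __attribute__((reqd_work_group_size(64, 1, 1))) void k(...);
// would contribute a metadata entry along the lines of
//   !opencl.kernel_wg_size_info = !{!0}
//   !0 = !{void (...)* @k, i32 64, i32 1, i32 1, i1 true}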
//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }
};

} // end anonymous namespace
void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}
ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend(Ty)
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  // Pass in the smallest viable integer type.
  if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
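// Illustrative mapping of aggregate argument sizes to coerced types:
//   struct { char c[3]; }  ->  i32  (24 bits, Size > 16)
//   struct { char c[6]; }  ->  i64  (48 bits, Size > 32)
//   struct { char c[9]; }  ->  passed indirectly (Size > 64)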
ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend(RetTy)
                                             : ABIArgInfo::getDirect());
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}
Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // FIXME: Someone needs to audit that this handles alignment correctly.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}
//===----------------------------------------------------------------------===//
// Lanai ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class LanaiABIInfo : public DefaultABIInfo {
public:
  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool shouldUseInReg(QualType Ty, CCState &State) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI.getCallingConvention());
    // Lanai uses 4 registers to pass arguments unless the function has the
    // regparm attribute set.
    if (FI.getHasRegParm()) {
      State.FreeRegs = FI.getRegParm();
    } else {
      State.FreeRegs = 4;
    }

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type, State);
  }

  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
};
} // end anonymous namespace
bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  return true;
}
ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                           CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}
ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
                                              CCState &State) const {
  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, /*ByVal=*/true, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    if (SizeInRegs <= State.FreeRegs) {
      llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      State.FreeRegs -= SizeInRegs;
      return ABIArgInfo::getDirectInReg(Result);
    } else {
      State.FreeRegs = 0;
    }
    return getIndirectResult(Ty, true, State);
  }

  // Treat an enum type as its underlying type.
  if (const auto *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, State);
  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getDirectInReg();
    return ABIArgInfo::getExtend(Ty);
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
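// Illustrative example: with all 4 argument registers free, a 12-byte struct
// (SizeInRegs == 3) is coerced to { i32, i32, i32 } and passed 'inreg',
// leaving one register for later arguments; a 20-byte struct (SizeInRegs == 5)
// no longer fits and is passed indirectly by value instead.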
namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
private:
  static const unsigned MaxNumRegsForArgsRet = 16;

  unsigned numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;

public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
    DefaultABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;
};
bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous Aggregates may occupy at most 16 registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}
/// Estimate number of registers the type will use when passed in registers.
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  unsigned NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Compute from the number of elements. The reported size is based on the
    // in-memory size, which includes the padding 4th element for 3-vectors.
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed as packed.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }

    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}
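// Illustrative estimates: 'float3' counts as 3 registers (element count, not
// the padded 4-element in-memory size), 'half4' packs into 2 registers, and
// 'struct { float a; float b; }' sums its fields to 2 registers.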
void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
    }
  }
}
ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();

      // Lower single-element structs to just return a regular value.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      if (const RecordType *RT = RetTy->getAs<RecordType>()) {
        const RecordDecl *RD = RT->getDecl();
        if (RD->hasFlexibleArrayMember())
          return DefaultABIInfo::classifyReturnType(RetTy);
      }

      // Pack aggregates <= 8 bytes into a single VGPR or pair.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      if (Size <= 64) {
        llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
        return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
      }

      if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}
/// For kernels all parameters are really passed in a special buffer. It doesn't
/// make sense to pass anything byval, so everything must be direct.
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // TODO: Can we omit empty structs?

  // Coerce single element structs to their element type.
  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

  // If we set CanBeFlattened to true, CodeGen will expand the struct to its
  // individual elements, which confuses the Clover OpenCL backend; therefore we
  // have to set it to false here. Other args of getDirect() are just defaults.
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
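// For illustration: a kernel parameter of type 'struct { int x; }' is passed
// as a plain 'i32', while 'struct { int x; int y; }' stays a single aggregate
// in the kernel argument buffer rather than being expanded to two scalars.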
ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      const RecordDecl *RD = RT->getDecl();
      if (RD->hasFlexibleArrayMember())
        return DefaultABIInfo::classifyArgumentType(Ty);
    }

    // Pack aggregates <= 8 bytes into a single VGPR or pair.
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64) {
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      // XXX: Should this be i64 instead, and should the limit increase?
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }

    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }
  }

  // Otherwise just do the default thing.
  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
  if (!ArgInfo.isIndirect()) {
    unsigned NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
  }

  return ArgInfo;
}
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
      llvm::PointerType *T, QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  llvm::Function *
  createEnqueuedBlockKernel(CodeGenFunction &CGF,
                            llvm::Function *BlockInvokeFunc,
                            llvm::Value *BlockLiteral) const override;
  bool shouldEmitStaticExternCAliases() const override;
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
} // end anonymous namespace
static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return D->hasAttr<OpenCLKernelAttr>() ||
         (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
         (isa<VarDecl>(D) &&
          (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
           D->hasAttr<HIPPinnedShadowAttr>()));
}

static bool requiresAMDGPUDefaultVisibility(const Decl *D,
                                            llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return isa<VarDecl>(D) && D->hasAttr<HIPPinnedShadowAttr>();
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUDefaultVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
    GV->setDSOLocal(false);
  } else if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  const auto *ReqdWGS = M.getLangOpts().OpenCL ?
    FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;

  if (((M.getLangOpts().OpenCL && FD->hasAttr<OpenCLKernelAttr>()) ||
       (M.getLangOpts().HIP && FD->hasAttr<CUDAGlobalAttr>())) &&
      (M.getTriple().getOS() == llvm::Triple::AMDHSA))
    F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    unsigned Min = 0;
    unsigned Max = 0;
    if (FlatWGS) {
      Min = FlatWGS->getMin()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
      Max = FlatWGS->getMax()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
    }
    if (ReqdWGS && Min == 0 && Max == 0)
      Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

    if (Min != 0) {
      assert(Min <= Max && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
    unsigned Min =
        Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
    unsigned Max = Attr->getMax() ? Attr->getMax()
                                        ->EvaluateKnownConstInt(M.getContext())
                                        .getExtValue()
                                  : 0;

    if (Min != 0) {
      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }
}
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

// Currently LLVM assumes null pointers always have value 0,
// which results in incorrectly transformed IR. Therefore, instead of
// emitting null pointers in private and local address spaces, a null
// pointer in generic address space is emitted which is cast to a
// pointer in local or private address space.
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(PT->getElementType(),
      Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
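// For illustration: a null '__private int *' would be emitted roughly as
//   addrspacecast (i32* null to i32 addrspace(5)*)
// i.e. a generic-address-space null cast into the private address space,
// because the all-zero bit pattern is a valid non-null private pointer there.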
LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  if (CGM.isTypeConstant(D->getType(), false)) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return ConstAS.getValue();
  }
  return DefaultGlobalAS;
}
llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::OpenCLWorkGroup:
    Name = "workgroup";
    break;
  case SyncScope::OpenCLDevice:
    Name = "agent";
    break;
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  case SyncScope::OpenCLSubGroup:
    Name = "wavefront";
  }

  if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();

    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}
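// Illustrative mapping: an OpenCL atomic with memory_scope_device and
// memory_order_acquire lowers to the sync scope "agent-one-as", while the
// same atomic with memory_order_seq_cst uses the plain "agent" scope.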
bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
}
//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Ensures that complex values are passed in registers.
//
namespace {
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo
SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType()) {
    return ABIArgInfo::getDirect();
  }
  return DefaultABIInfo::classifyReturnType(Ty);
}

void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array, structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }
    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }
    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size = ElemOffset + 64;
          }
          break;
        default:
          break;
        }
      }
    }
    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();

      return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace
ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  return ABIArgInfo::getDirect(CoerceTy);
}
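// Worked example (illustrative): for 'struct mixed { int i; float f; }' the
// builder sees the float at bit offset 32, sets InReg (a float < 64 bits),
// pads the leading int with an i32, and produces the coercion type
// { i32, float }, classified as direct-in-reg.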
Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  CGBuilderTy &Builder = CGF.Builder;
  Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.first;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }

  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }

  case ABIArgInfo::Indirect:
    Stride = SlotSize;
    ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.second);
    break;

  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
  }

  // Update VAList.
  Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
  Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace
bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output.  AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  // 64-71: the 8-byte special registers:
  //   Y   = 64
  //   PSR = 65
  //   WIM = 66
  //   TBR = 67
  //   PC  = 68
  //   NPC = 69
  //   FSR = 70
  //   CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}
// ARC ABI implementation.
namespace {

class ARCABIInfo : public DefaultABIInfo {
public:
  using DefaultABIInfo::DefaultABIInfo;

private:
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
    if (!State.FreeRegs)
      return;
    if (Info.isIndirect() && Info.getInReg())
      State.FreeRegs--;
    else if (Info.isDirect() && Info.getInReg()) {
      unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
      if (sz < State.FreeRegs)
        State.FreeRegs -= sz;
      else
        State.FreeRegs = 0;
    }
  }

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI.getCallingConvention());
    // ARC uses 8 registers to pass arguments.
    State.FreeRegs = 8;

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    updateState(FI.getReturnInfo(), FI.getReturnType(), State);
    for (auto &I : FI.arguments()) {
      I.info = classifyArgumentType(I.type, State.FreeRegs);
      updateState(I.info, I.type, State);
    }
  }

  ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
  ABIArgInfo getIndirectByValue(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;
};
class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARCTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new ARCABIInfo(CGT)) {}
};
ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
  return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
                       getNaturalAlignIndirect(Ty, false);
}

ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 TypeAlign > MinABIStackAlignInBytes);
}

Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), true);
}
ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
                                            uint8_t FreeRegs) const {
  // Handle the generic C++ ABI.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect)
      return getIndirectByRef(Ty, FreeRegs > 0);

    if (RAA == CGCXXABI::RAA_DirectInMemory)
      return getIndirectByValue(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectByValue(Ty);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();

    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
    llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);

    return FreeRegs >= SizeInRegs ?
        ABIArgInfo::getDirectInReg(Result) :
        ABIArgInfo::getDirect(Result, 0, nullptr, false);
  }

  return Ty->isPromotableIntegerType() ?
      (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty) :
                                ABIArgInfo::getExtend(Ty)) :
      (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg() :
                                ABIArgInfo::getDirect());
}
ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirectInReg();

  // Arguments of size > 4 registers are indirect.
  auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
  if (RetSize > 4)
    return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);

  return DefaultABIInfo::classifyReturnType(RetTy);
}

} // End anonymous namespace.
8538 //===----------------------------------------------------------------------===//
8539 // XCore ABI Implementation
8540 //===----------------------------------------------------------------------===//
8542 namespace {
8544 /// A SmallStringEnc instance is used to build up the TypeString by passing
8545 /// it by reference between functions that append to it.
8546 typedef llvm::SmallString<128> SmallStringEnc;
8548 /// TypeStringCache caches the meta encodings of Types.
8550 /// The reason for caching TypeStrings is twofold:
8551 /// 1. To cache a type's encoding for later uses;
8552 /// 2. As a means to break recursive member type inclusion.
8554 /// A cache Entry can have a Status of:
8555 /// NonRecursive: The type encoding is not recursive;
8556 /// Recursive: The type encoding is recursive;
8557 /// Incomplete: An incomplete TypeString;
8558 /// IncompleteUsed: An incomplete TypeString that has been used in a
8559 /// Recursive type encoding.
8561 /// A NonRecursive entry will have all of its sub-members expanded as fully
8562 /// as possible. Whilst it may contain types which are recursive, the type
8563 /// itself is not recursive and thus its encoding may be safely used whenever
8564 /// the type is encountered.
8566 /// A Recursive entry will have all of its sub-members expanded as fully as
8567 /// possible. The type itself is recursive and it may contain other types which
8568 /// are recursive. The Recursive encoding must not be used during the expansion
8569 /// of a recursive type's recursive branch. For simplicity the code uses
8570 /// IncompleteCount to reject all usage of Recursive encodings for member types.
8572 /// An Incomplete entry is always a RecordType and only encodes its
8573 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
8574 /// are placed into the cache during type expansion as a means to identify and
8575 /// handle recursive inclusion of types as sub-members. If there is recursion
8576 /// the entry becomes IncompleteUsed.
8578 /// During the expansion of a RecordType's members:
8580 /// If the cache contains a NonRecursive encoding for the member type, the
8581 /// cached encoding is used;
8583 /// If the cache contains a Recursive encoding for the member type, the
8584 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
8586 /// If the member is a RecordType, an Incomplete encoding is placed into the
8587 /// cache to break potential recursive inclusion of itself as a sub-member;
8589 /// Once a member RecordType has been expanded, its temporary incomplete
8590 /// entry is removed from the cache. If a Recursive encoding was swapped out
8591 /// it is swapped back in;
8593 /// If an incomplete entry is used to expand a sub-member, the incomplete
8594 /// entry is marked as IncompleteUsed. The cache keeps count of how many
8595 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
8597 /// If a member's encoding is found to be a NonRecursive or Recursive viz:
8598 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
8599 /// Else the member is part of a recursive type and thus the recursion has
8600 /// been exited too soon for the encoding to be correct for the member.
8602 class TypeStringCache {
8603 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
8604 struct Entry {
8605 std::string Str; // The encoded TypeString for the type.
8606 enum Status State; // Information about the encoding in 'Str'.
8607 std::string Swapped; // A temporary place holder for a Recursive encoding
8608 // during the expansion of RecordType's members.
8609 };
8610 std::map<const IdentifierInfo *, struct Entry> Map;
8611 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
8612 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
8613 public:
8614 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8615 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
8616 bool removeIncomplete(const IdentifierInfo *ID);
8617 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
8618 bool IsRecursive);
8619 StringRef lookupStr(const IdentifierInfo *ID);
8620 };
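// Worked example (illustrative): encoding the self-referential struct
//
//   struct S { struct S *next; };
//
// 1. appendRecordType(S) misses the cache and calls addIncomplete(S) with
//    the stub "s(S){}" before expanding members.
// 2. Expanding 'next' re-enters S: lookupStr(S) returns the stub and the
//    entry becomes IncompleteUsed.
// 3. removeIncomplete(S) therefore returns true, and the completed string
//    is cached as Recursive by addIfComplete().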
8622 /// TypeString encodings for enum & union fields must be ordered.
8623 /// FieldEncoding is a helper for this ordering process.
8624 class FieldEncoding {
8625 bool HasName;
8626 std::string Enc;
8627 public:
8628 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
8629 StringRef str() { return Enc; }
8630 bool operator<(const FieldEncoding &rhs) const {
8631 if (HasName != rhs.HasName) return HasName;
8632 return Enc < rhs.Enc;
8633 }
8634 };
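// Note: operator< orders named fields ahead of unnamed ones (when the
// HasName flags differ, the named entry compares smaller), with ties broken
// lexicographically on the encoded string.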
8636 class XCoreABIInfo : public DefaultABIInfo {
8637 public:
8638 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8639 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8640 QualType Ty) const override;
8641 };
8643 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
8644 mutable TypeStringCache TSC;
8645 public:
8646 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
8647 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
8648 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
8649 CodeGen::CodeGenModule &M) const override;
8650 };
8652 } // End anonymous namespace.
8654 // TODO: this implementation is likely now redundant with the default
8655 // EmitVAArg.
8656 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8657 QualType Ty) const {
8658 CGBuilderTy &Builder = CGF.Builder;
8660 // Get the VAList.
8661 CharUnits SlotSize = CharUnits::fromQuantity(4);
8662 Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
8664 // Handle the argument.
8665 ABIArgInfo AI = classifyArgumentType(Ty);
8666 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
8667 llvm::Type *ArgTy = CGT.ConvertType(Ty);
8668 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8669 AI.setCoerceToType(ArgTy);
8670 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8672 Address Val = Address::invalid();
8673 CharUnits ArgSize = CharUnits::Zero();
8674 switch (AI.getKind()) {
8675 case ABIArgInfo::Expand:
8676 case ABIArgInfo::CoerceAndExpand:
8677 case ABIArgInfo::InAlloca:
8678 llvm_unreachable("Unsupported ABI kind for va_arg");
8679 case ABIArgInfo::Ignore:
8680 Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
8681 ArgSize = CharUnits::Zero();
8682 break;
8683 case ABIArgInfo::Extend:
8684 case ABIArgInfo::Direct:
8685 Val = Builder.CreateBitCast(AP, ArgPtrTy);
8686 ArgSize = CharUnits::fromQuantity(
8687 getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
8688 ArgSize = ArgSize.alignTo(SlotSize);
8689 break;
8690 case ABIArgInfo::Indirect:
8691 Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
8692 Val = Address(Builder.CreateLoad(Val), TypeAlign);
8693 ArgSize = SlotSize;
8694 break;
8695 }
8697 // Increment the VAList.
8698 if (!ArgSize.isZero()) {
8699 Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
8700 Builder.CreateStore(APN.getPointer(), VAListAddr);
8701 }
8703 return Val;
8704 }
8706 /// During the expansion of a RecordType, an incomplete TypeString is placed
8707 /// into the cache as a means to identify and break recursion.
8708 /// If there is a Recursive encoding in the cache, it is swapped out and will
8709 /// be reinserted by removeIncomplete().
8710 /// All other types of encoding should have been used rather than arriving here.
8711 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
8712 std::string StubEnc) {
8713 if (!ID)
8714 return;
8715 Entry &E = Map[ID];
8716 assert( (E.Str.empty() || E.State == Recursive) &&
8717 "Incorrect use of addIncomplete");
8718 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
8719 E.Swapped.swap(E.Str); // swap out the Recursive
8720 E.Str.swap(StubEnc);
8721 E.State = Incomplete;
8722 ++IncompleteCount;
8723 }
8725 /// Once the RecordType has been expanded, the temporary incomplete TypeString
8726 /// must be removed from the cache.
8727 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
8728 /// Returns true if the RecordType was defined recursively.
8729 bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
8730 if (!ID)
8731 return false;
8732 auto I = Map.find(ID);
8733 assert(I != Map.end() && "Entry not present");
8734 Entry &E = I->second;
8735 assert( (E.State == Incomplete ||
8736 E.State == IncompleteUsed) &&
8737 "Entry must be an incomplete type");
8738 bool IsRecursive = false;
8739 if (E.State == IncompleteUsed) {
8740 // We made use of our Incomplete encoding, thus we are recursive.
8741 IsRecursive = true;
8742 --IncompleteUsedCount;
8743 }
8744 if (E.Swapped.empty())
8745 Map.erase(I);
8746 else {
8747 // Swap the Recursive back.
8748 E.Swapped.swap(E.Str);
8749 E.Swapped.clear();
8750 E.State = Recursive;
8751 }
8752 --IncompleteCount;
8753 return IsRecursive;
8754 }
8756 /// Add the encoded TypeString to the cache only if it is NonRecursive or
8757 /// Recursive (viz: all sub-members were expanded as fully as possible).
8758 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
8759 bool IsRecursive) {
8760 if (!ID || IncompleteUsedCount)
8761 return; // No key, or it is an incomplete sub-type, so don't add.
8762 Entry &E = Map[ID];
8763 if (IsRecursive && !E.Str.empty()) {
8764 assert(E.State==Recursive && E.Str.size() == Str.size() &&
8765 "This is not the same Recursive entry");
8766 // The parent container was not recursive after all, so we could have used
8767 // this Recursive sub-member entry after all, but we assumed the worst when
8768 // we started viz: IncompleteCount!=0.
8769 return;
8770 }
8771 assert(E.Str.empty() && "Entry already present");
8772 E.Str = Str.str();
8773 E.State = IsRecursive? Recursive : NonRecursive;
8774 }
8776 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
8777 /// are recursively expanding a type (IncompleteCount != 0) and the cached
8778 /// encoding is Recursive, return an empty StringRef.
8779 StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
8780 if (!ID)
8781 return StringRef(); // We have no key.
8782 auto I = Map.find(ID);
8783 if (I == Map.end())
8784 return StringRef(); // We have no encoding.
8785 Entry &E = I->second;
8786 if (E.State == Recursive && IncompleteCount)
8787 return StringRef(); // We don't use Recursive encodings for member types.
8789 if (E.State == Incomplete) {
8790 // The incomplete type is being used to break out of recursion.
8791 E.State = IncompleteUsed;
8792 ++IncompleteUsedCount;
8793 }
8794 return E.Str;
8795 }
8797 /// The XCore ABI includes a type information section that communicates symbol
8798 /// type information to the linker. The linker uses this information to verify
8799 /// safety/correctness of things such as array bounds and pointers, et al.
8800 /// The ABI only requires C (and XC) language modules to emit TypeStrings.
8801 /// This type information (TypeString) is emitted into meta data for all global
8802 /// symbols: definitions, declarations, functions & variables.
8804 /// The TypeString carries type, qualifier, name, size & value details.
8805 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
8806 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
8807 /// The output is tested by test/CodeGen/xcore-stringtype.c.
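// Illustrative encodings (derived from the append* helpers below, not
// quoted from the guide):
//   extern int g;        -> metadata !{i32* @g, !"si"}
//   int f(int x, ...);   -> !{i32 (i32, ...)* @f, !"f{si}(si,va)"}
//   struct S { int a; }; -> "s(S){m(a){si}}"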
8809 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
8810 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
8812 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
8813 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
8814 CodeGen::CodeGenModule &CGM) const {
8815 SmallStringEnc Enc;
8816 if (getTypeString(Enc, D, CGM, TSC)) {
8817 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
8818 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
8819 llvm::MDString::get(Ctx, Enc.str())};
8820 llvm::NamedMDNode *MD =
8821 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
8822 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
8823 }
8824 }
8826 //===----------------------------------------------------------------------===//
8827 // SPIR ABI Implementation
8828 //===----------------------------------------------------------------------===//
8830 namespace {
8831 class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
8832 public:
8833 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
8834 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
8835 unsigned getOpenCLKernelCallingConv() const override;
8836 };
8838 } // End anonymous namespace.
8840 namespace clang {
8841 namespace CodeGen {
8842 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
8843 DefaultABIInfo SPIRABI(CGM.getTypes());
8844 SPIRABI.computeInfo(FI);
8845 }
8846 } // namespace CodeGen
8847 } // namespace clang
8849 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
8850 return llvm::CallingConv::SPIR_KERNEL;
8851 }
8853 static bool appendType(SmallStringEnc &Enc, QualType QType,
8854 const CodeGen::CodeGenModule &CGM,
8855 TypeStringCache &TSC);
8857 /// Helper function for appendRecordType().
8858 /// Builds a SmallVector containing the encoded field types in declaration
8859 /// order.
8860 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
8861 const RecordDecl *RD,
8862 const CodeGen::CodeGenModule &CGM,
8863 TypeStringCache &TSC) {
8864 for (const auto *Field : RD->fields()) {
8865 SmallStringEnc Enc;
8866 Enc += "m(";
8867 Enc += Field->getName();
8868 Enc += "){";
8869 if (Field->isBitField()) {
8870 Enc += "b(";
8871 llvm::raw_svector_ostream OS(Enc);
8872 OS << Field->getBitWidthValue(CGM.getContext());
8873 Enc += ':';
8874 }
8875 if (!appendType(Enc, Field->getType(), CGM, TSC))
8876 return false;
8877 if (Field->isBitField())
8878 Enc += ')';
8879 Enc += '}';
8880 FE.emplace_back(!Field->getName().empty(), Enc);
8881 }
8882 return true;
8883 }
8885 /// Appends structure and union types to Enc and adds encoding to cache.
8886 /// Recursively calls appendType (via extractFieldType) for each field.
8887 /// Union types have their fields ordered according to the ABI.
8888 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
8889 const CodeGen::CodeGenModule &CGM,
8890 TypeStringCache &TSC, const IdentifierInfo *ID) {
8891 // Append the cached TypeString if we have one.
8892 StringRef TypeString = TSC.lookupStr(ID);
8893 if (!TypeString.empty()) {
8894 Enc += TypeString;
8895 return true;
8896 }
8898 // Start to emit an incomplete TypeString.
8899 size_t Start = Enc.size();
8900 Enc += (RT->isUnionType()? 'u' : 's');
8901 Enc += '(';
8902 if (ID)
8903 Enc += ID->getName();
8904 Enc += "){";
8906 // We collect all encoded fields and order as necessary.
8907 bool IsRecursive = false;
8908 const RecordDecl *RD = RT->getDecl()->getDefinition();
8909 if (RD && !RD->field_empty()) {
8910 // An incomplete TypeString stub is placed in the cache for this RecordType
8911 // so that recursive calls to this RecordType will use it whilst building a
8912 // complete TypeString for this RecordType.
8913 SmallVector<FieldEncoding, 16> FE;
8914 std::string StubEnc(Enc.substr(Start).str());
8915 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
8916 TSC.addIncomplete(ID, std::move(StubEnc));
8917 if (!extractFieldType(FE, RD, CGM, TSC)) {
8918 (void) TSC.removeIncomplete(ID);
8919 return false;
8920 }
8921 IsRecursive = TSC.removeIncomplete(ID);
8922 // The ABI requires unions to be sorted but not structures.
8923 // See FieldEncoding::operator< for sort algorithm.
8924 if (RT->isUnionType())
8925 llvm::sort(FE);
8926 // We can now complete the TypeString.
8927 unsigned E = FE.size();
8928 for (unsigned I = 0; I != E; ++I) {
8929 if (I)
8930 Enc += ',';
8931 Enc += FE[I].str();
8932 }
8933 }
8934 Enc += '}';
8935 TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
8936 return true;
8937 }
8939 /// Appends enum types to Enc and adds the encoding to the cache.
8940 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
8941 TypeStringCache &TSC,
8942 const IdentifierInfo *ID) {
8943 // Append the cached TypeString if we have one.
8944 StringRef TypeString = TSC.lookupStr(ID);
8945 if (!TypeString.empty()) {
8946 Enc += TypeString;
8947 return true;
8948 }
8950 size_t Start = Enc.size();
8951 Enc += "e(";
8952 if (ID)
8953 Enc += ID->getName();
8954 Enc += "){";
8956 // We collect all encoded enumerations and order them alphanumerically.
8957 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
8958 SmallVector<FieldEncoding, 16> FE;
8959 for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
8960 ++I) {
8961 SmallStringEnc EnumEnc;
8962 EnumEnc += "m(";
8963 EnumEnc += I->getName();
8964 EnumEnc += "){";
8965 I->getInitVal().toString(EnumEnc);
8966 EnumEnc += '}';
8967 FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
8968 }
8969 llvm::sort(FE);
8970 unsigned E = FE.size();
8971 for (unsigned I = 0; I != E; ++I) {
8972 if (I)
8973 Enc += ',';
8974 Enc += FE[I].str();
8975 }
8976 }
8977 Enc += '}';
8978 TSC.addIfComplete(ID, Enc.substr(Start), false);
8979 return true;
8980 }
8982 /// Appends type's qualifier to Enc.
8983 /// This is done prior to appending the type's encoding.
8984 static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
8985 // Qualifiers are emitted in alphabetical order.
8986 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
8987 int Lookup = 0;
8988 if (QT.isConstQualified())
8989 Lookup += 1<<0;
8990 if (QT.isRestrictQualified())
8991 Lookup += 1<<1;
8992 if (QT.isVolatileQualified())
8993 Lookup += 1<<2;
8994 Enc += Table[Lookup];
8995 }
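// E.g. 'const volatile int' sets Lookup to (1<<0) | (1<<2) == 5, so the
// qualifier prefix is Table[5] == "cv:" and the full encoding is "cv:si".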
8997 /// Appends built-in types to Enc.
8998 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
8999 const char *EncType;
9000 switch (BT->getKind()) {
9001 case BuiltinType::Void: EncType = "0"; break;
9004 case BuiltinType::Bool: EncType = "b"; break;
9007 case BuiltinType::Char_U: EncType = "uc"; break;
9010 case BuiltinType::UChar: EncType = "uc"; break;
9013 case BuiltinType::SChar: EncType = "sc"; break;
9016 case BuiltinType::UShort: EncType = "us"; break;
9019 case BuiltinType::Short: EncType = "ss"; break;
9022 case BuiltinType::UInt: EncType = "ui"; break;
9025 case BuiltinType::Int: EncType = "si"; break;
9028 case BuiltinType::ULong: EncType = "ul"; break;
9031 case BuiltinType::Long: EncType = "sl"; break;
9034 case BuiltinType::ULongLong: EncType = "ull"; break;
9037 case BuiltinType::LongLong: EncType = "sll"; break;
9040 case BuiltinType::Float: EncType = "ft"; break;
9043 case BuiltinType::Double: EncType = "d"; break;
9046 case BuiltinType::LongDouble: EncType = "ld"; break;
9049 default: return false;
9051 }
9052 Enc += EncType;
9053 return true;
9054 }
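// E.g. 'unsigned int' encodes as "ui" and 'long long' as "sll"; any builtin
// kind not listed above (e.g. __int128) hits the default case, the helper
// returns false, and no TypeString is emitted for that symbol.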
9056 /// Appends a pointer encoding to Enc before calling appendType for the pointee.
9057 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
9058 const CodeGen::CodeGenModule &CGM,
9059 TypeStringCache &TSC) {
9060 Enc += "p(";
9061 if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
9062 return false;
9063 Enc += ')';
9064 return true;
9065 }
9067 /// Appends array encoding to Enc before calling appendType for the element.
9068 static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
9069 const ArrayType *AT,
9070 const CodeGen::CodeGenModule &CGM,
9071 TypeStringCache &TSC, StringRef NoSizeEnc) {
9072 if (AT->getSizeModifier() != ArrayType::Normal)
9073 return false;
9074 Enc += "a(";
9075 if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
9076 CAT->getSize().toStringUnsigned(Enc);
9077 else
9078 Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
9079 Enc += ':';
9080 // The Qualifiers should be attached to the type rather than the array.
9081 appendQualifier(Enc, QT);
9082 if (!appendType(Enc, AT->getElementType(), CGM, TSC))
9083 return false;
9084 Enc += ')';
9085 return true;
9086 }
9088 /// Appends a function encoding to Enc, calling appendType for the return type
9089 /// and the arguments.
9090 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
9091 const CodeGen::CodeGenModule &CGM,
9092 TypeStringCache &TSC) {
9093 Enc += "f{";
9094 if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
9095 return false;
9096 Enc += "}(";
9097 if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
9098 // N.B. we are only interested in the adjusted param types.
9099 auto I = FPT->param_type_begin();
9100 auto E = FPT->param_type_end();
9101 if (I != E) {
9102 do {
9103 if (!appendType(Enc, *I, CGM, TSC))
9104 return false;
9105 ++I;
9106 if (I != E)
9107 Enc += ',';
9108 } while (I != E);
9109 if (FPT->isVariadic())
9110 Enc += ",va";
9111 } else {
9112 if (FPT->isVariadic())
9113 Enc += "va";
9114 else
9115 Enc += '0';
9116 }
9117 }
9118 Enc += ')';
9119 return true;
9120 }
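// E.g. 'int f(int, ...)' encodes as "f{si}(si,va)", while a prototype with
// no parameters encodes its empty parameter list as "0", giving
// 'int f(void)' -> "f{si}(0)".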
9122 /// Handles the type's qualifier before dispatching a call to handle specific
9123 /// type encodings.
9124 static bool appendType(SmallStringEnc &Enc, QualType QType,
9125 const CodeGen::CodeGenModule &CGM,
9126 TypeStringCache &TSC) {
9128 QualType QT = QType.getCanonicalType();
9130 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
9131 // The Qualifiers should be attached to the type rather than the array.
9132 // Thus we don't call appendQualifier() here.
9133 return appendArrayType(Enc, QT, AT, CGM, TSC, "");
9135 appendQualifier(Enc, QT);
9137 if (const BuiltinType *BT = QT->getAs<BuiltinType>())
9138 return appendBuiltinType(Enc, BT);
9140 if (const PointerType *PT = QT->getAs<PointerType>())
9141 return appendPointerType(Enc, PT, CGM, TSC);
9143 if (const EnumType *ET = QT->getAs<EnumType>())
9144 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
9146 if (const RecordType *RT = QT->getAsStructureType())
9147 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
9149 if (const RecordType *RT = QT->getAsUnionType())
9150 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
9152 if (const FunctionType *FT = QT->getAs<FunctionType>())
9153 return appendFunctionType(Enc, FT, CGM, TSC);
9155 return false;
9156 }
9158 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
9159 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
9160 if (!D)
9161 return false;
9163 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
9164 if (FD->getLanguageLinkage() != CLanguageLinkage)
9165 return false;
9166 return appendType(Enc, FD->getType(), CGM, TSC);
9167 }
9169 if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
9170 if (VD->getLanguageLinkage() != CLanguageLinkage)
9171 return false;
9172 QualType QT = VD->getType().getCanonicalType();
9173 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
9174 // Global ArrayTypes are given a size of '*' if the size is unknown.
9175 // The Qualifiers should be attached to the type rather than the array.
9176 // Thus we don't call appendQualifier() here.
9177 return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
9178 }
9179 return appendType(Enc, QT, CGM, TSC);
9180 }
9181 return false;
9182 }
9184 //===----------------------------------------------------------------------===//
9185 // RISCV ABI Implementation
9186 //===----------------------------------------------------------------------===//
9188 namespace {
9189 class RISCVABIInfo : public DefaultABIInfo {
9190 private:
9191 // Size of the integer ('x') registers in bits.
9192 unsigned XLen;
9193 // Size of the floating point ('f') registers in bits. Note that the target
9194 // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
9195 // with soft float ABI has FLen==0).
9196 unsigned FLen;
9197 static const int NumArgGPRs = 8;
9198 static const int NumArgFPRs = 8;
9199 bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
9200 llvm::Type *&Field1Ty,
9201 CharUnits &Field1Off,
9202 llvm::Type *&Field2Ty,
9203 CharUnits &Field2Off) const;
9205 public:
9206 RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
9207 : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}
9209 // DefaultABIInfo's classifyReturnType and classifyArgumentType are
9210 // non-virtual, but computeInfo is virtual, so we override it.
9211 void computeInfo(CGFunctionInfo &FI) const override;
9213 ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
9214 int &ArgFPRsLeft) const;
9215 ABIArgInfo classifyReturnType(QualType RetTy) const;
9217 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9218 QualType Ty) const override;
9220 ABIArgInfo extendType(QualType Ty) const;
9222 bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
9223 CharUnits &Field1Off, llvm::Type *&Field2Ty,
9224 CharUnits &Field2Off, int &NeededArgGPRs,
9225 int &NeededArgFPRs) const;
9226 ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
9227 CharUnits Field1Off,
9228 llvm::Type *Field2Ty,
9229 CharUnits Field2Off) const;
9230 };
9231 } // end anonymous namespace
9233 void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
9234 QualType RetTy = FI.getReturnType();
9235 if (!getCXXABI().classifyReturnType(FI))
9236 FI.getReturnInfo() = classifyReturnType(RetTy);
9238 // IsRetIndirect is true if classifyArgumentType indicated the value should
9239 // be passed indirect, or if the type size is greater than 2*XLen. e.g. fp128
9240 // is passed direct in LLVM IR, relying on the backend lowering code to
9241 // rewrite the argument list and pass indirectly on RV32.
9242 bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect ||
9243 getContext().getTypeSize(RetTy) > (2 * XLen);
9245 // We must track the number of GPRs used in order to conform to the RISC-V
9246 // ABI, as integer scalars passed in registers should have signext/zeroext
9247 // when promoted, but are anyext if passed on the stack. As GPR usage is
9248 // different for variadic arguments, we must also track whether we are
9249 // examining a vararg or not.
9250 int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
9251 int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
9252 int NumFixedArgs = FI.getNumRequiredArgs();
9254 int ArgNum = 0;
9255 for (auto &ArgInfo : FI.arguments()) {
9256 bool IsFixed = ArgNum < NumFixedArgs;
9257 ArgInfo.info =
9258 classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
9259 ArgNum++;
9260 }
9261 }
9263 // Returns true if the struct is a potential candidate for the floating point
9264 // calling convention. If this function returns true, the caller is
9265 // responsible for checking that if there is only a single field then that
9266 // field is a float.
9267 bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
9268 llvm::Type *&Field1Ty,
9269 CharUnits &Field1Off,
9270 llvm::Type *&Field2Ty,
9271 CharUnits &Field2Off) const {
9272 bool IsInt = Ty->isIntegralOrEnumerationType();
9273 bool IsFloat = Ty->isRealFloatingType();
9275 if (IsInt || IsFloat) {
9276 uint64_t Size = getContext().getTypeSize(Ty);
9277 if (IsInt && Size > XLen)
9278 return false;
9279 // Can't be eligible if larger than the FP registers. Half precision isn't
9280 // currently supported on RISC-V and the ABI hasn't been confirmed, so
9281 // default to the integer ABI in that case.
9282 if (IsFloat && (Size > FLen || Size < 32))
9283 return false;
9284 // Can't be eligible if an integer type was already found (int+int pairs
9285 // are not eligible).
9286 if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
9287 return false;
9288 if (!Field1Ty) {
9289 Field1Ty = CGT.ConvertType(Ty);
9290 Field1Off = CurOff;
9291 return true;
9292 }
9293 if (!Field2Ty) {
9294 Field2Ty = CGT.ConvertType(Ty);
9295 Field2Off = CurOff;
9296 return true;
9297 }
9298 return false;
9299 }
9301 if (auto CTy = Ty->getAs<ComplexType>()) {
9302 if (Field1Ty)
9303 return false;
9304 QualType EltTy = CTy->getElementType();
9305 if (getContext().getTypeSize(EltTy) > FLen)
9306 return false;
9307 Field1Ty = CGT.ConvertType(EltTy);
9308 Field1Off = CurOff;
9309 assert(CurOff.isZero() && "Unexpected offset for first field");
9310 Field2Ty = Field1Ty;
9311 Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
9312 return true;
9313 }
9315 if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
9316 uint64_t ArraySize = ATy->getSize().getZExtValue();
9317 QualType EltTy = ATy->getElementType();
9318 CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
9319 for (uint64_t i = 0; i < ArraySize; ++i) {
9320 bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
9321 Field1Off, Field2Ty, Field2Off);
9322 if (!Ret)
9323 return false;
9324 CurOff += EltSize;
9325 }
9326 return true;
9327 }
9329 if (const auto *RTy = Ty->getAs<RecordType>()) {
9330 // Structures with either a non-trivial destructor or a non-trivial
9331 // copy constructor are not eligible for the FP calling convention.
9332 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, CGT.getCXXABI()))
9333 return false;
9334 if (isEmptyRecord(getContext(), Ty, true))
9335 return true;
9336 const RecordDecl *RD = RTy->getDecl();
9337 // Unions aren't eligible unless they're empty (which is caught above).
9338 if (RD->isUnion())
9339 return false;
9340 int ZeroWidthBitFieldCount = 0;
9341 for (const FieldDecl *FD : RD->fields()) {
9342 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
9343 uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
9344 QualType QTy = FD->getType();
9345 if (FD->isBitField()) {
9346 unsigned BitWidth = FD->getBitWidthValue(getContext());
9347 // Allow a bitfield with a type greater than XLen as long as the
9348 // bitwidth is XLen or less.
9349 if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
9350 QTy = getContext().getIntTypeForBitwidth(XLen, false);
9351 if (BitWidth == 0) {
9352 ZeroWidthBitFieldCount++;
9353 continue;
9354 }
9355 }
9357 bool Ret = detectFPCCEligibleStructHelper(
9358 QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
9359 Field1Ty, Field1Off, Field2Ty, Field2Off);
9360 if (!Ret)
9361 return false;
9363 // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
9364 // or int+fp structs, but are ignored for a struct with an fp field and
9365 // any number of zero-width bitfields.
9366 if (Field2Ty && ZeroWidthBitFieldCount > 0)
9367 return false;
9368 }
9369 return Field1Ty != nullptr;
9370 }
9372 return false;
9373 }
9375 // Determine if a struct is eligible for passing according to the floating
9376 // point calling convention (i.e., when flattened it contains a single fp
9377 // value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
9378 // NeededArgGPRs are incremented appropriately.
9379 bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
9380 CharUnits &Field1Off,
9381 llvm::Type *&Field2Ty,
9382 CharUnits &Field2Off,
9383 int &NeededArgGPRs,
9384 int &NeededArgFPRs) const {
9385 Field1Ty = nullptr;
9386 Field2Ty = nullptr;
9387 NeededArgGPRs = 0;
9388 NeededArgFPRs = 0;
9389 bool IsCandidate = detectFPCCEligibleStructHelper(
9390 Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
9391 // Not really a candidate if we have a single int but no float.
9392 if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
9393 return IsCandidate = false;
9394 if (!IsCandidate)
9395 return false;
9396 if (Field1Ty && Field1Ty->isFloatingPointTy())
9397 NeededArgFPRs++;
9398 else if (Field1Ty)
9399 NeededArgGPRs++;
9400 if (Field2Ty && Field2Ty->isFloatingPointTy())
9401 NeededArgFPRs++;
9402 else if (Field2Ty)
9403 NeededArgGPRs++;
9404 return IsCandidate;
9405 }
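// Illustrative examples (assuming a hard-double ABI, FLen == 64):
//   struct { double d; float f; } -> eligible, NeededArgFPRs == 2
//   struct { int i; double d; }   -> eligible, 1 GPR + 1 FPR
//   struct { int a; int b; }      -> not eligible (int+int pairs rejected)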
9407 // Call getCoerceAndExpand for the two-element flattened struct described by
9408 // Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
9409 // appropriate coerceToType and unpaddedCoerceToType.
9410 ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
9411 llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
9412 CharUnits Field2Off) const {
9413 SmallVector<llvm::Type *, 3> CoerceElts;
9414 SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
9415 if (!Field1Off.isZero())
9416 CoerceElts.push_back(llvm::ArrayType::get(
9417 llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));
9419 CoerceElts.push_back(Field1Ty);
9420 UnpaddedCoerceElts.push_back(Field1Ty);
9422 if (!Field2Ty) {
9423 return ABIArgInfo::getCoerceAndExpand(
9424 llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
9425 UnpaddedCoerceElts[0]);
9426 }
9428 CharUnits Field2Align =
9429 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
9430 CharUnits Field1Size =
9431 CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
9432 CharUnits Field2OffNoPadNoPack = Field1Size.alignTo(Field2Align);
9434 CharUnits Padding = CharUnits::Zero();
9435 if (Field2Off > Field2OffNoPadNoPack)
9436 Padding = Field2Off - Field2OffNoPadNoPack;
9437 else if (Field2Off != Field2Align && Field2Off > Field1Size)
9438 Padding = Field2Off - Field1Size;
9440 bool IsPacked = !Field2Off.isMultipleOf(Field2Align);
9442 if (!Padding.isZero())
9443 CoerceElts.push_back(llvm::ArrayType::get(
9444 llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));
9446 CoerceElts.push_back(Field2Ty);
9447 UnpaddedCoerceElts.push_back(Field2Ty);
9449 auto CoerceToType =
9450 llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
9451 auto UnpaddedCoerceToType =
9452 llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);
9454 return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
9455 }
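// E.g. struct { float f; double d; }: Field2Align == 8 and Field1Size == 4,
// so Field2OffNoPadNoPack == 8 == Field2Off; no padding array is needed and
// the result coerces to the (non-packed) LLVM struct { float, double }.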
9457 ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
9458 int &ArgGPRsLeft,
9459 int &ArgFPRsLeft) const {
9460 assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
9461 Ty = useFirstFieldIfTransparentUnion(Ty);
9463 // Structures with either a non-trivial destructor or a non-trivial
9464 // copy constructor are always passed indirectly.
9465 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
9466 if (ArgGPRsLeft)
9467 ArgGPRsLeft -= 1;
9468 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
9469 CGCXXABI::RAA_DirectInMemory);
9470 }
9472 // Ignore empty structs/unions.
9473 if (isEmptyRecord(getContext(), Ty, true))
9474 return ABIArgInfo::getIgnore();
9476 uint64_t Size = getContext().getTypeSize(Ty);
9478 // Pass floating point values via FPRs if possible.
9479 if (IsFixed && Ty->isFloatingType() && FLen >= Size && ArgFPRsLeft) {
9480 ArgFPRsLeft--;
9481 return ABIArgInfo::getDirect();
9482 }
9484 // Complex types for the hard float ABI must be passed direct rather than
9485 // using CoerceAndExpand.
9486 if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
9487 QualType EltTy = Ty->getAs<ComplexType>()->getElementType();
9488 if (getContext().getTypeSize(EltTy) <= FLen) {
9489 ArgFPRsLeft -= 2;
9490 return ABIArgInfo::getDirect();
9491 }
9492 }
9494 if (IsFixed && FLen && Ty->isStructureOrClassType()) {
9495 llvm::Type *Field1Ty = nullptr;
9496 llvm::Type *Field2Ty = nullptr;
9497 CharUnits Field1Off = CharUnits::Zero();
9498 CharUnits Field2Off = CharUnits::Zero();
9499 int NeededArgGPRs = 0;
9500 int NeededArgFPRs = 0;
9501 bool IsCandidate =
9502 detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
9503 NeededArgGPRs, NeededArgFPRs);
9504 if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
9505 NeededArgFPRs <= ArgFPRsLeft) {
9506 ArgGPRsLeft -= NeededArgGPRs;
9507 ArgFPRsLeft -= NeededArgFPRs;
9508 return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
9509 Field2Off);
9510 }
9511 }
9513 uint64_t NeededAlign = getContext().getTypeAlign(Ty);
9514 bool MustUseStack = false;
9515 // Determine the number of GPRs needed to pass the current argument
9516 // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
9517 // register pairs, so may consume 3 registers.
9518 int NeededArgGPRs = 1;
9519 if (!IsFixed && NeededAlign == 2 * XLen)
9520 NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
9521 else if (Size > XLen && Size <= 2 * XLen)
9522 NeededArgGPRs = 2;
9524 if (NeededArgGPRs > ArgGPRsLeft) {
9525 MustUseStack = true;
9526 NeededArgGPRs = ArgGPRsLeft;
9527 }
9529 ArgGPRsLeft -= NeededArgGPRs;
9531 if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
9532 // Treat an enum type as its underlying type.
9533 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
9534 Ty = EnumTy->getDecl()->getIntegerType();
9536 // All integral types are promoted to XLen width, unless passed on the
9537 // stack.
9538 if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
9539 return extendType(Ty);
9540 }
9542 return ABIArgInfo::getDirect();
9543 }
9545 // Aggregates which are <= 2*XLen will be passed in registers if possible,
9546 // so coerce to integers.
9547 if (Size <= 2 * XLen) {
9548 unsigned Alignment = getContext().getTypeAlign(Ty);
9550 // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
9551 // required, and a 2-element XLen array if only XLen alignment is required.
9552 if (Size <= XLen) {
9553 return ABIArgInfo::getDirect(
9554 llvm::IntegerType::get(getVMContext(), XLen));
9555 } else if (Alignment == 2 * XLen) {
9556 return ABIArgInfo::getDirect(
9557 llvm::IntegerType::get(getVMContext(), 2 * XLen));
9558 } else {
9559 return ABIArgInfo::getDirect(llvm::ArrayType::get(
9560 llvm::IntegerType::get(getVMContext(), XLen), 2));
9561 }
9562 }
9563 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
9564 }
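// Illustrative lowerings on RV32 (XLen == 32):
//   struct { int a; int b; } -> getDirect([2 x i32])    (XLen alignment)
//   struct { long long x; }  -> getDirect(i64)          (2*XLen alignment)
//   any aggregate > 8 bytes  -> getNaturalAlignIndirect (> 2*XLen)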
9566 ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
9567 if (RetTy->isVoidType())
9568 return ABIArgInfo::getIgnore();
9570 int ArgGPRsLeft = 2;
9571 int ArgFPRsLeft = FLen ? 2 : 0;
9573 // The rules for return and argument types are the same, so defer to
9574 // classifyArgumentType.
9575 return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
9576 ArgFPRsLeft);
9577 }
9579 Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
9580 QualType Ty) const {
9581 CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
9583 // Empty records are ignored for parameter passing purposes.
9584 if (isEmptyRecord(getContext(), Ty, true)) {
9585 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
9586 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
9587 return Addr;
9588 }
9590 std::pair<CharUnits, CharUnits> SizeAndAlign =
9591 getContext().getTypeInfoInChars(Ty);
9593 // Arguments bigger than 2*Xlen bytes are passed indirectly.
9594 bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;
9596 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
9597 SlotSize, /*AllowHigherAlign=*/true);
9598 }
9600 ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
9601 int TySize = getContext().getTypeSize(Ty);
9602 // RV64 ABI requires unsigned 32 bit integers to be sign extended.
9603 if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
9604 return ABIArgInfo::getSignExtend(Ty);
9605 return ABIArgInfo::getExtend(Ty);
9606 }
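// E.g. on RV64 (XLen == 64) a 'uint32_t' argument gets getSignExtend rather
// than zero extension, matching the RV64 convention of keeping 32-bit values
// sign-extended in registers; other sub-XLen integers extend according to
// their own signedness.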
9608 namespace {
9609 class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
9610 public:
9611 RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
9612 unsigned FLen)
9613 : TargetCodeGenInfo(new RISCVABIInfo(CGT, XLen, FLen)) {}
9615 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
9616 CodeGen::CodeGenModule &CGM) const override {
9617 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
9618 if (!FD) return;
9620 const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
9621 if (!Attr)
9622 return;
9624 const char *Kind;
9625 switch (Attr->getInterrupt()) {
9626 case RISCVInterruptAttr::user: Kind = "user"; break;
9627 case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
9628 case RISCVInterruptAttr::machine: Kind = "machine"; break;
9629 }
9631 auto *Fn = cast<llvm::Function>(GV);
9633 Fn->addFnAttr("interrupt", Kind);
9634 }
9635 };
9636 } // namespace
9638 //===----------------------------------------------------------------------===//
9639 // Driver code
9640 //===----------------------------------------------------------------------===//
9642 bool CodeGenModule::supportsCOMDAT() const {
9643 return getTriple().supportsCOMDAT();
9644 }
9646 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
9647 if (TheTargetCodeGenInfo)
9648 return *TheTargetCodeGenInfo;
9650 // Helper to set the unique_ptr while still keeping the return value.
9651 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
9652 this->TheTargetCodeGenInfo.reset(P);
9653 return *P;
9654 };
9656 const llvm::Triple &Triple = getTarget().getTriple();
9657 switch (Triple.getArch()) {
9658 default:
9659 return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
9661 case llvm::Triple::le32:
9662 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
9663 case llvm::Triple::mips:
9664 case llvm::Triple::mipsel:
9665 if (Triple.getOS() == llvm::Triple::NaCl)
9666 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
9667 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));
9669 case llvm::Triple::mips64:
9670 case llvm::Triple::mips64el:
9671 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));
9673 case llvm::Triple::avr:
9674 return SetCGInfo(new AVRTargetCodeGenInfo(Types));
9676 case llvm::Triple::aarch64:
9677 case llvm::Triple::aarch64_be: {
9678 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
9679 if (getTarget().getABI() == "darwinpcs")
9680 Kind = AArch64ABIInfo::DarwinPCS;
9681 else if (Triple.isOSWindows())
9682 return SetCGInfo(
9683 new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));
9685 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
9686 }
9688 case llvm::Triple::wasm32:
9689 case llvm::Triple::wasm64:
9690 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));
9692 case llvm::Triple::arm:
9693 case llvm::Triple::armeb:
9694 case llvm::Triple::thumb:
9695 case llvm::Triple::thumbeb: {
9696 if (Triple.getOS() == llvm::Triple::Win32) {
9697 return SetCGInfo(
9698 new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
9699 }
9701 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
9702 StringRef ABIStr = getTarget().getABI();
9703 if (ABIStr == "apcs-gnu")
9704 Kind = ARMABIInfo::APCS;
9705 else if (ABIStr == "aapcs16")
9706 Kind = ARMABIInfo::AAPCS16_VFP;
9707 else if (CodeGenOpts.FloatABI == "hard" ||
9708 (CodeGenOpts.FloatABI != "soft" &&
9709 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
9710 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
9711 Triple.getEnvironment() == llvm::Triple::EABIHF)))
9712 Kind = ARMABIInfo::AAPCS_VFP;
9714 return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
9715 }
9717 case llvm::Triple::ppc:
9718 return SetCGInfo(
9719 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft" ||
9720 getTarget().hasFeature("spe")));
9721 case llvm::Triple::ppc64:
9722 if (Triple.isOSBinFormatELF()) {
9723 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
9724 if (getTarget().getABI() == "elfv2")
9725 Kind = PPC64_SVR4_ABIInfo::ELFv2;
9726 bool HasQPX = getTarget().getABI() == "elfv1-qpx";
9727 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
9729 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9730 IsSoftFloat));
9731 } else
9732 return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
9733 case llvm::Triple::ppc64le: {
9734 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
9735 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
9736 if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
9737 Kind = PPC64_SVR4_ABIInfo::ELFv1;
9738 bool HasQPX = getTarget().getABI() == "elfv1-qpx";
9739 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
9741 return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
9742 IsSoftFloat));
9743 }
9745 case llvm::Triple::nvptx:
9746 case llvm::Triple::nvptx64:
9747 return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));
9749 case llvm::Triple::msp430:
9750 return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
9752 case llvm::Triple::riscv32:
9753 case llvm::Triple::riscv64: {
9754 StringRef ABIStr = getTarget().getABI();
9755 unsigned XLen = getTarget().getPointerWidth(0);
9756 unsigned ABIFLen = 0;
9757 if (ABIStr.endswith("f"))
9758 ABIFLen = 32;
9759 else if (ABIStr.endswith("d"))
9760 ABIFLen = 64;
9761 return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
9762 }
9764 case llvm::Triple::systemz: {
9765 bool HasVector = getTarget().getABI() == "vector";
9766 return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
9767 }
9769 case llvm::Triple::tce:
9770 case llvm::Triple::tcele:
9771 return SetCGInfo(new TCETargetCodeGenInfo(Types));
9773 case llvm::Triple::x86: {
9774 bool IsDarwinVectorABI = Triple.isOSDarwin();
9775 bool RetSmallStructInRegABI =
9776 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
9777 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
9779 if (Triple.getOS() == llvm::Triple::Win32) {
9780 return SetCGInfo(new WinX86_32TargetCodeGenInfo(
9781 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9782 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
9783 } else {
9784 return SetCGInfo(new X86_32TargetCodeGenInfo(
9785 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
9786 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
9787 CodeGenOpts.FloatABI == "soft"));
9788 }
9789 }
9791 case llvm::Triple::x86_64: {
9792 StringRef ABI = getTarget().getABI();
9793 X86AVXABILevel AVXLevel =
9794 (ABI == "avx512"
9795 ? X86AVXABILevel::AVX512
9796 : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);
9798 switch (Triple.getOS()) {
9799 case llvm::Triple::Win32:
9800 return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
9801 default:
9802 return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
9803 }
9804 }
9805 case llvm::Triple::hexagon:
9806 return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
9807 case llvm::Triple::lanai:
9808 return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
9809 case llvm::Triple::r600:
9810 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
9811 case llvm::Triple::amdgcn:
9812 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
9813 case llvm::Triple::sparc:
9814 return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
9815 case llvm::Triple::sparcv9:
9816 return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
9817 case llvm::Triple::xcore:
9818 return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
9819 case llvm::Triple::arc:
9820 return SetCGInfo(new ARCTargetCodeGenInfo(Types));
9821 case llvm::Triple::spir:
9822 case llvm::Triple::spir64:
9823 return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
9824 }
9825 }
9827 /// Create an OpenCL kernel for an enqueued block.
9829 /// The kernel has the same function type as the block invoke function. Its
9830 /// name is the name of the block invoke function postfixed with "_kernel".
9831 /// It simply calls the block invoke function then returns.
9832 llvm::Function *
9833 TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
9834 llvm::Function *Invoke,
9835 llvm::Value *BlockLiteral) const {
9836 auto *InvokeFT = Invoke->getFunctionType();
9837 llvm::SmallVector<llvm::Type *, 2> ArgTys;
9838 for (auto &P : InvokeFT->params())
9839 ArgTys.push_back(P);
9840 auto &C = CGF.getLLVMContext();
9841 std::string Name = Invoke->getName().str() + "_kernel";
9842 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
9843 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
9844 &CGF.CGM.getModule());
9845 auto IP = CGF.Builder.saveIP();
9846 auto *BB = llvm::BasicBlock::Create(C, "entry", F);
9847 auto &Builder = CGF.Builder;
9848 Builder.SetInsertPoint(BB);
9849 llvm::SmallVector<llvm::Value *, 2> Args;
9850 for (auto &A : F->args())
9851 Args.push_back(&A);
9852 Builder.CreateCall(Invoke, Args);
9853 Builder.CreateRetVoid();
9854 Builder.restoreIP(IP);
9855 return F;
9856 }
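// Conceptually, the emitted wrapper looks like this (illustrative IR; the
// "foo" names are assumed, and the parameter types mirror the invoke
// function):
//
//   define internal void @foo_block_invoke_kernel(i8* %block) {
//   entry:
//     call void @foo_block_invoke(i8* %block)
//     ret void
//   }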
9858 /// Create an OpenCL kernel for an enqueued block.
9860 /// The type of the first argument (the block literal) is the struct type
9861 /// of the block literal instead of a pointer type. The first argument
9862 /// (block literal) is passed directly by value to the kernel. The kernel
9863 /// allocates the same type of struct on stack and stores the block literal
9864 /// to it and passes its pointer to the block invoke function. The kernel
9865 /// has "enqueued-block" function attribute and kernel argument metadata.
9866 llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
9867 CodeGenFunction &CGF, llvm::Function *Invoke,
9868 llvm::Value *BlockLiteral) const {
9869 auto &Builder = CGF.Builder;
9870 auto &C = CGF.getLLVMContext();
9872 auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
9873 auto *InvokeFT = Invoke->getFunctionType();
9874 llvm::SmallVector<llvm::Type *, 2> ArgTys;
9875 llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
9876 llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
9877 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
9878 llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
9879 llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
9880 llvm::SmallVector<llvm::Metadata *, 8> ArgNames;
9882 ArgTys.push_back(BlockTy);
9883 ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
9884 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
9885 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
9886 ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
9887 AccessQuals.push_back(llvm::MDString::get(C, "none"));
9888 ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
9889 for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
9890 ArgTys.push_back(InvokeFT->getParamType(I));
9891 ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
9892 AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
9893 AccessQuals.push_back(llvm::MDString::get(C, "none"));
9894 ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
9895 ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
9896 ArgNames.push_back(
9897 llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
9898 }
9899 std::string Name = Invoke->getName().str() + "_kernel";
9900 auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
9901 auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
9902 &CGF.CGM.getModule());
9903 F->addFnAttr("enqueued-block");
9904 auto IP = CGF.Builder.saveIP();
9905 auto *BB = llvm::BasicBlock::Create(C, "entry", F);
9906 Builder.SetInsertPoint(BB);
9907 unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy);
9908 auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
9909 BlockPtr->setAlignment(BlockAlign);
9910 Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
9911 auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
9912 llvm::SmallVector<llvm::Value *, 2> Args;
9913 Args.push_back(Cast);
9914 for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
9915 Args.push_back(I);
9916 Builder.CreateCall(Invoke, Args);
9917 Builder.CreateRetVoid();
9918 Builder.restoreIP(IP);
9920 F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
9921 F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
9922 F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
9923 F->setMetadata("kernel_arg_base_type",
9924 llvm::MDNode::get(C, ArgBaseTypeNames));
9925 F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
9926 if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
9927 F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));
9929 return F;
9930 }