//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
15 #include "TargetInfo.h"
19 #include "CodeGenFunction.h"
20 #include "clang/AST/RecordLayout.h"
21 #include "clang/CodeGen/CGFunctionInfo.h"
22 #include "clang/CodeGen/SwiftCallingConv.h"
23 #include "clang/Frontend/CodeGenOptions.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/IR/DataLayout.h"
27 #include "llvm/IR/Type.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}
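
// Worked example (illustrative only, not from the original comments): a
// 12-byte aggregate with 4-byte alignment has Size == 96 and Alignment == 32,
// so NumElements == (96 + 31) / 32 == 3 and the value is coerced to
// [3 x i32] -- an IR type whose size and alignment match the qualified type,
// as RenderScript requires.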

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee.  But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}
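
// Worked example (illustrative only): on a 32-bit target (ptrWidth == 32),
// the scalar sequence { i8*, i64, float } counts 1 + 2 integer registers
// (the i64 needs two 32-bit registers) plus 1 fp register, so with
// maxAllRegisters == 4 it does *not* occupy more than the limit.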

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}
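
// Illustrative example (not from the original source): given
//   union U { int i; unsigned u; } __attribute__((transparent_union));
// an argument of type U is classified exactly as if it were declared with
// the type of its first field, int.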

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}
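
// Worked example (illustrative only): with Align == 8, a pointer value of
// 0x1003 becomes (0x1003 + 7) & -8 == 0x1008, while an already-aligned
// value such as 0x1008 is left unchanged, since adding Align - 1 never
// carries past the next multiple of the alignment.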

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.  If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary.  Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  llvm::Value *NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
                                             "argp.next");
  CGF.Builder.CreateStore(NextPtr, VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}
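
// Illustrative sketch of the resulting slot walk (not from the original
// source): with SlotSize == 4, reading an i32 and then a double (align 8,
// AllowHigherAlign == true) from a list starting at 0x1000 yields
//   i32    at 0x1000, argp.next = 0x1004
//   double at 0x1008 (rounded up from 0x1004), argp.next = 0x1010
// leaving the four bytes at 0x1004 behind as padding.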

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list to enable feasible implementation of clSetKernelArg() with
  // aggregates etc. In case we would use the default C calling conv here,
  // clSetKernelArg() might break depending on the target-specific
  // conventions; different targets might split structs passed as values
  // to multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

unsigned TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                     const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace()
           : static_cast<unsigned>(LangAS::Default);
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, unsigned SrcAddr,
    unsigned DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DestTy);
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        unsigned SrcAddr, unsigned DestAddr,
                                        llvm::Type *DestTy) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}
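
// Illustrative example (not from the original source): on this definition,
//   struct S { int : 0; };
// contains only an unnamed bit-field, so isEmptyRecord(..., S, ...) is true;
// adding any named scalar field makes it non-empty.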

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

static
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)

  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public DefaultABIInfo {
public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
};

/// \brief Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/ true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI.  Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCL ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}
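
// Illustrative note: <2 x i32>, <4 x i16>, and <8 x i8> all satisfy this
// predicate (64-bit total, integer elements narrower than 64 bits), while
// <1 x i64> does not -- its scalar size is 64, which the last condition
// excludes -- and <2 x float> fails the integer-element test.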

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;

  /// \brief Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                             bool &UsedInAlloca) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
    : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers.  Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('v' << 16) |
                   ('2' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t## marker for objc_retainAutoreleaseReturnValue";
  }
};

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}
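
// Illustrative note: the NumDollars % 2 test skips escaped dollars. In
// "mov $$1, $1" the "$$1" renders as a literal "$1" and is left alone;
// only the trailing "$1" is an operand reference and gets renumbered.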

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, type must be register sized.
  // For the MCU ABI, it only needs to be <= 8-byte
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding.  (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}
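
// Illustrative example (not from the original source): struct { int a;
// float b; } passes the check -- two 32-bit "basic" fields, 64 bits total,
// equal to the struct's own size -- whereas struct { char c; } does not,
// since char is not a 32- or 64-bit basic type and expansion would
// introduce alignment padding on the stack.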

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}
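
// Illustrative example (not from the original source): on Darwin, a struct
// containing a 128-bit SSE vector (e.g. __m128) with a 16-byte type
// alignment gets a 16-byte stack alignment here; on other platforms the
// same struct is capped at the 4-byte minimum and any stronger alignment
// is re-established explicitly rather than via the slot.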

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}
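
// Illustrative note: this only does the register bookkeeping. Under
// fastcall with State.FreeRegs == 2, an i64 (SizeInRegs == 2) drains both
// register slots even though the fastcall checks in the callers may still
// send the value itself to the stack; a later i32 then sees FreeRegs == 0.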

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogenous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
        Ty->isReferenceType());
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // Regcall uses the concept of a homogenous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_RegCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {

    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (getContext().getTypeSize(Ty) <= 4 * 32 &&
        (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall ||
              State.CC == llvm::CallingConv::X86_RegCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
1672 void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
1673 bool &UsedInAlloca) const {
1674 // Vectorcall x86 works subtly different than in x64, so the format is
1675 // a bit different than the x64 version. First, all vector types (not HVAs)
1676 // are assigned, with the first 6 ending up in the YMM0-5 or XMM0-5 registers.
1677 // This differs from the x64 implementation, where the first 6 by INDEX get
1679 // After that, integers AND HVAs are assigned Left to Right in the same pass.
1680 // Integers are passed as ECX/EDX if one is available (in order). HVAs will
1681 // first take up the remaining YMM/XMM registers. If insufficient registers
1682 // remain but an integer register (ECX/EDX) is available, it will be passed
1683 // in that, else, on the stack.
1684 for (auto &I : FI.arguments()) {
1685 // First pass do all the vector types.
1686 const Type *Base = nullptr;
1687 uint64_t NumElts = 0;
1688 const QualType& Ty = I.type;
1689 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1690 isHomogeneousAggregate(Ty, Base, NumElts)) {
1691 if (State.FreeSSERegs >= NumElts) {
1692 State.FreeSSERegs -= NumElts;
1693 I.info = ABIArgInfo::getDirect();
1695 I.info = classifyArgumentType(Ty, State);
1697 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1701 for (auto &I : FI.arguments()) {
1702 // Second pass, do the rest!
1703 const Type *Base = nullptr;
1704 uint64_t NumElts = 0;
1705 const QualType& Ty = I.type;
1706 bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts);
1708 if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) {
1709 // Assign true HVAs (non vector/native FP types).
1710 if (State.FreeSSERegs >= NumElts) {
1711 State.FreeSSERegs -= NumElts;
        I.info = getDirectX86Hva();
      } else {
        I.info = getIndirectResult(Ty, /*ByVal=*/false, State);
      }
1716 } else if (!IsHva) {
1717 // Assign all Non-HVAs, so this will exclude Vector/FP args.
1718 I.info = classifyArgumentType(Ty, State);
      UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
    }
  }
}
1724 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeRegs = 5;
    State.FreeSSERegs = 8;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;
1741 if (!getCXXABI().classifyReturnType(FI)) {
1742 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1743 } else if (FI.getReturnInfo().isIndirect()) {
1744 // The C++ ABI is not aware of register usage, so we have to check if the
1745 // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }
1753 // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;
1757 bool UsedInAlloca = false;
  if (State.CC == llvm::CallingConv::X86_VectorCall) {
    computeVectorCallArgs(FI, State, UsedInAlloca);
  } else {
    // If not vectorcall, revert to normal behavior.
    for (auto &I : FI.arguments()) {
      I.info = classifyArgumentType(I.type, State);
      UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
    }
  }
1768 // If we needed to use inalloca for any argument, do a second pass and rewrite
1769 // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
1775 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1776 CharUnits &StackOffset, ABIArgInfo &Info,
1777 QualType Type) const {
1778 // Arguments are always 4-byte-aligned.
1779 CharUnits FieldAlign = CharUnits::fromQuantity(4);
1781 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
1782 Info = ABIArgInfo::getInAlloca(FrameFields.size());
1783 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1784 StackOffset += getContext().getTypeSizeInChars(Type);
1786 // Insert padding bytes to respect alignment.
1787 CharUnits FieldEnd = StackOffset;
1788 StackOffset = FieldEnd.alignTo(FieldAlign);
1789 if (StackOffset != FieldEnd) {
1790 CharUnits NumBytes = StackOffset - FieldEnd;
1791 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1792 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}
1797 static bool isArgInAlloca(const ABIArgInfo &Info) {
1798 // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    if (Info.getInReg())
      return false;
    return true;
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
    // These are aggregate types which are never passed in registers when
    // inalloca is involved.
    return true;
  }
  llvm_unreachable("invalid enum");
}
1821 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1822 assert(IsWin32StructABI && "inalloca only supported on win32");
1824 // Build a packed struct type for all of the arguments in memory.
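  // For example, for a hypothetical f(struct NonTrivial a, int b) on win32,
  // the frame becomes a packed struct such as <{ %struct.NonTrivial, i32 }>
  // and both arguments are rewritten to inalloca fields.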
1825 SmallVector<llvm::Type *, 6> FrameFields;
1827 // The stack alignment is always 4.
1828 CharUnits StackAlign = CharUnits::fromQuantity(4);
1830 CharUnits StackOffset;
1831 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1833 // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
1836 ABIArgInfo &Ret = FI.getReturnInfo();
1837 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
1838 isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }
1843 // Put the sret parameter into the inalloca struct if it's in memory.
1844 if (Ret.isIndirect() && !Ret.getInReg()) {
1845 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
1846 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1847 // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }
  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;
1855 // Put arguments passed in memory into the struct.
1856 for (; I != E; ++I) {
1857 if (isArgInAlloca(I->info))
1858 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}
1866 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
1867 Address VAListAddr, QualType Ty) const {
1869 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1871 // x86-32 changes the alignment of certain arguments on the stack.
1873 // Just messing with TypeInfo like this works because we never pass
1874 // anything indirectly.
1875 TypeInfo.second = CharUnits::fromQuantity(
1876 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
1878 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
1879 TypeInfo, CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}
1883 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1884 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1885 assert(Triple.getArch() == llvm::Triple::x86);
  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}
1911 void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
1912 llvm::GlobalValue *GV,
1913 CodeGen::CodeGenModule &CGM) const {
1914 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1915 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1916 // Get the LLVM function.
1917 llvm::Function *Fn = cast<llvm::Function>(GV);
1919 // Now add the 'alignstack' attribute with a value of 16.
1920 llvm::AttrBuilder B;
1921 B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
    }
1924 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1925 llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);
    }
  }
}
1931 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1932 CodeGen::CodeGenFunction &CGF,
1933 llvm::Value *Address) const {
1934 CodeGen::CGBuilderTy &Builder = CGF.Builder;
1936 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1938 // 0-7 are the eight integer registers; the order is different
  //   on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);
1943 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1944 // 12-16 are st(0..4). Not sure why we stop at 4.
1945 // These have size 16, which is sizeof(long double) on
1946 // platforms with 8-byte alignment for that type.
1947 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1948 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());
1957 // 11-16 are st(0..5). Not sure why we stop at 5.
1958 // These have size 12, which is sizeof(long double) on
1959 // platforms with 4-byte alignment for that type.
1960 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
1967 //===----------------------------------------------------------------------===//
1968 // X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
1973 /// The AVX ABI level for X86 targets.
enum class X86AVXABILevel {
  None,
  AVX,
  AVX512
};

/// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}
1993 /// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };
2006 /// merge - Implement the X86_64 ABI merging algorithm.
2008 /// Merge an accumulating classification \arg Accum with a field
2009 /// classification \arg Field.
2011 /// \param Accum - The accumulating classification. This should
2012 /// always be either NoClass or the result of a previous merge
2013 /// call. In addition, this should never be Memory (the caller
2014 /// should just return Memory for the aggregate).
2015 static Class merge(Class Accum, Class Field);
2017 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2019 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2020 /// final MEMORY or SSE classes when necessary.
2022 /// \param AggregateSize - The size of the current aggregate in
2023 /// the classification process.
2025 /// \param Lo - The classification for the parts of the type
2026 /// residing in the low word of the containing object.
2028 /// \param Hi - The classification for the parts of the type
2029 /// residing in the higher words of the containing object.
2031 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2033 /// classify - Determine the x86_64 register classes in which the
2034 /// given type T should be passed.
2036 /// \param Lo - The classification for the parts of the type
2037 /// residing in the low word of the containing object.
2039 /// \param Hi - The classification for the parts of the type
2040 /// residing in the high word of the containing object.
2042 /// \param OffsetBase - The bit offset of this type in the
2043 /// containing object. Some parameters are classified different
2044 /// depending on whether they straddle an eightbyte boundary.
2046 /// \param isNamedArg - Whether the argument in question is a "named"
2047 /// argument, as used in AMD64-ABI 3.5.7.
2049 /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
2053 /// The \arg Lo class will be NoClass iff the argument is ignored.
2055 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2056 /// also be ComplexX87.
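  ///
  /// For example, the second eightbyte of a 16-byte struct is classified
  /// with an OffsetBase of 64, so its result is delivered in \arg Hi.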
2057 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2058 bool isNamedArg) const;
2060 llvm::Type *GetByteVectorType(QualType Ty) const;
2061 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2062 unsigned IROffset, QualType SourceTy,
2063 unsigned SourceOffset) const;
2064 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2065 unsigned IROffset, QualType SourceTy,
2066 unsigned SourceOffset) const;
  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
2070 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
2075 /// \param freeIntRegs - The number of free integer registers remaining
2077 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2079 ABIArgInfo classifyReturnType(QualType RetTy) const;
2081 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2082 unsigned &neededInt, unsigned &neededSSE,
2083 bool isNamedArg) const;
2085 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2086 unsigned &NeededSSE) const;
2088 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2089 unsigned &NeededSSE) const;
2091 bool IsIllegalVectorType(QualType Ty) const;
2093 /// The 0.98 ABI revision clarified a lot of ambiguities,
2094 /// unfortunately in ways that were not always consistent with
2095 /// certain previous compilers. In particular, platforms which
2096 /// required strict binary compatibility with older versions of GCC
2097 /// may need to exempt themselves.
2098 bool honorsRevision0_98() const {
2099 return !getTarget().getTriple().isOSDarwin();
2102 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2103 /// classify it as INTEGER (for compatibility with older clang compilers).
2104 bool classifyIntegerMMXAsSSE() const {
2105 // Clang <= 3.8 did not do this.
    if (getCodeGenOpts().getClangABICompat() <=
            CodeGenOptions::ClangABI::Ver3_8)
      return false;

    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
      return false;
    if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
      return false;
    return true;
  }
2118 X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // x86-64.
2121 bool Has64BitPointers;
2124 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2125 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2126 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2129 bool isPassedUsingAVXType(QualType type) const {
2130 unsigned neededInt, neededSSE;
2131 // The freeIntRegs argument doesn't matter here.
2132 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2133 /*isNamedArg*/true);
2134 if (info.isDirect()) {
2135 llvm::Type *ty = info.getCoerceToType();
2136 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }
2142 void computeInfo(CGFunctionInfo &FI) const override;
2144 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2145 QualType Ty) const override;
2146 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2147 QualType Ty) const override;
2149 bool has64BitPointers() const {
2150 return Has64BitPointers;
2153 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2154 ArrayRef<llvm::Type*> scalars,
2155 bool asReturnValue) const override {
2156 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  bool isSwiftErrorInRegister() const override {
    return true;
  }
};
2163 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public SwiftABIInfo {
public:
2166 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
2167 : SwiftABIInfo(CGT),
2168 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2170 void computeInfo(CGFunctionInfo &FI) const override;
2172 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2173 QualType Ty) const override;
2175 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2176 // FIXME: Assumes vectorcall is in use.
2177 return isX86VectorTypeForVectorCall(getContext(), Ty);
2180 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2181 uint64_t NumMembers) const override {
2182 // FIXME: Assumes vectorcall is in use.
2183 return isX86VectorCallAggregateSmallEnough(NumMembers);
2186 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2187 ArrayRef<llvm::Type *> scalars,
2188 bool asReturnValue) const override {
2189 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  bool isSwiftErrorInRegister() const override {
    return true;
  }

private:
2197 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2198 bool IsVectorCall, bool IsRegCall) const;
2199 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
                                  const ABIArgInfo &current) const;
2201 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
                             bool IsVectorCall, bool IsRegCall) const;
};
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
2209 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2210 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2212 const X86_64ABIInfo &getABIInfo() const {
2213 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }
2220 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2221 llvm::Value *Address) const override {
2222 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
2230 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2231 StringRef Constraint,
2232 llvm::Type* Ty) const override {
2233 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2236 bool isNoProtoCallVariadic(const CallArgList &args,
2237 const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
2239 // registers used, and GCC sets this when calling an unprototyped
2240 // function, so we override the default behavior. However, don't do
2241 // that when AVX types are involved: the ABI explicitly states it is
2242 // undefined, and it doesn't work in practice because of how the ABI
2243 // defines varargs anyway.
2244 if (fnType->getCallConv() == CC_C) {
2245 bool HasAVXType = false;
2246 for (CallArgList::const_iterator
2247 it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }
2258 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig;
    if (getABIInfo().has64BitPointers())
      Sig = (0xeb << 0) | // jmp rel8
            (0x0a << 8) | //   .+0x0c
            ('F' << 16) |
            ('T' << 24);
    else
      Sig = (0xeb << 0) | // jmp rel8
            (0x06 << 8) | //   .+0x08
            ('F' << 16) |
            ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
2277 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2278 CodeGen::CodeGenModule &CGM) const override {
2279 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2280 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2281 llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->setCallingConv(llvm::CallingConv::X86_INTR);
      }
    }
  }
};
class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
public:
2290 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2291 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2293 void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "\01";
    // If the argument contains a space, enclose it in quotes.
    if (Lib.find(" ") != StringRef::npos)
      Opt += "\"" + Lib.str() + "\"";
    else
      Opt += Lib;
  }
};
2304 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2305 // If the argument does not end in .lib, automatically add the suffix.
2306 // If the argument contains a space, enclose it in quotes.
2307 // This matches the behavior of MSVC.
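// For example, "foo" becomes "foo.lib", and "foo bar" becomes
// "\"foo bar.lib\"".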
2308 bool Quote = (Lib.find(" ") != StringRef::npos);
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.endswith_lower(".lib"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}
class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
2319 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2320 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2321 unsigned NumRegisterParameters)
2322 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2323 Win32StructABI, NumRegisterParameters, false) {}
2325 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2326 CodeGen::CodeGenModule &CGM) const override;
2328 void getDependentLibraryOption(llvm::StringRef Lib,
2329 llvm::SmallString<24> &Opt) const override {
2330 Opt = "/DEFAULTLIB:";
2331 Opt += qualifyWindowsLibrary(Lib);
2334 void getDetectMismatchOption(llvm::StringRef Name,
2335 llvm::StringRef Value,
2336 llvm::SmallString<32> &Opt) const override {
2337 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2341 static void addStackProbeSizeTargetAttribute(const Decl *D,
2342 llvm::GlobalValue *GV,
2343 CodeGen::CodeGenModule &CGM) {
2344 if (D && isa<FunctionDecl>(D)) {
2345 if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
2346 llvm::Function *Fn = cast<llvm::Function>(GV);
2348 Fn->addFnAttr("stack-probe-size",
2349 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2354 void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
2355 llvm::GlobalValue *GV,
2356 CodeGen::CodeGenModule &CGM) const {
2357 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2359 addStackProbeSizeTargetAttribute(D, GV, CGM);
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
2364 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2365 X86AVXABILevel AVXLevel)
2366 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
2368 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2369 CodeGen::CodeGenModule &CGM) const override;
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }
2375 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2376 llvm::Value *Address) const override {
2377 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
2385 void getDependentLibraryOption(llvm::StringRef Lib,
2386 llvm::SmallString<24> &Opt) const override {
2387 Opt = "/DEFAULTLIB:";
2388 Opt += qualifyWindowsLibrary(Lib);
2391 void getDetectMismatchOption(llvm::StringRef Name,
2392 llvm::StringRef Value,
2393 llvm::SmallString<32> &Opt) const override {
2394 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2398 void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
2399 llvm::GlobalValue *GV,
2400 CodeGen::CodeGenModule &CGM) const {
2401 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2403 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2404 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2405 llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);
    }
  }
  addStackProbeSizeTargetAttribute(D, GV, CGM);
}
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
2416 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  // (a) If one of the classes is Memory, the whole argument is passed in
  // memory.
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  // memory.
2424 // (c) If the size of the aggregate exceeds two eightbytes and the first
2425 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2426 // argument is passed in memory. NOTE: This is necessary to keep the
2427 // ABI working for processors that don't support the __m256 type.
2429 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2431 // Some of these are enforced by the merging logic. Others can arise
2432 // only with unions; for example:
2433 // union { _Complex double; unsigned; }
2435 // Note that clauses (b) and (c) were added in 0.98.
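  //
  // For example, if a union leaves Hi == X87Up while Lo is Integer, clause
  // (b) forces the whole argument into memory on targets honoring the 0.98
  // revision.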
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}
2447 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2448 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2449 // classified recursively so that always two fields are
2450 // considered. The resulting class is calculated according to
2451 // the classes of the fields in the eightbyte:
2453 // (a) If both classes are equal, this is the resulting class.
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
2464 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2465 // MEMORY is used as class.
2467 // (f) Otherwise class SSE is used.
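  //
  // For example, for struct { int i; float f; } the lone eightbyte merges
  // INTEGER (from the int) with SSE (from the float); rule (d) yields
  // INTEGER, so the struct travels in a GPR.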
2469 // Accum should never be memory (we should have returned) or
2470 // ComplexX87 (because this cannot be passed in a structure).
2471 assert((Accum != Memory && Accum != ComplexX87) &&
2472 "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
2487 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2488 Class &Lo, Class &Hi, bool isNamedArg) const {
2489 // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.
2493 // FIXME: Some of the split computations are wrong; unaligned vectors
2494 // shouldn't be passed in registers for example, so there is no chance they
2495 // can straddle an eightbyte. Verify & simplify.
  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;
2502 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2503 BuiltinType::Kind k = BT->getKind();
    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
2527 // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }
2532 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2533 // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }
2543 if (Ty->isMemberPointerType()) {
2544 if (Ty->isMemberFunctionPointerType()) {
2545 if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}, so classify both
        // Lo and Hi now.
        Lo = Hi = Integer;
      } else {
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
        // straddles an eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj)
          Lo = Hi = Integer;
        else
          Current = Integer;
      }
    } else {
      Current = Integer;
    }
    return;
  }
2566 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2567 uint64_t Size = getContext().getTypeSize(VT);
2568 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2569 // gcc passes the following as integer:
2570 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2571 // 2 bytes - <2 x char>, <1 x short>
      // 1 byte  - <1 x char>
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Lo = (OffsetBase) / 64;
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
      if (EB_Lo != EB_Hi)
        Hi = Lo;
2581 } else if (Size == 64) {
2582 QualType ElementType = VT->getElementType();
2584 // gcc passes <1 x double> in memory. :(
      if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
        return;
2588 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2589 // pass them as integer. For platforms where clang is the de facto
2590 // platform compiler, we must continue to use integer.
2591 if (!classifyIntegerMMXAsSSE() &&
2592 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2593 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2594 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
2604 } else if (Size == 128 ||
2605 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2606 // Arguments of 256-bits are split into four eightbyte chunks. The
2607 // least significant one belongs to class SSE and all the others to class
2608 // SSEUP. The original Lo and Hi design considers that types can't be
2609 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2610 // This design isn't correct for 256-bits, but since there're no cases
2611 // where the upper parts would need to be inspected, avoid adding
2612 // complexity and just consider Hi to match the 64-256 part.
2614 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2615 // registers if they are "named", i.e. not part of the "..." of a
2616 // variadic function.
2618 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
      // split into eight eightbyte chunks, one SSE and seven SSEUP.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }
2626 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2627 QualType ET = getContext().getCanonicalType(CT->getElementType());
2629 uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }
2661 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2662 // Arrays are treated like structures.
2664 uint64_t Size = getContext().getTypeSize(Ty);
    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;
2671 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2672 // fields, it has class MEMORY.
2674 // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;
2678 // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
2681 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2682 uint64_t ArraySize = AT->getSize().getZExtValue();
2684 // The only case a 256-bit wide vector could be used is when the array
2685 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2686 // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 &&
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
      return;
2692 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2693 Class FieldLo, FieldHi;
2694 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2695 Lo = merge(Lo, FieldLo);
2696 Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }
2701 postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
2706 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2707 uint64_t Size = getContext().getTypeSize(Ty);
2709 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;
2714 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;
2720 const RecordDecl *RD = RT->getDecl();
2722 // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;
2726 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // Reset Lo class, this will be recomputed.
    Current = NoClass;
2731 // If this is a C++ record, classify the bases first.
2732 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2733 for (const auto &I : CXXRD->bases()) {
2734 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2735 "Unexpected base class!");
2736 const CXXRecordDecl *Base =
2737 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2739 // Classify this field.
2741 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
2742 // single eightbyte, each is classified separately. Each eightbyte gets
2743 // initialized to class NO_CLASS.
      Class FieldLo, FieldHi;
      uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
2747 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2748 Lo = merge(Lo, FieldLo);
2749 Hi = merge(Hi, FieldHi);
2750 if (Lo == Memory || Hi == Memory) {
        postMerge(Size, Lo, Hi);
        return;
      }
    }
  }
2757 // Classify the fields one at a time, merging the results.
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2760 i != e; ++i, ++idx) {
2761 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2762 bool BitField = i->isBitField();
2764 // Ignore padding bit-fields.
    if (BitField && i->isUnnamedBitfield())
      continue;
2768 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
2769 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
2771 // The only case a 256-bit wide vector could be used is when the struct
2772 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2773 // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
                       Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
      Lo = Memory;
      postMerge(Size, Lo, Hi);
      return;
    }
2781 // Note, skip this test for bit-fields, see below.
    if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
      Lo = Memory;
      postMerge(Size, Lo, Hi);
      return;
    }
2788 // Classify this field.
2790 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2791 // exceeds a single eightbyte, each is classified
2792 // separately. Each eightbyte gets initialized to class
2794 Class FieldLo, FieldHi;
2796 // Bit-fields require special handling, they do not force the
2797 // structure to be passed in memory even if unaligned, and
2798 // therefore they can straddle an eightbyte.
    if (BitField) {
      assert(!i->isUnnamedBitfield());
2801 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2802 uint64_t Size = i->getBitWidthValue(getContext());
2804 uint64_t EB_Lo = Offset / 64;
2805 uint64_t EB_Hi = (Offset + Size - 1) / 64;
      if (EB_Lo) {
        assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
        FieldLo = NoClass;
        FieldHi = Integer;
      } else {
        FieldLo = Integer;
        FieldHi = EB_Hi ? Integer : NoClass;
      }
    } else
      classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2817 Lo = merge(Lo, FieldLo);
2818 Hi = merge(Hi, FieldHi);
    if (Lo == Memory || Hi == Memory)
      break;
  }
    postMerge(Size, Lo, Hi);
  }
}
2827 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
2830 if (!isAggregateTypeForABI(Ty)) {
2831 // Treat an enum type as its underlying type.
2832 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2833 Ty = EnumTy->getDecl()->getIntegerType();
2835 return (Ty->isPromotableIntegerType() ?
2836 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2839 return getNaturalAlignIndirect(Ty);
2842 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2843 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2844 uint64_t Size = getContext().getTypeSize(VecTy);
2845 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}
2853 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2854 unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
2858 // This assumption is optimistic, as there could be free registers available
2859 // when we need to pass this argument in memory, and LLVM could try to pass
2860 // the argument in the free register. This does not seem to happen currently,
2861 // but this code would be much safer if we could mark the argument with
2862 // 'onstack'. See PR12193.
2863 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2864 // Treat an enum type as its underlying type.
2865 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2866 Ty = EnumTy->getDecl()->getIntegerType();
2868 return (Ty->isPromotableIntegerType() ?
2869 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2872 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2873 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
2875 // Compute the byval alignment. We specify the alignment of the byval in all
2876 // cases so that the mid-level optimizer knows the alignment of the byval.
2877 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2879 // Attempt to avoid passing indirect results using byval when possible. This
2880 // is important for good codegen.
2882 // We do this by coercing the value into a scalar type which the backend can
2883 // handle naturally (i.e., without using byval).
2885 // For simplicity, we currently only do this when we have exhausted all of the
2886 // free integer registers. Doing this when there are free integer registers
2887 // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
2889 // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
2893 // We currently expect it to be rare (particularly in well written code) for
2894 // arguments to be passed on the stack when there are still free integer
2895 // registers available (this would typically imply large structs being passed
2896 // by value), so this seems like a fair tradeoff for now.
2898 // We can revisit this if the backend grows support for 'onstack' parameter
2899 // attributes. See PR12193.
2900 if (freeIntRegs == 0) {
2901 uint64_t Size = getContext().getTypeSize(Ty);
2903 // If this type fits in an eightbyte, coerce it into the matching integral
2904 // type, which will end up on the stack (with alignment 8).
2905 if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }
2910 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2913 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2914 /// register. Pick an LLVM IR type that will be passed as a vector register.
2915 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2916 // Wrapper structs/arrays that only contain vectors are passed just like
2917 // vectors; strip them off if present.
2918 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2919 Ty = QualType(InnerTy, 0);
2921 llvm::Type *IRType = CGT.ConvertType(Ty);
2922 if (isa<llvm::VectorType>(IRType) ||
      IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;
2926 // We couldn't find the preferred IR vector type for 'Ty'.
2927 uint64_t Size = getContext().getTypeSize(Ty);
2928 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
2930 // Return a LLVM IR vector type based on the size of 'Ty'.
  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                               Size / 64);
}
2935 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2936 /// is known to either be off the end of the specified type or being in
2937 /// alignment padding. The user type specified is known to be at most 128 bits
2938 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2939 /// classification that put one of the two halves in the INTEGER class.
2941 /// It is conservatively correct to return false.
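///
/// For example, for struct { float x, y, z; } (a 96-bit type), querying the
/// bit range [96, 128) returns true because those bits lie off the end of
/// the type.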
2942 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2943 unsigned EndBit, ASTContext &Context) {
2944 // If the bytes being queried are off the end of the type, there is no user
2945 // data hiding here. This handles analysis of builtins, vectors and other
2946 // types that don't contain interesting padding.
2947 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;
2951 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2952 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2953 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2955 // Check each element to see if the element overlaps with the queried range.
2956 for (unsigned i = 0; i != NumElts; ++i) {
2957 // If the element is after the span we care about, then we're done..
2958 unsigned EltOffset = i*EltSize;
2959 if (EltOffset >= EndBit) break;
2961 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2962 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }
2970 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2971 const RecordDecl *RD = RT->getDecl();
2972 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2974 // If this is a C++ record, check the bases first.
2975 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2976 for (const auto &I : CXXRD->bases()) {
2977 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2978 "Unexpected base class!");
2979 const CXXRecordDecl *Base =
2980 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2982 // If the base is after the span we care about, ignore it.
2983 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
2984 if (BaseOffset >= EndBit) continue;
2986 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2987 if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }
2993 // Verify that no field has data that overlaps the region of interest. Yes
2994 // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
2998 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2999 i != e; ++i, ++idx) {
3000 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
3002 // If we found a field after the region we care about, then we're done.
3003 if (FieldOffset >= EndBit) break;
3005 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }
    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}
3019 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
3020 /// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
3023 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
3024 const llvm::DataLayout &TD) {
3025 // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;
3029 // If this is a struct, recurse into the field at the specified offset.
3030 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3031 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3032 unsigned Elt = SL->getElementContainingOffset(IROffset);
3033 IROffset -= SL->getElementOffset(Elt);
3034 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3037 // If this is an array, recurse into the field at the specified offset.
3038 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3039 llvm::Type *EltTy = ATy->getElementType();
3040 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3041 IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}
3049 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3050 /// low 8 bytes of an XMM register, corresponding to the SSE class.
3051 llvm::Type *X86_64ABIInfo::
3052 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3053 QualType SourceTy, unsigned SourceOffset) const {
3054 // The only three choices we have are either double, <2 x float>, or float. We
// pass as float if the last 4 bytes are just padding. This happens for
3056 // structs that contain 3 floats.
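//
// For example, for struct { float x, y, z; } the first eightbyte is passed
// as <2 x float> (x and y) and the second as a single float, since bits
// [96, 128) are only padding.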
3057 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3058 SourceOffset*8+64, getContext()))
3059 return llvm::Type::getFloatTy(getVMContext());
3061 // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
3064 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3065 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3066 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
3068 return llvm::Type::getDoubleTy(getVMContext());
3072 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3073 /// an 8-byte GPR. This means that we either have a scalar or we are talking
3074 /// about the high or low part of an up-to-16-byte struct. This routine picks
3075 /// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
3079 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3080 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
3081 /// the 8-byte value references. PrefType may be null.
3083 /// SourceTy is the source-level type for the entire argument. SourceOffset is
3084 /// an offset into this that we're processing (which is always either 0 or 8).
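///
/// For example, for struct { double d; int i; } the high eightbyte is
/// represented as i32, because bits [96, 128) beyond the int are tail
/// padding.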
3086 llvm::Type *X86_64ABIInfo::
3087 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3088 QualType SourceTy, unsigned SourceOffset) const {
3089 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3090 // returning an 8-byte unit starting with it. See if we can safely use it.
3091 if (IROffset == 0) {
3092 // Pointers and int64's always fill the 8-byte unit.
3093 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;
3097 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3098 // goodness in the source type is just tail padding. This is allowed to
3099 // kick in for struct {double,int} on the int, but not on
3100 // struct{double,int,int} because we wouldn't return the second int. We
3101 // have to do this analysis on the source type because we can't depend on
3102 // unions being lowered a specific way etc.
3103 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3104 IRType->isIntegerTy(32) ||
3105 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3106 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3107 cast<llvm::IntegerType>(IRType)->getBitWidth();
3109 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }
3115 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3116 // If this is a struct, recurse into the field at the specified offset.
3117 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3118 if (IROffset < SL->getSizeInBytes()) {
3119 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3120 IROffset -= SL->getElementOffset(FieldIdx);
3122 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3123 SourceTy, SourceOffset);
3127 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3128 llvm::Type *EltTy = ATy->getElementType();
3129 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3130 unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }
3135 // Okay, we don't have any better idea of what to pass, so we pass this in an
3136 // integer register that isn't too big to fit the rest of the struct.
3137 unsigned TySizeInBytes =
3138 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3140 assert(TySizeInBytes != SourceOffset && "Empty field?");
3142 // It is always safe to classify this as an integer type up to i64 that
3143 // isn't larger than the structure.
3144 return llvm::IntegerType::get(getVMContext(),
3145 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3149 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3150 /// be used as elements of a two register pair to pass or return, return a
3151 /// first class aggregate to represent them. For example, if the low part of
3152 /// a by-value argument should be passed as i32* and the high part as float,
3153 /// return {i32*, float}.
3155 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3156 const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
3158 // at offset 8. If the high and low parts we inferred are both 4-byte types
3159 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3160 // the second element at offset 8. Check for this:
3161 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3162 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3163 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3164 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3166 // To handle this, we have to increase the size of the low part so that the
3167 // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
3171 // There are usually two sorts of types the ABI generation code can produce
3172 // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
    // NaCl).
    // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }
3185 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3187 // Verify that the second element is at an 8-byte offset.
3188 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3189 "Invalid x86-64 argument pair!");
3193 ABIArgInfo X86_64ABIInfo::
3194 classifyReturnType(QualType RetTy) const {
3195 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3196 // classification algorithm.
3197 X86_64ABIInfo::Class Lo, Hi;
3198 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3200 // Check some invariants.
3201 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3202 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");
    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);
3224 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3229 // If we have a sign or zero extended integer, make sure to return Extend
3230 // so that the parameter gets the right LLVM IR attributes.
3231 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3232 // Treat an enum type as its underlying type.
3233 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3234 RetTy = EnumTy->getDecl()->getIntegerType();
3236 if (RetTy->isIntegralOrEnumerationType() &&
3237 RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;
3242 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;
3248 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;
3254 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3259 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }
  llvm::Type *HighPart = nullptr;
  switch (Hi) {
3266 // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
3270 llvm_unreachable("Invalid classification for hi word.");
  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
3277 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3278 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  case SSE:
3282 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3283 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
3287 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the next available eightbyte chunk if the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
3293 assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;
3297 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
3300 // If X87Up is preceded by X87, we don't need to do
3301 // anything. However, in some cases with unions it may not be
3302 // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
3305 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3306 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }
3312 // If a high part was specified, merge it together with the low part. It is
3313 // known to pass in the high eightbyte of the result. We do this by forming a
3314 // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
  return ABIArgInfo::getDirect(ResType);
}
3321 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
    bool isNamedArg) const {
3326 Ty = useFirstFieldIfTransparentUnion(Ty);
3328 X86_64ABIInfo::Class Lo, Hi;
3329 classify(Ty, 0, Lo, Hi, isNamedArg);
3331 // Check some invariants.
3332 // FIXME: Enforce these by construction.
3333 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;
    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
3363 llvm_unreachable("Invalid classification for lo word.");
3365 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;
3371 // Pick an 8-byte type based on the preferred type.
3372 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3374 // If we have a sign or zero extended integer, make sure to return Extend
3375 // so that the parameter gets the right LLVM IR attributes.
3376 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3377 // Treat an enum type as its underlying type.
3378 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3379 Ty = EnumTy->getDecl()->getIntegerType();
3381 if (Ty->isIntegralOrEnumerationType() &&
3382 Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;
3388 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3389 // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
3392 llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }
  llvm::Type *HighPart = nullptr;
  switch (Hi) {
3401 // Memory was handled previously, ComplexX87 and X87 should
3402 // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
3407 llvm_unreachable("Invalid classification for hi word.");
  case NoClass: break;

  case Integer:
    ++neededInt;
3413 // Pick an 8-byte type based on the preferred type.
3414 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3416 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3417 return ABIArgInfo::getDirect(HighPart, 8);
3418 break;
3420 // X87Up generally doesn't occur here (long double is passed in
3421 // memory), except in situations involving unions.
3422 case X87Up:
3423 case SSE:
3424 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3426 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3427 return ABIArgInfo::getDirect(HighPart, 8);
3429 ++neededSSE;
3430 break;
3432 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3433 // eightbyte is passed in the upper half of the last used SSE
3434 // register. This only happens when 128-bit vectors are passed.
3435 case SSEUp:
3436 assert(Lo == SSE && "Unexpected SSEUp classification");
3437 ResType = GetByteVectorType(Ty);
3438 break;
3439 }
3441 // If a high part was specified, merge it together with the low part. It is
3442 // known to pass in the high eightbyte of the result. We do this by forming a
3443 // first class struct aggregate with the high and low part: {low, high}
3444 if (HighPart)
3445 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3447 return ABIArgInfo::getDirect(ResType);
3448 }
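//
// Editorial illustration (assumed types, not from the original source):
//   struct P { int a, b; float f; };   // Lo = Integer, Hi = SSE
// is passed as the pair { i64, float } in one GPR and one XMM register,
// while a 24-byte struct spans three eightbytes and is classified Memory.
//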
3450 ABIArgInfo
3451 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3452 unsigned &NeededSSE) const {
3453 auto RT = Ty->getAs<RecordType>();
3454 assert(RT && "classifyRegCallStructType only valid with struct types");
3456 if (RT->getDecl()->hasFlexibleArrayMember())
3457 return getIndirectReturnResult(Ty);
3460 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3461 if (CXXRD->isDynamicClass()) {
3462 NeededInt = NeededSSE = 0;
3463 return getIndirectReturnResult(Ty);
3464 }
3466 for (const auto &I : CXXRD->bases())
3467 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3468 .isIndirect()) {
3469 NeededInt = NeededSSE = 0;
3470 return getIndirectReturnResult(Ty);
3471 }
3472 }
3475 for (const auto *FD : RT->getDecl()->fields()) {
3476 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3477 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3478 .isIndirect()) {
3479 NeededInt = NeededSSE = 0;
3480 return getIndirectReturnResult(Ty);
3481 }
3482 } else {
3483 unsigned LocalNeededInt, LocalNeededSSE;
3484 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3485 LocalNeededSSE, true)
3486 .isIndirect()) {
3487 NeededInt = NeededSSE = 0;
3488 return getIndirectReturnResult(Ty);
3489 }
3490 NeededInt += LocalNeededInt;
3491 NeededSSE += LocalNeededSSE;
3492 }
3493 }
3495 return ABIArgInfo::getDirect();
3496 }
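//
// Editorial sketch: for __regcall, a struct such as
//   struct V { double d; int i; };
// is walked field by field above, accumulating NeededInt = 1 and
// NeededSSE = 1; the caller then checks both totals against the free
// register pools before committing to register passing.
//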
3498 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3499 unsigned &NeededInt,
3500 unsigned &NeededSSE) const {
3502 NeededInt = 0;
3503 NeededSSE = 0;
3505 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3506 }
3508 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3510 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
3512 // Keep track of the number of assigned registers.
3513 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3514 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3515 unsigned NeededInt, NeededSSE;
3517 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3518 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3519 FI.getReturnInfo() =
3520 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3521 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3522 FreeIntRegs -= NeededInt;
3523 FreeSSERegs -= NeededSSE;
3524 } else {
3525 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3526 }
3527 } else if (!getCXXABI().classifyReturnType(FI))
3528 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3530 // If the return value is indirect, then the hidden argument is consuming one
3531 // integer register.
3532 if (FI.getReturnInfo().isIndirect())
3533 --FreeIntRegs;
3535 // The chain argument effectively gives us another free register.
3536 if (FI.isChainCall())
3537 ++FreeIntRegs;
3539 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3540 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3541 // get assigned (in left-to-right order) for passing as follows...
3542 unsigned ArgNo = 0;
3543 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3544 it != ie; ++it, ++ArgNo) {
3545 bool IsNamedArg = ArgNo < NumRequiredArgs;
3547 if (IsRegCall && it->type->isStructureOrClassType())
3548 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3549 else
3550 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
3551 NeededSSE, IsNamedArg);
3553 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3554 // eightbyte of an argument, the whole argument is passed on the
3555 // stack. If registers have already been assigned for some
3556 // eightbytes of such an argument, the assignments get reverted.
3557 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3558 FreeIntRegs -= NeededInt;
3559 FreeSSERegs -= NeededSSE;
3560 } else {
3561 it->info = getIndirectResult(it->type, FreeIntRegs);
3562 }
3563 }
3564 }
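//
// Editorial example: once the six GPRs %rdi...%r9 are consumed by earlier
// arguments, a seventh integer argument still classifies as Integer but
// fails the FreeIntRegs check above and travels on the stack, as
// AMD64-ABI 3.2.3p3 requires.
//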
3566 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3567 Address VAListAddr, QualType Ty) {
3568 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
3569 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
3570 llvm::Value *overflow_arg_area =
3571 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3573 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3574 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3575 // It isn't stated explicitly in the standard, but in practice we use
3576 // alignment greater than 16 where necessary.
3577 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3578 if (Align > CharUnits::fromQuantity(8)) {
3579 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3580 Align);
3581 }
3583 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3584 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3585 llvm::Value *Res =
3586 CGF.Builder.CreateBitCast(overflow_arg_area,
3587 llvm::PointerType::getUnqual(LTy));
3589 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3590 // l->overflow_arg_area + sizeof(type).
3591 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3592 // an 8 byte boundary.
3594 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3595 llvm::Value *Offset =
3596 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3597 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3598 "overflow_arg_area.next");
3599 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3601 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3602 return Address(Res, Align);
3603 }
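//
// Editorial example: for va_arg(ap, T) with sizeof(T) == 12, the code above
// rounds the increment to 16, so successive overflow-area arguments stay on
// 8-byte boundaries.
//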
3605 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3606 QualType Ty) const {
3607 // Assume that va_list type is correct; should be pointer to LLVM type:
3608 // struct {
3609 // i32 gp_offset;
3610 // i32 fp_offset;
3611 // i8* overflow_arg_area;
3612 // i8* reg_save_area;
3613 // };
3614 unsigned neededInt, neededSSE;
3616 Ty = getContext().getCanonicalType(Ty);
3617 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3618 /*isNamedArg*/false);
3620 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3621 // in the registers. If not go to step 7.
3622 if (!neededInt && !neededSSE)
3623 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3625 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3626 // general purpose registers needed to pass type and num_fp to hold
3627 // the number of floating point registers needed.
3629 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3630 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3631 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3633 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
3634 // register save space.
3636 llvm::Value *InRegs = nullptr;
3637 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3638 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3639 if (neededInt) {
3640 gp_offset_p =
3641 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
3642 "gp_offset_p");
3643 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3644 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3645 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3646 }
3648 if (neededSSE) {
3649 fp_offset_p =
3650 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
3651 "fp_offset_p");
3652 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3653 llvm::Value *FitsInFP =
3654 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3655 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3656 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3657 }
3659 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3660 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3661 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3662 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3664 // Emit code to load the value if it was passed in registers.
3666 CGF.EmitBlock(InRegBlock);
3668 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3669 // an offset of l->gp_offset and/or l->fp_offset. This may require
3670 // copying to a temporary location in case the parameter is passed
3671 // in different register classes or requires an alignment greater
3672 // than 8 for general purpose registers and 16 for XMM registers.
3674 // FIXME: This really results in shameful code when we end up needing to
3675 // collect arguments from different places; often what should result in a
3676 // simple assembling of a structure from scattered addresses has many more
3677 // loads than necessary. Can we clean this up?
3678 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3679 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3680 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
3681 "reg_save_area");
3683 Address RegAddr = Address::invalid();
3684 if (neededInt && neededSSE) {
3686 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3687 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3688 Address Tmp = CGF.CreateMemTemp(Ty);
3689 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3690 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3691 llvm::Type *TyLo = ST->getElementType(0);
3692 llvm::Type *TyHi = ST->getElementType(1);
3693 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3694 "Unexpected ABI info for mixed regs");
3695 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3696 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3697 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
3698 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
3699 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3700 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3702 // Copy the first element.
3703 // FIXME: Our choice of alignment here and below is probably pessimistic.
3704 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
3705 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
3706 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
3707 CGF.Builder.CreateStore(V,
3708 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3710 // Copy the second element.
3711 V = CGF.Builder.CreateAlignedLoad(
3712 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
3713 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
3714 CharUnits Offset = CharUnits::fromQuantity(
3715 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3716 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
3718 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3719 } else if (neededInt) {
3720 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
3721 CharUnits::fromQuantity(8));
3722 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3724 // Copy to a temporary if necessary to ensure the appropriate alignment.
3725 std::pair<CharUnits, CharUnits> SizeAlign =
3726 getContext().getTypeInfoInChars(Ty);
3727 uint64_t TySize = SizeAlign.first.getQuantity();
3728 CharUnits TyAlign = SizeAlign.second;
3730 // Copy into a temporary if the type is more aligned than the
3731 // register save area.
3732 if (TyAlign.getQuantity() > 8) {
3733 Address Tmp = CGF.CreateMemTemp(Ty);
3734 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3735 RegAddr = Tmp;
3736 }
3738 } else if (neededSSE == 1) {
3739 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3740 CharUnits::fromQuantity(16));
3741 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3742 } else {
3743 assert(neededSSE == 2 && "Invalid number of needed registers!");
3744 // SSE registers are spaced 16 bytes apart in the register save
3745 // area, so we need to collect the two eightbytes together.
3746 // The ABI isn't explicit about this, but it seems reasonable
3747 // to assume that the slots are 16-byte aligned, since the stack is
3748 // naturally 16-byte aligned and the prologue is expected to store
3749 // all the SSE registers to the RSA.
3750 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3751 CharUnits::fromQuantity(16));
3752 Address RegAddrHi =
3753 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3754 CharUnits::fromQuantity(16));
3755 llvm::Type *DoubleTy = CGF.DoubleTy;
3756 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy);
3757 llvm::Value *V;
3758 Address Tmp = CGF.CreateMemTemp(Ty);
3759 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3760 V = CGF.Builder.CreateLoad(
3761 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
3762 CGF.Builder.CreateStore(V,
3763 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3764 V = CGF.Builder.CreateLoad(
3765 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
3766 CGF.Builder.CreateStore(V,
3767 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
3769 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3770 }
3772 // AMD64-ABI 3.5.7p5: Step 5. Set:
3773 // l->gp_offset = l->gp_offset + num_gp * 8
3774 // l->fp_offset = l->fp_offset + num_fp * 16.
3775 if (neededInt) {
3776 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3777 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3778 gp_offset_p);
3779 }
3780 if (neededSSE) {
3781 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3782 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3783 fp_offset_p);
3784 }
3785 CGF.EmitBranch(ContBlock);
3787 // Emit code to load the value if it was passed in memory.
3789 CGF.EmitBlock(InMemBlock);
3790 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3792 // Return the appropriate result.
3794 CGF.EmitBlock(ContBlock);
3795 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3796 "vaarg.addr");
3797 return ResAddr;
3798 }
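//
// Editorial sketch of the control flow above for va_arg(ap, double):
// neededSSE == 1, so the emitted test is fp_offset <= 160 (176 - 16);
// on success the value is loaded from reg_save_area + fp_offset and
// fp_offset is bumped by 16, otherwise the memory path supplies it.
//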
3800 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3801 QualType Ty) const {
3802 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3803 CGF.getContext().getTypeInfoInChars(Ty),
3804 CharUnits::fromQuantity(8),
3805 /*allowHigherAlign*/ false);
3806 }
3808 ABIArgInfo
3809 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
3810 const ABIArgInfo &current) const {
3811 // Assumes vectorCall calling convention.
3812 const Type *Base = nullptr;
3813 uint64_t NumElts = 0;
3815 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3816 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3817 FreeSSERegs -= NumElts;
3818 return getDirectX86Hva();
3819 }
3821 return current;
3822 }
3823 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3824 bool IsReturnType, bool IsVectorCall,
3825 bool IsRegCall) const {
3827 if (Ty->isVoidType())
3828 return ABIArgInfo::getIgnore();
3830 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3831 Ty = EnumTy->getDecl()->getIntegerType();
3833 TypeInfo Info = getContext().getTypeInfo(Ty);
3834 uint64_t Width = Info.Width;
3835 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3837 const RecordType *RT = Ty->getAs<RecordType>();
3839 if (!IsReturnType) {
3840 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3841 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3842 }
3843 if (RT) {
3844 if (RT->getDecl()->hasFlexibleArrayMember())
3845 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3846 }
3849 const Type *Base = nullptr;
3850 uint64_t NumElts = 0;
3851 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3852 // other targets.
3853 if ((IsVectorCall || IsRegCall) &&
3854 isHomogeneousAggregate(Ty, Base, NumElts)) {
3855 if (IsRegCall) {
3856 if (FreeSSERegs >= NumElts) {
3857 FreeSSERegs -= NumElts;
3858 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3859 return ABIArgInfo::getDirect();
3860 return ABIArgInfo::getExpand();
3861 }
3862 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3863 } else if (IsVectorCall) {
3864 if (FreeSSERegs >= NumElts &&
3865 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
3866 FreeSSERegs -= NumElts;
3867 return ABIArgInfo::getDirect();
3868 } else if (IsReturnType) {
3869 return ABIArgInfo::getExpand();
3870 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
3871 // HVAs are delayed and reclassified in the 2nd step.
3872 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3873 }
3874 }
3875 }
3877 if (Ty->isMemberPointerType()) {
3878 // If the member pointer is represented by an LLVM int or ptr, pass it
3879 // directly.
3880 llvm::Type *LLTy = CGT.ConvertType(Ty);
3881 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3882 return ABIArgInfo::getDirect();
3883 }
3885 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3886 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3887 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3888 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3889 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3891 // Otherwise, coerce it to a small integer.
3892 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3895 // Bool type is always extended to the ABI, other builtin types are not
3896 // extended.
3897 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3898 if (BT && BT->getKind() == BuiltinType::Bool)
3899 return ABIArgInfo::getExtend();
3901 // Mingw64 GCC uses the old 80 bit extended precision floating point unit. It
3902 // passes them indirectly through memory.
3903 if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
3904 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3905 if (LDF == &llvm::APFloat::x87DoubleExtended())
3906 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3907 }
3909 return ABIArgInfo::getDirect();
3910 }
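//
// Editorial illustration of the MS x64 size rule enforced above:
//   struct A { int x, y, z; };   // 12 bytes -> passed by reference
//   struct B { int x, y; };      // 8 bytes  -> coerced to a single i64
//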
3912 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
3913 unsigned FreeSSERegs,
3914 bool IsVectorCall,
3915 bool IsRegCall) const {
3916 unsigned Count = 0;
3917 for (auto &I : FI.arguments()) {
3918 // Vectorcall in x64 only permits the first 6 arguments to be passed
3919 // as XMM/YMM registers.
3920 if (Count < VectorcallMaxParamNumAsReg)
3921 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
3922 else {
3923 // Since these cannot be passed in registers, pretend no registers
3924 // are left.
3925 unsigned ZeroSSERegsAvail = 0;
3926 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
3927 IsVectorCall, IsRegCall);
3928 }
3929 ++Count;
3930 }
3932 for (auto &I : FI.arguments()) {
3933 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
3934 }
3935 }
3937 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3938 bool IsVectorCall =
3939 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3940 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
3942 unsigned FreeSSERegs = 0;
3943 if (IsVectorCall) {
3944 // We can use up to 4 SSE return registers with vectorcall.
3945 FreeSSERegs = 4;
3946 } else if (IsRegCall) {
3947 // RegCall gives us 16 SSE registers.
3948 FreeSSERegs = 16;
3949 }
3951 if (!getCXXABI().classifyReturnType(FI))
3952 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
3953 IsVectorCall, IsRegCall);
3955 if (IsVectorCall) {
3956 // We can use up to 6 SSE register parameters with vectorcall.
3957 FreeSSERegs = 6;
3958 } else if (IsRegCall) {
3959 // RegCall gives us 16 SSE registers, we can reuse the return registers.
3960 FreeSSERegs = 16;
3961 }
3963 if (IsVectorCall) {
3964 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
3965 } else {
3966 for (auto &I : FI.arguments())
3967 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
3968 }
3969 }
3972 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3973 QualType Ty) const {
3975 bool IsIndirect = false;
3977 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3978 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3979 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
3980 uint64_t Width = getContext().getTypeSize(Ty);
3981 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3984 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
3985 CGF.getContext().getTypeInfoInChars(Ty),
3986 CharUnits::fromQuantity(8),
3987 /*allowHigherAlign*/ false);
3988 }
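//
// Editorial sketch: every Win64 variadic slot is 8 bytes, so the emitted
// va_arg is a plain pointer bump, e.g.
//   long long v = va_arg(ap, long long);  // load value, advance ap by 8
// and an oversized struct arrives as a pointer, covered by IsIndirect.
//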
3990 // PowerPC-32
3991 namespace {
3992 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
3993 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
3994 bool IsSoftFloatABI;
3995 public:
3996 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
3997 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
3999 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4000 QualType Ty) const override;
4001 };
4003 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4004 public:
4005 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
4006 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
4008 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4009 // This is recovered from gcc output.
4010 return 1; // r1 is the dedicated stack pointer
4013 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4014 llvm::Value *Address) const override;
4015 };
4017 }
4019 // TODO: this implementation is now likely redundant with
4020 // DefaultABIInfo::EmitVAArg.
4021 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4022 QualType Ty) const {
4023 const unsigned OverflowLimit = 8;
4024 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4025 // TODO: Implement this. For now ignore.
4027 return Address::invalid(); // FIXME?
4028 }
4030 // struct __va_list_tag {
4031 // unsigned char gpr;
4032 // unsigned char fpr;
4033 // unsigned short reserved;
4034 // void *overflow_arg_area;
4035 // void *reg_save_area;
4036 // };
4038 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4039 bool isInt =
4040 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
4041 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4043 // All aggregates are passed indirectly? That doesn't seem consistent
4044 // with the argument-lowering code.
4045 bool isIndirect = Ty->isAggregateType();
4047 CGBuilderTy &Builder = CGF.Builder;
4049 // The calling convention either uses 1-2 GPRs or 1 FPR.
4050 Address NumRegsAddr = Address::invalid();
4051 if (isInt || IsSoftFloatABI) {
4052 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
4054 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
4057 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4059 // "Align" the register count when TY is i64.
4060 if (isI64 || (isF64 && IsSoftFloatABI)) {
4061 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4062 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4063 }
4066 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4068 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4069 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4070 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4072 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4074 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4075 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4077 // Case 1: consume registers.
4078 Address RegAddr = Address::invalid();
4079 {
4080 CGF.EmitBlock(UsingRegs);
4082 Address RegSaveAreaPtr =
4083 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
4084 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4085 CharUnits::fromQuantity(8));
4086 assert(RegAddr.getElementType() == CGF.Int8Ty);
4088 // Floating-point registers start after the general-purpose registers.
4089 if (!(isInt || IsSoftFloatABI)) {
4090 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4091 CharUnits::fromQuantity(32));
4092 }
4094 // Get the address of the saved value by scaling the number of
4095 // registers we've used by the size of each register.
4096 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4097 llvm::Value *RegOffset =
4098 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4099 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4100 RegAddr.getPointer(), RegOffset),
4101 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4102 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4104 // Increase the used-register count.
4105 NumRegs =
4106 Builder.CreateAdd(NumRegs,
4107 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4108 Builder.CreateStore(NumRegs, NumRegsAddr);
4110 CGF.EmitBranch(Cont);
4111 }
4113 // Case 2: consume space in the overflow area.
4114 Address MemAddr = Address::invalid();
4115 {
4116 CGF.EmitBlock(UsingOverflow);
4118 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4120 // Everything in the overflow area is rounded up to a size of at least 4.
4121 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4123 CharUnits Size;
4124 if (!isIndirect) {
4125 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4126 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
4127 } else {
4128 Size = CGF.getPointerSize();
4129 }
4131 Address OverflowAreaAddr =
4132 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
4133 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4135 // Round up address of argument to alignment
4136 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4137 if (Align > OverflowAreaAlign) {
4138 llvm::Value *Ptr = OverflowArea.getPointer();
4139 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4140 Align);
4141 }
4143 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4145 // Increase the overflow area.
4146 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4147 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4148 CGF.EmitBranch(Cont);
4149 }
4151 CGF.EmitBlock(Cont);
4153 // Merge the cases with a phi.
4154 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4155 "vaarg.addr");
4157 // Load the pointer if the argument was passed indirectly.
4158 if (isIndirect)
4159 Result = Address(Builder.CreateLoad(Result, "aggr"),
4160 getContext().getTypeAlignInChars(Ty));
4162 return Result;
4163 }
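//
// Editorial example: with hard float, va_arg(ap, double) above consumes one
// 8-byte FPR slot starting 32 bytes into the register save area, while
// va_arg(ap, int) takes a 4-byte GPR slot; once the gpr/fpr counter reaches
// OverflowLimit (8), everything comes from the overflow area instead.
//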
4167 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4168 llvm::Value *Address) const {
4169 // This is calculated from the LLVM and GCC tables and verified
4170 // against gcc output. AFAIK all ABIs use the same encoding.
4172 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4174 llvm::IntegerType *i8 = CGF.Int8Ty;
4175 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4176 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4177 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4179 // 0-31: r0-31, the 4-byte general-purpose registers
4180 AssignToArrayRange(Builder, Address, Four8, 0, 31);
4182 // 32-63: fp0-31, the 8-byte floating-point registers
4183 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4185 // 64-76 are various 4-byte special-purpose registers:
4192 AssignToArrayRange(Builder, Address, Four8, 64, 76);
4194 // 77-108: v0-31, the 16-byte vector registers
4195 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4202 AssignToArrayRange(Builder, Address, Four8, 109, 113);
4204 return false;
4205 }
4207 // PowerPC-64
4209 namespace {
4210 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4211 class PPC64_SVR4_ABIInfo : public ABIInfo {
4212 public:
4213 enum ABIKind {
4214 ELFv1 = 0,
4215 ELFv2
4216 };
4218 private:
4219 static const unsigned GPRBits = 64;
4220 ABIKind Kind;
4221 bool HasQPX;
4222 bool IsSoftFloatABI;
4224 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
4225 // will be passed in a QPX register.
4226 bool IsQPXVectorTy(const Type *Ty) const {
4227 if (!HasQPX)
4228 return false;
4230 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4231 unsigned NumElements = VT->getNumElements();
4232 if (NumElements == 1)
4233 return false;
4235 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4236 if (getContext().getTypeSize(Ty) <= 256)
4237 return true;
4238 } else if (VT->getElementType()->
4239 isSpecificBuiltinType(BuiltinType::Float)) {
4240 if (getContext().getTypeSize(Ty) <= 128)
4241 return true;
4242 }
4243 }
4245 return false;
4246 }
4248 bool IsQPXVectorTy(QualType Ty) const {
4249 return IsQPXVectorTy(Ty.getTypePtr());
4253 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
4254 bool SoftFloatABI)
4255 : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
4256 IsSoftFloatABI(SoftFloatABI) {}
4258 bool isPromotableTypeForABI(QualType Ty) const;
4259 CharUnits getParamTypeAlignment(QualType Ty) const;
4261 ABIArgInfo classifyReturnType(QualType RetTy) const;
4262 ABIArgInfo classifyArgumentType(QualType Ty) const;
4264 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4265 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4266 uint64_t Members) const override;
4268 // TODO: We can add more logic to computeInfo to improve performance.
4269 // Example: For aggregate arguments that fit in a register, we could
4270 // use getDirectInReg (as is done below for structs containing a single
4271 // floating-point value) to avoid pushing them to memory on function
4272 // entry. This would require changing the logic in PPCISelLowering
4273 // when lowering the parameters in the caller and args in the callee.
4274 void computeInfo(CGFunctionInfo &FI) const override {
4275 if (!getCXXABI().classifyReturnType(FI))
4276 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4277 for (auto &I : FI.arguments()) {
4278 // We rely on the default argument classification for the most part.
4279 // One exception: An aggregate containing a single floating-point
4280 // or vector item must be passed in a register if one is available.
4281 const Type *T = isSingleElementStruct(I.type, getContext());
4282 if (T) {
4283 const BuiltinType *BT = T->getAs<BuiltinType>();
4284 if (IsQPXVectorTy(T) ||
4285 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4286 (BT && BT->isFloatingPoint())) {
4287 QualType QT(T, 0);
4288 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4289 continue;
4290 }
4291 }
4292 I.info = classifyArgumentType(I.type);
4293 }
4294 }
4296 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4297 QualType Ty) const override;
4300 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4302 public:
4303 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4304 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
4305 bool SoftFloatABI)
4306 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
4307 SoftFloatABI)) {}
4309 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4310 // This is recovered from gcc output.
4311 return 1; // r1 is the dedicated stack pointer
4314 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4315 llvm::Value *Address) const override;
4316 };
4318 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4319 public:
4320 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4322 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4323 // This is recovered from gcc output.
4324 return 1; // r1 is the dedicated stack pointer
4327 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4328 llvm::Value *Address) const override;
4329 };
4331 }
4333 // Return true if the ABI requires Ty to be passed sign- or zero-
4334 // extended to 64 bits.
4336 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4337 // Treat an enum type as its underlying type.
4338 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4339 Ty = EnumTy->getDecl()->getIntegerType();
4341 // Promotable integer types are required to be promoted by the ABI.
4342 if (Ty->isPromotableIntegerType())
4343 return true;
4345 // In addition to the usual promotable integer types, we also need to
4346 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4347 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4348 switch (BT->getKind()) {
4349 case BuiltinType::Int:
4350 case BuiltinType::UInt:
4351 return true;
4352 default:
4353 break;
4354 }
4356 return false;
4357 }
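//
// Editorial example: in
//   void f(int a, unsigned b);
// both parameters occupy full 64-bit GPRs under this ABI, with a arriving
// sign-extended and b zero-extended, which is why 32-bit builtins are
// reported as promotable above.
//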
4359 /// isAlignedParamType - Determine whether a type requires 16-byte or
4360 /// higher alignment in the parameter area. Always returns at least 8.
4361 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4362 // Complex types are passed just like their elements.
4363 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4364 Ty = CTy->getElementType();
4366 // Only vector types of size 16 bytes need alignment (larger types are
4367 // passed via reference, smaller types are not aligned).
4368 if (IsQPXVectorTy(Ty)) {
4369 if (getContext().getTypeSize(Ty) > 128)
4370 return CharUnits::fromQuantity(32);
4372 return CharUnits::fromQuantity(16);
4373 } else if (Ty->isVectorType()) {
4374 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
4377 // For single-element float/vector structs, we consider the whole type
4378 // to have the same alignment requirements as its single element.
4379 const Type *AlignAsType = nullptr;
4380 const Type *EltType = isSingleElementStruct(Ty, getContext());
4381 if (EltType) {
4382 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4383 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
4384 getContext().getTypeSize(EltType) == 128) ||
4385 (BT && BT->isFloatingPoint()))
4386 AlignAsType = EltType;
4387 }
4389 // Likewise for ELFv2 homogeneous aggregates.
4390 const Type *Base = nullptr;
4391 uint64_t Members = 0;
4392 if (!AlignAsType && Kind == ELFv2 &&
4393 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
4394 AlignAsType = Base;
4396 // With special case aggregates, only vector base types need alignment.
4397 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4398 if (getContext().getTypeSize(AlignAsType) > 128)
4399 return CharUnits::fromQuantity(32);
4401 return CharUnits::fromQuantity(16);
4402 } else if (AlignAsType) {
4403 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
4406 // Otherwise, we only need alignment for any aggregate type that
4407 // has an alignment requirement of >= 16 bytes.
4408 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
4409 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
4410 return CharUnits::fromQuantity(32);
4411 return CharUnits::fromQuantity(16);
4414 return CharUnits::fromQuantity(8);
4415 }
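//
// Editorial example: by the rules above, a 16-byte `vector int` parameter
// gets a 16-byte-aligned slot, while struct { double a, b; } reports the
// default 8 and packs into consecutive doublewords.
//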
4417 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
4418 /// aggregate. Base is set to the base element type, and Members is set
4419 /// to the number of base elements.
4420 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
4421 uint64_t &Members) const {
4422 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
4423 uint64_t NElements = AT->getSize().getZExtValue();
4424 if (NElements == 0)
4425 return false;
4426 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
4427 return false;
4428 Members *= NElements;
4429 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
4430 const RecordDecl *RD = RT->getDecl();
4431 if (RD->hasFlexibleArrayMember())
4432 return false;
4434 Members = 0;
4436 // If this is a C++ record, check the bases first.
4437 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4438 for (const auto &I : CXXRD->bases()) {
4439 // Ignore empty records.
4440 if (isEmptyRecord(getContext(), I.getType(), true))
4441 continue;
4443 uint64_t FldMembers;
4444 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
4445 return false;
4447 Members += FldMembers;
4448 }
4449 }
4451 for (const auto *FD : RD->fields()) {
4452 // Ignore (non-zero arrays of) empty records.
4453 QualType FT = FD->getType();
4454 while (const ConstantArrayType *AT =
4455 getContext().getAsConstantArrayType(FT)) {
4456 if (AT->getSize().getZExtValue() == 0)
4457 return false;
4458 FT = AT->getElementType();
4459 }
4460 if (isEmptyRecord(getContext(), FT, true))
4461 continue;
4463 // For compatibility with GCC, ignore empty bitfields in C++ mode.
4464 if (getContext().getLangOpts().CPlusPlus &&
4465 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4466 continue;
4468 uint64_t FldMembers;
4469 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
4470 return false;
4472 Members = (RD->isUnion() ?
4473 std::max(Members, FldMembers) : Members + FldMembers);
4474 }
4476 if (!Base)
4477 return false;
4479 // Ensure there is no padding.
4480 if (getContext().getTypeSize(Base) * Members !=
4481 getContext().getTypeSize(Ty))
4482 return false;
4483 } else {
4484 Members = 1;
4485 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4486 Members = 2;
4487 Ty = CT->getElementType();
4488 }
4490 // Most ABIs only support float, double, and some vector type widths.
4491 if (!isHomogeneousAggregateBaseType(Ty))
4492 return false;
4494 // The base type must be the same for all members. Types that
4495 // agree in both total size and mode (float vs. vector) are
4496 // treated as being equivalent here.
4497 const Type *TyPtr = Ty.getTypePtr();
4498 if (!Base) {
4499 Base = TyPtr;
4500 // If it's a vector with a non-power-of-2 element count, its size in
4501 // memory is already a power of 2, so widen the base type to match.
4502 if (const VectorType *VT = Base->getAs<VectorType>()) {
4503 QualType EltTy = VT->getElementType();
4504 unsigned NumElements =
4505 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
4506 Base = getContext()
4507 .getVectorType(EltTy, NumElements, VT->getVectorKind())
4508 .getTypePtr();
4509 }
4510 }
4512 if (Base->isVectorType() != TyPtr->isVectorType() ||
4513 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
4514 return false;
4515 }
4516 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
4517 }
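//
// Editorial illustration: struct { float x, y, z, w; } passes this check
// with Base = float and Members = 4 (same mode, no padding), whereas
// struct { float f; double d; } mixes base types and is rejected.
//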
4519 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4520 // Homogeneous aggregates for ELFv2 must have base types of float,
4521 // double, long double, or 128-bit vectors.
4522 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4523 if (BT->getKind() == BuiltinType::Float ||
4524 BT->getKind() == BuiltinType::Double ||
4525 BT->getKind() == BuiltinType::LongDouble) {
4526 if (IsSoftFloatABI)
4527 return false;
4528 return true;
4529 }
4530 }
4531 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4532 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4533 return true;
4534 }
4535 return false;
4536 }
4538 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4539 const Type *Base, uint64_t Members) const {
4540 // Vector types require one register, floating point types require one
4541 // or two registers depending on their size.
4542 uint32_t NumRegs =
4543 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
4545 // Homogeneous Aggregates may occupy at most 8 registers.
4546 return Members * NumRegs <= 8;
4547 }
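//
// Editorial example: eight floats (8 members x 1 register) still qualify,
// but five IBM long doubles (5 x 2 registers) exceed the 8-register cap
// and fall back to the ordinary aggregate rules.
//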
4550 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
4551 Ty = useFirstFieldIfTransparentUnion(Ty);
4553 if (Ty->isAnyComplexType())
4554 return ABIArgInfo::getDirect();
4556 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
4557 // or via reference (larger than 16 bytes).
4558 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
4559 uint64_t Size = getContext().getTypeSize(Ty);
4560 if (Size > 128)
4561 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4562 else if (Size < 128) {
4563 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4564 return ABIArgInfo::getDirect(CoerceTy);
4565 }
4566 }
4568 if (isAggregateTypeForABI(Ty)) {
4569 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4570 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4572 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4573 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
4575 // ELFv2 homogeneous aggregates are passed as array types.
4576 const Type *Base = nullptr;
4577 uint64_t Members = 0;
4578 if (Kind == ELFv2 &&
4579 isHomogeneousAggregate(Ty, Base, Members)) {
4580 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4581 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4582 return ABIArgInfo::getDirect(CoerceTy);
4585 // If an aggregate may end up fully in registers, we do not
4586 // use the ByVal method, but pass the aggregate as array.
4587 // This is usually beneficial since we avoid forcing the
4588 // back-end to store the argument to memory.
4589 uint64_t Bits = getContext().getTypeSize(Ty);
4590 if (Bits > 0 && Bits <= 8 * GPRBits) {
4591 llvm::Type *CoerceTy;
4593 // Types up to 8 bytes are passed as integer type (which will be
4594 // properly aligned in the argument save area doubleword).
4595 if (Bits <= GPRBits)
4596 CoerceTy =
4597 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4598 // Larger types are passed as arrays, with the base type selected
4599 // according to the required alignment in the save area.
4600 else {
4601 uint64_t RegBits = ABIAlign * 8;
4602 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4603 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4604 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4605 }
4607 return ABIArgInfo::getDirect(CoerceTy);
4608 }
4610 // All other aggregates are passed ByVal.
4611 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
4612 /*ByVal=*/true,
4613 /*Realign=*/TyAlign > ABIAlign);
4614 }
4616 return (isPromotableTypeForABI(Ty) ?
4617 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4618 }
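//
// Editorial illustration: under ELFv2, struct { double a, b; } is a
// homogeneous aggregate and is passed as [2 x double]; a plain 24-byte,
// 8-byte-aligned struct becomes [3 x i64], avoiding a byval copy.
//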
4621 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4622 if (RetTy->isVoidType())
4623 return ABIArgInfo::getIgnore();
4625 if (RetTy->isAnyComplexType())
4626 return ABIArgInfo::getDirect();
4628 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
4629 // or via reference (larger than 16 bytes).
4630 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
4631 uint64_t Size = getContext().getTypeSize(RetTy);
4632 if (Size > 128)
4633 return getNaturalAlignIndirect(RetTy);
4634 else if (Size < 128) {
4635 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4636 return ABIArgInfo::getDirect(CoerceTy);
4637 }
4638 }
4640 if (isAggregateTypeForABI(RetTy)) {
4641 // ELFv2 homogeneous aggregates are returned as array types.
4642 const Type *Base = nullptr;
4643 uint64_t Members = 0;
4644 if (Kind == ELFv2 &&
4645 isHomogeneousAggregate(RetTy, Base, Members)) {
4646 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4647 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4648 return ABIArgInfo::getDirect(CoerceTy);
4651 // ELFv2 small aggregates are returned in up to two registers.
4652 uint64_t Bits = getContext().getTypeSize(RetTy);
4653 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
4654 if (Bits == 0)
4655 return ABIArgInfo::getIgnore();
4657 llvm::Type *CoerceTy;
4658 if (Bits > GPRBits) {
4659 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4660 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4661 } else
4662 CoerceTy =
4663 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4664 return ABIArgInfo::getDirect(CoerceTy);
4665 }
4667 // All other aggregates are returned indirectly.
4668 return getNaturalAlignIndirect(RetTy);
4671 return (isPromotableTypeForABI(RetTy) ?
4672 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4673 }
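//
// Editorial example: returning struct { int a, b, c; } (96 bits) under
// ELFv2 uses the { i64, i64 } pair above, while an 8-byte struct comes
// back as a single i64.
//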
4675 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
4676 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4677 QualType Ty) const {
4678 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4679 TypeInfo.second = getParamTypeAlignment(Ty);
4681 CharUnits SlotSize = CharUnits::fromQuantity(8);
4683 // If we have a complex type and the base type is smaller than 8 bytes,
4684 // the ABI calls for the real and imaginary parts to be right-adjusted
4685 // in separate doublewords. However, Clang expects us to produce a
4686 // pointer to a structure with the two parts packed tightly. So generate
4687 // loads of the real and imaginary parts relative to the va_list pointer,
4688 // and store them to a temporary structure.
4689 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4690 CharUnits EltSize = TypeInfo.first / 2;
4691 if (EltSize < SlotSize) {
4692 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
4693 SlotSize * 2, SlotSize,
4694 SlotSize, /*AllowHigher*/ true);
4696 Address RealAddr = Addr;
4697 Address ImagAddr = RealAddr;
4698 if (CGF.CGM.getDataLayout().isBigEndian()) {
4699 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
4700 SlotSize - EltSize);
4701 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
4702 2 * SlotSize - EltSize);
4703 } else {
4704 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
4705 }
4707 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
4708 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
4709 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
4710 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
4711 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
4713 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
4714 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
4715 /*init*/ true);
4716 return Temp;
4717 }
4718 }
4720 // Otherwise, just use the general rule.
4721 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
4722 TypeInfo, SlotSize, /*AllowHigher*/ true);
4723 }
4726 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4727 llvm::Value *Address) {
4728 // This is calculated from the LLVM and GCC tables and verified
4729 // against gcc output. AFAIK all ABIs use the same encoding.
4731 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4733 llvm::IntegerType *i8 = CGF.Int8Ty;
4734 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4735 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4736 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4738 // 0-31: r0-31, the 8-byte general-purpose registers
4739 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
4741 // 32-63: fp0-31, the 8-byte floating-point registers
4742 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4744 // 64-67 are various 8-byte special-purpose registers:
4749 AssignToArrayRange(Builder, Address, Eight8, 64, 67);
4751 // 68-76 are various 4-byte special-purpose registers:
4754 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4756 // 77-108: v0-31, the 16-byte vector registers
4757 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4767 AssignToArrayRange(Builder, Address, Eight8, 109, 116);
4769 return false;
4770 }
4773 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4774 CodeGen::CodeGenFunction &CGF,
4775 llvm::Value *Address) const {
4777 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4778 }
4781 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4782 llvm::Value *Address) const {
4784 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4785 }
4787 //===----------------------------------------------------------------------===//
4788 // AArch64 ABI Implementation
4789 //===----------------------------------------------------------------------===//
4791 namespace {
4793 class AArch64ABIInfo : public SwiftABIInfo {
4794 public:
4795 enum ABIKind {
4796 AAPCS = 0,
4797 DarwinPCS,
4798 Win64
4799 };
4801 private:
4802 ABIKind Kind;
4804 public:
4805 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
4806 : SwiftABIInfo(CGT), Kind(Kind) {}
4808 private:
4809 ABIKind getABIKind() const { return Kind; }
4810 bool isDarwinPCS() const { return Kind == DarwinPCS; }
4812 ABIArgInfo classifyReturnType(QualType RetTy) const;
4813 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4814 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4815 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4816 uint64_t Members) const override;
4818 bool isIllegalVectorType(QualType Ty) const;
4820 void computeInfo(CGFunctionInfo &FI) const override {
4821 if (!getCXXABI().classifyReturnType(FI))
4822 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4824 for (auto &it : FI.arguments())
4825 it.info = classifyArgumentType(it.type);
4826 }
4828 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4829 CodeGenFunction &CGF) const;
4831 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
4832 CodeGenFunction &CGF) const;
4834 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4835 QualType Ty) const override {
4836 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
4837 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4838 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4839 }
4841 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4842 QualType Ty) const override;
4844 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
4845 ArrayRef<llvm::Type*> scalars,
4846 bool asReturnValue) const override {
4847 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4848 }
4849 bool isSwiftErrorInRegister() const override {
4850 return true;
4851 }
4853 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
4854 unsigned elts) const override;
4855 };
4857 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4858 public:
4859 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
4860 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
4862 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4863 return "mov\tfp, fp\t\t# marker for objc_retainAutoreleaseReturnValue";
4866 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4867 return 31;
4868 }
4870 bool doesReturnSlotInterfereWithArgs() const override { return false; }
4871 };
4872 }
4874 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
4875 Ty = useFirstFieldIfTransparentUnion(Ty);
4877 // Handle illegal vector types here.
4878 if (isIllegalVectorType(Ty)) {
4879 uint64_t Size = getContext().getTypeSize(Ty);
4880 // Android promotes <2 x i8> to i16, not i32
4881 if (isAndroid() && (Size <= 16)) {
4882 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
4883 return ABIArgInfo::getDirect(ResType);
4884 }
4885 if (Size <= 32) {
4886 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4887 return ABIArgInfo::getDirect(ResType);
4888 }
4889 if (Size == 64) {
4890 llvm::Type *ResType =
4891 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4892 return ABIArgInfo::getDirect(ResType);
4893 }
4894 if (Size == 128) {
4895 llvm::Type *ResType =
4896 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
4897 return ABIArgInfo::getDirect(ResType);
4898 }
4899 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4900 }
4902 if (!isAggregateTypeForABI(Ty)) {
4903 // Treat an enum type as its underlying type.
4904 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4905 Ty = EnumTy->getDecl()->getIntegerType();
4907 return (Ty->isPromotableIntegerType() && isDarwinPCS()
4908 ? ABIArgInfo::getExtend()
4909 : ABIArgInfo::getDirect());
4912 // Structures with either a non-trivial destructor or a non-trivial
4913 // copy constructor are always indirect.
4914 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4915 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
4916 CGCXXABI::RAA_DirectInMemory);
4919 // Empty records are always ignored on Darwin, but actually passed in C++ mode
4920 // elsewhere for GNU compatibility.
4921 uint64_t Size = getContext().getTypeSize(Ty);
4922 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
4923 if (IsEmpty || Size == 0) {
4924 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
4925 return ABIArgInfo::getIgnore();
4927 // GNU C mode. The only argument that gets ignored is an empty one with size
4928 // 0.
4929 if (IsEmpty && Size == 0)
4930 return ABIArgInfo::getIgnore();
4931 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4932 }
4934 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
4935 const Type *Base = nullptr;
4936 uint64_t Members = 0;
4937 if (isHomogeneousAggregate(Ty, Base, Members)) {
4938 return ABIArgInfo::getDirect(
4939 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
4942 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
4943 if (Size <= 128) {
4944 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
4945 // the same size and alignment.
4946 if (getTarget().isRenderScriptTarget()) {
4947 return coerceToIntArray(Ty, getContext(), getVMContext());
4948 }
4949 unsigned Alignment = getContext().getTypeAlign(Ty);
4950 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
4952 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4953 // For aggregates with 16-byte alignment, we use i128.
4954 if (Alignment < 128 && Size == 128) {
4955 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4956 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4957 }
4958 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4959 }
4961 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4962 }
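//
// Editorial illustration: struct { float v[4]; } is an HFA and is passed
// as [4 x float]; a 12-byte ordinary struct rounds up to 16 bytes and
// travels as [2 x i64]; a larger non-HFA aggregate goes indirect here.
//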
4964 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
4965 if (RetTy->isVoidType())
4966 return ABIArgInfo::getIgnore();
4968 // Large vector types should be returned via memory.
4969 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
4970 return getNaturalAlignIndirect(RetTy);
4972 if (!isAggregateTypeForABI(RetTy)) {
4973 // Treat an enum type as its underlying type.
4974 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4975 RetTy = EnumTy->getDecl()->getIntegerType();
4977 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
4978 ? ABIArgInfo::getExtend()
4979 : ABIArgInfo::getDirect());
4982 uint64_t Size = getContext().getTypeSize(RetTy);
4983 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
4984 return ABIArgInfo::getIgnore();
4986 const Type *Base = nullptr;
4987 uint64_t Members = 0;
4988 if (isHomogeneousAggregate(RetTy, Base, Members))
4989 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
4990 return ABIArgInfo::getDirect();
4992 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
4993 if (Size <= 128) {
4994 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
4995 // the same size and alignment.
4996 if (getTarget().isRenderScriptTarget()) {
4997 return coerceToIntArray(RetTy, getContext(), getVMContext());
4998 }
4999 unsigned Alignment = getContext().getTypeAlign(RetTy);
5000 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5002 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5003 // For aggregates with 16-byte alignment, we use i128.
5004 if (Alignment < 128 && Size == 128) {
5005 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5006 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5007 }
5008 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5009 }
5011 return getNaturalAlignIndirect(RetTy);
5012 }
5014 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
5015 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
5016 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5017 // Check whether VT is legal.
5018 unsigned NumElements = VT->getNumElements();
5019 uint64_t Size = getContext().getTypeSize(VT);
5020 // NumElements should be power of 2.
5021 if (!llvm::isPowerOf2_32(NumElements))
5022 return true;
5023 return Size != 64 && (Size != 128 || NumElements == 1);
5024 }
5025 return false;
5026 }
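//
// Editorial example: <2 x i8> (16 bits) and a 3-element vector (non-power-
// of-2 count) are reported illegal and coerced by classifyArgumentType,
// while <2 x i32> (64 bits) and <4 x float> (128 bits) map directly onto
// NEON registers.
//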
5028 bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
5030 unsigned elts) const {
5031 if (!llvm::isPowerOf2_32(elts))
5032 return false;
5033 if (totalSize.getQuantity() != 8 &&
5034 (totalSize.getQuantity() != 16 || elts == 1))
5035 return false;
5036 return true;
5037 }
5039 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5040 // Homogeneous aggregates for AAPCS64 must have base types of a floating
5041 // point type or a short-vector type. This is the same as the 32-bit ABI,
5042 // but with the difference that any floating-point type is allowed,
5043 // including __fp16.
5044 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5045 if (BT->isFloatingPoint())
5046 return true;
5047 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5048 unsigned VecSize = getContext().getTypeSize(VT);
5049 if (VecSize == 64 || VecSize == 128)
5050 return true;
5051 }
5052 return false;
5053 }
5055 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5056 uint64_t Members) const {
5057 return Members <= 4;
5058 }
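//
// Editorial example: struct { double d[4]; } is the largest HFA accepted
// here; a fifth double would fail this check and the type would be treated
// as an ordinary aggregate.
//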
5060 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
5061 QualType Ty,
5062 CodeGenFunction &CGF) const {
5063 ABIArgInfo AI = classifyArgumentType(Ty);
5064 bool IsIndirect = AI.isIndirect();
5066 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5067 if (IsIndirect)
5068 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5069 else if (AI.getCoerceToType())
5070 BaseTy = AI.getCoerceToType();
5072 unsigned NumRegs = 1;
5073 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5074 BaseTy = ArrTy->getElementType();
5075 NumRegs = ArrTy->getNumElements();
5076 }
5077 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5079 // The AArch64 va_list type and handling is specified in the Procedure Call
5080 // Standard, section B.4:
5082 // struct {
5083 // void *__stack;
5084 // void *__gr_top;
5085 // void *__vr_top;
5086 // int __gr_offs;
5087 // int __vr_offs;
5088 // };
5090 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
5091 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5092 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
5093 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5095 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5096 CharUnits TyAlign = TyInfo.second;
5098 Address reg_offs_p = Address::invalid();
5099 llvm::Value *reg_offs = nullptr;
5100 int reg_top_index;
5101 CharUnits reg_top_offset;
5102 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
5103 if (!IsFPR) {
5104 // 3 is the field number of __gr_offs
5105 reg_offs_p =
5106 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
5107 "gr_offs_p");
5108 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
5109 reg_top_index = 1; // field number for __gr_top
5110 reg_top_offset = CharUnits::fromQuantity(8);
5111 RegSize = llvm::alignTo(RegSize, 8);
5112 } else {
5113 // 4 is the field number of __vr_offs.
5114 reg_offs_p =
5115 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
5116 "vr_offs_p");
5117 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
5118 reg_top_index = 2; // field number for __vr_top
5119 reg_top_offset = CharUnits::fromQuantity(16);
5120 RegSize = 16 * NumRegs;
5121 }
5123 //=======================================
5124 // Find out where argument was passed
5125 //=======================================
5127 // If reg_offs >= 0 we're already using the stack for this type of
5128 // argument. We don't want to keep updating reg_offs (in case it overflows,
5129 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
5130 // whatever they get).
5131 llvm::Value *UsingStack = nullptr;
5132 UsingStack = CGF.Builder.CreateICmpSGE(
5133 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
5135 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5137 // Otherwise, at least some kind of argument could go in these registers, the
5138 // question is whether this particular type is too big.
5139 CGF.EmitBlock(MaybeRegBlock);
5141 // Integer arguments may need to correct register alignment (for example a
5142 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
5143 // align __gr_offs to calculate the potential address.
5144 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
5145 int Align = TyAlign.getQuantity();
5147 reg_offs = CGF.Builder.CreateAdd(
5148 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
5150 reg_offs = CGF.Builder.CreateAnd(
5151 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
5155 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
5156 // The fact that this is done unconditionally reflects the fact that
5157 // allocating an argument to the stack also uses up all the remaining
5158 // registers of the appropriate kind.
5159 llvm::Value *NewOffset = nullptr;
5160 NewOffset = CGF.Builder.CreateAdd(
5161 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
5162 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
5164 // Now we're in a position to decide whether this argument really was in
5165 // registers or not.
5166 llvm::Value *InRegs = nullptr;
5167 InRegs = CGF.Builder.CreateICmpSLE(
5168 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
5170 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5172 //=======================================
5173 // Argument was in registers
5174 //=======================================
5176 // Now we emit the code for if the argument was originally passed in
5177 // registers. First start the appropriate block:
5178 CGF.EmitBlock(InRegBlock);
5180 llvm::Value *reg_top = nullptr;
5181 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
5182 reg_top_offset, "reg_top_p");
5183 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
5184 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
5185 CharUnits::fromQuantity(IsFPR ? 16 : 8));
5186 Address RegAddr = Address::invalid();
5187 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
5190 // If it's been passed indirectly (actually a struct), whatever we find from
5191 // stored registers or on the stack will actually be a struct **.
5192 MemTy = llvm::PointerType::getUnqual(MemTy);
5195 const Type *Base = nullptr;
5196 uint64_t NumMembers = 0;
5197 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
5198 if (IsHFA && NumMembers > 1) {
5199 // Homogeneous aggregates passed in registers will have their elements split
5200 // and stored 16-bytes apart regardless of size (they're notionally in qN,
5201 // qN+1, ...). We reload and store into a temporary local variable
5203 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
5204 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
5205 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
5206 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5207 Address Tmp = CGF.CreateTempAlloca(HFATy,
5208 std::max(TyAlign, BaseTyInfo.second));
5210 // On big-endian platforms, the value will be right-aligned in its slot.
5212 if (CGF.CGM.getDataLayout().isBigEndian() &&
5213 BaseTyInfo.first.getQuantity() < 16)
5214 Offset = 16 - BaseTyInfo.first.getQuantity();
5216 for (unsigned i = 0; i < NumMembers; ++i) {
5217 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
5219 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
5220 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
5223 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
5225 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
5226 CGF.Builder.CreateStore(Elem, StoreAddr);
5229 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
5231 // Otherwise the object is contiguous in memory.
5233 // It might be right-aligned in its slot.
5234 CharUnits SlotSize = BaseAddr.getAlignment();
5235 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
5236 (IsHFA || !isAggregateTypeForABI(Ty)) &&
5237 TyInfo.first < SlotSize) {
5238 CharUnits Offset = SlotSize - TyInfo.first;
5239 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
5242 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
5245 CGF.EmitBranch(ContBlock);
5247 //=======================================
5248 // Argument was on the stack
5249 //=======================================
5250 CGF.EmitBlock(OnStackBlock);
5252 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
5253 CharUnits::Zero(), "stack_p");
5254 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
5256 // Again, stack arguments may need realignment. In this case both integer and
5257 // floating-point ones might be affected.
5258 if (!IsIndirect && TyAlign.getQuantity() > 8) {
5259 int Align = TyAlign.getQuantity();
5261 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
5263 OnStackPtr = CGF.Builder.CreateAdd(
5264 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
5266 OnStackPtr = CGF.Builder.CreateAnd(
5267 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
5270 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
5272 Address OnStackAddr(OnStackPtr,
5273 std::max(CharUnits::fromQuantity(8), TyAlign));
5275 // All stack slots are multiples of 8 bytes.
5276 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
5277 CharUnits StackSize;
5279 StackSize = StackSlotSize;
5281 StackSize = TyInfo.first.alignTo(StackSlotSize);
5283 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
5284 llvm::Value *NewStack =
5285 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
5287 // Write the new value of __stack for the next call to va_arg
5288 CGF.Builder.CreateStore(NewStack, stack_p);
5290 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
5291 TyInfo.first < StackSlotSize) {
5292 CharUnits Offset = StackSlotSize - TyInfo.first;
5293 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
5296 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
5298 CGF.EmitBranch(ContBlock);
5300 //=======================================
5302 //=======================================
5303 CGF.EmitBlock(ContBlock);
5305 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
5306 OnStackAddr, OnStackBlock, "vaargs.addr");
5309 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
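
// Rough shape of the lowering above (added sketch, assuming the common
// little-endian case) for a call such as:
//   int sum(int n, ...) { va_list ap; va_start(ap, n);
//                         int x = va_arg(ap, int); ... }
// va_arg(ap, int) loads __gr_offs; if it is >= 0 the stack path is taken.
// Otherwise 8 is added and stored back, and if the new offset is still <= 0
// the value is loaded from __gr_top plus the old offset; if not, control
// falls through to the __stack path.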

Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types. Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.first.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}

Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP = 2,
    AAPCS16_VFP = 3,
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
      : SwiftABIInfo(CGT), Kind(_Kind) {
    setCCs();
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }
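
  // Illustrative note (added commentary, not part of the upstream source):
  // for a triple such as armv7-unknown-linux-gnueabihf the environment is
  // GNUEABIHF, so isEABI() and isEABIHF() both return true, while
  // armv7-unknown-linux-gnueabi satisfies only isEABI().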

  ABIKind getABIKind() const { return Kind; }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
  bool isIllegalVectorType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }
  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ:     Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ:     Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI:     Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT:   Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF:   Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
    if (ABI == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
  }
};

class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
} // end anonymous namespace

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  addStackProbeSizeTargetAttribute(D, GV, CGM);
}

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() =
        classifyReturnType(FI.getReturnType(), FI.isVariadic());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case APCS: return llvm::CallingConv::ARM_APCS;
  case AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;

  // AAPCS apparently requires runtime support functions to be soft-float, but
  // that's almost certainly for historic reasons (Thumb1 not supporting VFP
  // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
  //
  // The Run-time ABI for the ARM Architecture section 4.1.2 requires
  // AEABI-complying FP helper functions to use the base AAPCS. These AEABI
  // functions are expanded in the ARM llvm backend; all the builtin support
  // functions emitted by clang, such as the _Complex helpers, follow the
  // abiCC.
  if (abiCC != getLLVMDefaultCC())
    BuiltinCC = abiCC;
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
                                            bool isVariadic) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType =
          llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      return ABIArgInfo::getDirect(ResType);
    }
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }

  // __fp16 gets passed as if it were an int or float, but with the top 16 bits
  // unspecified. This is not done for OpenCL as it handles the half type
  // natively, and does not need to interwork with AAPCS code.
  if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
    llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
        llvm::Type::getFloatTy(getVMContext()) :
        llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                          : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsEffectivelyAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Base can be a floating-point or a vector.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128-bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);

  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
  // same size and alignment.
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
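
// Illustrative example (added commentary, not part of the upstream source):
// a 12-byte struct with 4-byte alignment such as
//   struct S { int a; int b; int c; };
// takes the 32-bit branch above and is coerced to [3 x i32], while a struct
// containing a 64-bit-aligned field is coerced to an array of i64 instead.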

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
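
// Illustrative examples (added commentary, not part of the upstream source):
//   struct A { char c; };          // integer-like: one sub-field at offset 0
//   union  U { short s; char c; }; // integer-like: members all at offset 0
//   struct B { char c; char d; };  // not integer-like: 'd' sits at offset 8
// even though all three fit comfortably within one 32-bit word.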

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  bool IsEffectivelyAAPCS_VFP =
      (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    return getNaturalAlignIndirect(RetTy);
  }

  // __fp16 gets returned as if it were an int or float, but with the top 16
  // bits unspecified. This is not done for OpenCL as it handles the half type
  // natively, and does not need to interwork with AAPCS code.
  if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
    llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
        llvm::Type::getFloatTy(getVMContext()) :
        llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                            : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsEffectivelyAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}
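
// Illustrative example (added commentary, not part of the upstream source):
// on little-endian AAPCS, struct P { char a; char b; } is two bytes and comes
// back as i16 via the "smallest viable integer" branch, while on a big-endian
// target the same struct is returned as i32 so the bytes land where an LDR
// would expect them.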

/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}

bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                           llvm::Type *eltTy,
                                           unsigned numElts) const {
  if (!llvm::isPowerOf2_32(numElts))
    return false;
  unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
  if (size > 64)
    return false;
  if (vectorSize.getQuantity() != 8 &&
      (vectorSize.getQuantity() != 16 || numElts == 1))
    return false;
  return true;
}

bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS-VFP must have base types of float,
  // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                   uint64_t Members) const {
  return Members <= 4;
}

Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(4);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  CharUnits TyAlignForABI = TyInfo.second;

  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  bool IsIndirect = false;
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

  // ARMv7k passes structs bigger than 16 bytes indirectly, in space
  // allocated by the caller.
  } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIInfo::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;

  // Otherwise, bound the type's ABI alignment.
  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  // Our callers should be prepared to handle an under-aligned address.
  } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
             getABIKind() == ARMABIInfo::AAPCS) {
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // ARMv7k allows type alignment up to 16 bytes.
    TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }
  TyInfo.second = TyAlignForABI;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
                          SlotSize, /*AllowHigherAlign*/ true);
}

//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;

private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};

} // end anonymous namespace

ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Note: this is different from the default ABI.
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Return aggregate types as indirect by value.
  if (isAggregateTypeForABI(Ty))
    return getNaturalAlignIndirect(Ty, /* byval */ true);

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  llvm_unreachable("NVPTX does not support varargs");
}

void NVPTXTargetCodeGenInfo::
setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata.
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
    }
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
      llvm::APSInt MaxThreads(32);
      MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
      if (MaxThreads > 0)
        addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());

      // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
      // not specified in __launch_bounds__ or if the user specified a 0 value,
      // we don't have to add a PTX directive.
      if (Attr->getMinBlocks()) {
        llvm::APSInt MinBlocks(32);
        MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
        if (MinBlocks > 0)
          // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node.
          addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
      }
    }
  }
}

void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
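
// Illustrative result (added sketch, not part of the upstream source): for a
// CUDA __global__ function 'foo', the module ends up with roughly
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @foo, !"kernel", i32 1}
// which is how the NVPTX backend recognizes kernel entry points.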

//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class SystemZABIInfo : public SwiftABIInfo {
  bool HasVector;

public:
  SystemZABIInfo(CodeGenTypes &CGT, bool HV)
    : SwiftABIInfo(CGT), HasVector(HV) {}

  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isVectorArgumentType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;
  QualType GetSingleElementType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
    : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};

} // end anonymous namespace

bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // 32-bit values must also be promoted.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  return (Ty->isAnyComplexType() ||
          Ty->isVectorType() ||
          isAggregateTypeForABI(Ty));
}

bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
  return (HasVector &&
          Ty->isVectorType() &&
          getContext().getTypeSize(Ty) <= 128);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }

  return false;
}

QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
  if (const RecordType *RT = Ty->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    QualType Found;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();

        // Empty bases don't affect things either way.
        if (isEmptyRecord(getContext(), Base, true))
          continue;

        if (!Found.isNull())
          return Ty;
        Found = GetSingleElementType(Base);
      }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      // For compatibility with GCC, ignore empty bitfields in C++ mode.
      // Unlike isSingleElementStruct(), empty structure and array fields
      // do count. So do anonymous bitfields that aren't zero-sized.
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        continue;

      // Unlike isSingleElementStruct(), arrays do not count.
      // Nested structures still do though.
      if (!Found.isNull())
        return Ty;
      Found = GetSingleElementType(FD->getType());
    }

    // Unlike isSingleElementStruct(), trailing padding is allowed.
    // An 8-byte aligned struct s { float f; } is passed as a double.
    if (!Found.isNull())
      return Found;
  }

  return Ty;
}
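
// Illustrative example (added commentary, not part of the upstream source):
// the search looks through nesting, so for
//   struct Outer { struct Inner { float f; } i; };
// it returns 'float'; an 8-byte-aligned struct wrapping a single float also
// qualifies, because trailing padding is tolerated.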

Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };

  // Every non-vector argument occupies 8 bytes and is passed by preference
  // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
  // always passed on the stack.
  Ty = getContext().getCanonicalType(Ty);
  auto TyInfo = getContext().getTypeInfoInChars(Ty);
  llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *DirectTy = ArgTy;
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();
  bool InFPRs = false;
  bool IsVector = false;
  CharUnits UnpaddedSize;
  CharUnits DirectAlign;
  if (IsIndirect) {
    DirectTy = llvm::PointerType::getUnqual(DirectTy);
    UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
  } else {
    if (AI.getCoerceToType())
      ArgTy = AI.getCoerceToType();
    InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
    IsVector = ArgTy->isVectorTy();
    UnpaddedSize = TyInfo.first;
    DirectAlign = TyInfo.second;
  }
  CharUnits PaddedSize = CharUnits::fromQuantity(8);
  if (IsVector && UnpaddedSize > PaddedSize)
    PaddedSize = CharUnits::fromQuantity(16);
  assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");

  CharUnits Padding = (PaddedSize - UnpaddedSize);

  llvm::Type *IndexTy = CGF.Int64Ty;
  llvm::Value *PaddedSizeV =
      llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());

  if (IsVector) {
    // Work out the address of a vector argument on the stack.
    // Vector arguments are always passed in the high bits of a
    // single (8 byte) or double (16 byte) stack slot.
    Address OverflowArgAreaPtr =
        CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
                                    "overflow_arg_area_ptr");
    Address OverflowArgArea =
        Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
                TyInfo.second);
    Address MemAddr =
        CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");

    // Update overflow_arg_area_ptr pointer.
    llvm::Value *NewOverflowArgArea =
        CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
                              "overflow_arg_area");
    CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);

    return MemAddr;
  }

  assert(PaddedSize.getQuantity() == 8);

  unsigned MaxRegs, RegCountField, RegSaveIndex;
  CharUnits RegPadding;
  if (InFPRs) {
    MaxRegs = 4;              // Maximum of 4 FPR arguments
    RegCountField = 1;        // __fpr
    RegSaveIndex = 16;        // save offset for f0
    RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5;              // Maximum of 5 GPR arguments
    RegCountField = 0;        // __gpr
    RegSaveIndex = 2;         // save offset for r2
    RegPadding = Padding;     // values are passed in the low bits of a GPR
  }

  Address RegCountPtr = CGF.Builder.CreateStructGEP(
      VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
      "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
  llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                  "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);

  // Emit code to load the value if it was passed in registers.
  CGF.EmitBlock(InRegBlock);

  // Work out the address of an argument register.
  llvm::Value *ScaledRegCount =
      CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
  llvm::Value *RegBase =
      llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
                                          + RegPadding.getQuantity());
  llvm::Value *RegOffset =
      CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
  Address RegSaveAreaPtr =
      CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
                                  "reg_save_area_ptr");
  llvm::Value *RegSaveArea =
      CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
                                           "raw_reg_addr"),
                     PaddedSize);
  Address RegAddr =
      CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");

  // Update the register count.
  llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
  llvm::Value *NewRegCount =
      CGF.Builder.CreateAdd(RegCount, One, "reg_count");
  CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
  CGF.EmitBranch(ContBlock);

  // Emit code to load the value if it was passed in memory.
  CGF.EmitBlock(InMemBlock);

  // Work out the address of a stack argument.
  Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
      VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
  Address OverflowArgArea =
      Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
              PaddedSize);
  Address RawMemAddr =
      CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
  Address MemAddr =
      CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");

  // Update overflow_arg_area_ptr pointer.
  llvm::Value *NewOverflowArgArea =
      CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
                            "overflow_arg_area");
  CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
  CGF.EmitBranch(ContBlock);

  // Return the appropriate result.
  CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 MemAddr, InMemBlock, "va_arg.addr");

  if (IsIndirect)
    ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
                      TyInfo.second);

  return ResAddr;
}

ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();
  if (isVectorArgumentType(RetTy))
    return ABIArgInfo::getDirect();
  if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);
  return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
  // Handle the generic C++ ABI.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Integers and enums are extended to full register width.
  if (isPromotableIntegerType(Ty))
    return ABIArgInfo::getExtend();

  // Handle vector types and vector-like structure types. Note that
  // as opposed to float-like structure types, we do not allow any
  // padding for vector-like structures, so verify the sizes match.
  uint64_t Size = getContext().getTypeSize(Ty);
  QualType SingleElementTy = GetSingleElementType(Ty);
  if (isVectorArgumentType(SingleElementTy) &&
      getContext().getTypeSize(SingleElementTy) == Size)
    return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));

  // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
  if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Handle small structures.
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so they really
    // fail the size test above.
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // The structure is passed as an unextended integer, a float, or a double.
    llvm::Type *PassTy;
    if (isFPArgumentType(SingleElementTy)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }

  // Non-structure compounds are passed indirectly.
  if (isCompoundType(Ty))
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  return ABIArgInfo::getDirect(nullptr);
}
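
// Illustrative examples (added commentary, not part of the upstream source):
// struct { float f; } is passed as a float in an FPR; struct { float f;
// float g; } is 8 bytes but has two elements, so it is passed as i64 in a
// GPR; and struct { char c[3]; } is 3 bytes, fails the size test, and is
// passed indirectly.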

//===----------------------------------------------------------------------===//
// MSP430 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

} // end anonymous namespace

void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &M) const {
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
      // Handle 'interrupt' attribute:
      llvm::Function *F = cast<llvm::Function>(GV);

      // Step 1: Set ISR calling convention.
      F->setCallingConv(llvm::CallingConv::MSP430_INTR);

      // Step 2: Add the 'noinline' attribute.
      F->addFnAttr(llvm::Attribute::NoInline);

      // Step 3: Emit ISR vector alias.
      unsigned Num = attr->getNumber() / 2;
      llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
                                "__isr_" + Twine(Num), F);
    }
  }
}

//===----------------------------------------------------------------------===//
// MIPS ABI Implementation. This works for both little-endian and
// big-endian variants.
//===----------------------------------------------------------------------===//

namespace {
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  bool shouldSignExtUnsignedType(QualType Ty) const override;
};

class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
    : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
      SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }

    if (FD->hasAttr<MicroMipsAttr>())
      Fn->addFnAttr("micromips");
    else if (FD->hasAttr<NoMicroMipsAttr>())
      Fn->addFnAttr("nomicromips");

    const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case MipsInterruptAttr::eic: Kind = "eic"; break;
    case MipsInterruptAttr::sw0: Kind = "sw0"; break;
    case MipsInterruptAttr::sw1: Kind = "sw1"; break;
    case MipsInterruptAttr::hw0: Kind = "hw0"; break;
    case MipsInterruptAttr::hw1: Kind = "hw1"; break;
    case MipsInterruptAttr::hw2: Kind = "hw2"; break;
    case MipsInterruptAttr::hw3: Kind = "hw3"; break;
    case MipsInterruptAttr::hw4: Kind = "hw4"; break;
    case MipsInterruptAttr::hw5: Kind = "hw5"; break;
    }

    Fn->addFnAttr("interrupt", Kind);
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
} // end anonymous namespace

void MipsABIInfo::CoerceToIntArgs(
    uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
  llvm::IntegerType *IntTy =
      llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);

  // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
  for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
    ArgList.push_back(IntTy);

  // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);

  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}

// In N32/64, an aligned double precision floating point field is passed in
// a register.
llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  if (Ty->isComplexType())
    return CGT.ConvertType(Ty);

  const RecordType *RT = Ty->getAs<RecordType>();

  // Unions/vectors are passed in integer registers.
  if (!RT || !RT->isStructureOrClassType()) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }

  const RecordDecl *RD = RT->getDecl();
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
  assert(!(TySize % 8) && "Size of structure must be multiple of 8.");

  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any aligned
  // double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add double type.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }

  CoerceToIntArgs(TySize - LastOffset, IntArgList);
  ArgList.append(IntArgList.begin(), IntArgList.end());

  return llvm::StructType::get(getVMContext(), ArgList);
}
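
// Illustrative example (added commentary, not part of the upstream source):
// on N64, struct S { double d; int i; } is 16 bytes; the 64-bit-aligned
// double at offset 0 is preserved and the remainder is coerced to i64, so
// the coercion type is { double, i64 } and the double can travel in an FPR.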

llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
                                        uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}
6741 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
6742 Ty = useFirstFieldIfTransparentUnion(Ty);
6744 uint64_t OrigOffset = Offset;
6745 uint64_t TySize = getContext().getTypeSize(Ty);
6746 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6748 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
6749 (uint64_t)StackAlignInBytes);
6750 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6751 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6753 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
6754 // Ignore empty aggregates.
6756 return ABIArgInfo::getIgnore();
6758 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6759 Offset = OrigOffset + MinABIStackAlignInBytes;
6760 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6763 // If we have reached here, aggregates are passed directly by coercing to
6764 // another structure type. Padding is inserted if the offset of the
6765 // aggregate is unaligned.
6766 ABIArgInfo ArgInfo =
6767 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
6768 getPaddingType(OrigOffset, CurrOffset));
6769 ArgInfo.setInReg(true);
6773 // Treat an enum type as its underlying type.
6774 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6775 Ty = EnumTy->getDecl()->getIntegerType();
6777 // All integral types are promoted to the GPR width.
6778 if (Ty->isIntegralOrEnumerationType())
6779 return ABIArgInfo::getExtend();
  return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}

llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
6787 const RecordType *RT = RetTy->getAs<RecordType>();
6788 SmallVector<llvm::Type*, 8> RTList;
6790 if (RT && RT->isStructureOrClassType()) {
6791 const RecordDecl *RD = RT->getDecl();
6792 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6793 unsigned FieldCnt = Layout.getFieldCount();
6795 // N32/64 returns struct/classes in floating point registers if the
6796 // following conditions are met:
6797 // 1. The size of the struct/class is no larger than 128-bit.
    // 2. The struct/class has one or two fields all of which are floating
    //    point types.
6800 // 3. The offset of the first field is zero (this follows what gcc does).
6802 // Any other composite results are returned in integer registers.
6804 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
6805 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
6806 for (; b != e; ++b) {
6807 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}
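// For example (illustrative): a hypothetical
//   struct FP2 { float f; double d; };
// meets all three conditions above on N32/N64, so it is returned as the
// struct type { float, double } and travels in floating point registers.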
6827 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
6828 uint64_t Size = getContext().getTypeSize(RetTy);
6830 if (RetTy->isVoidType())
6831 return ABIArgInfo::getIgnore();
6833 // O32 doesn't treat zero-sized structs differently from other structs.
6834 // However, N32/N64 ignores zero sized return values.
6835 if (!IsO32 && Size == 0)
6836 return ABIArgInfo::getIgnore();
6838 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
6840 if (RetTy->isAnyComplexType())
6841 return ABIArgInfo::getDirect();
6843 // O32 returns integer vectors in registers and N32/N64 returns all small
6844 // aggregates in registers.
    if (!IsO32 ||
        (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
      ABIArgInfo ArgInfo =
          ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
      ArgInfo.setInReg(true);
      return ArgInfo;
    }

    return getNaturalAlignIndirect(RetTy);
  }
6857 // Treat an enum type as its underlying type.
6858 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6859 RetTy = EnumTy->getDecl()->getIntegerType();
6861 return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
6865 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
6866 ABIArgInfo &RetInfo = FI.getReturnInfo();
6867 if (!getCXXABI().classifyReturnType(FI))
6868 RetInfo = classifyReturnType(FI.getReturnType());
6870 // Check if a pointer to an aggregate is passed as a hidden argument.
6871 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
6873 for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, Offset);
}
6877 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6878 QualType OrigTy) const {
6879 QualType Ty = OrigTy;
6881 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
6882 // Pointers are also promoted in the same way but this only matters for N32.
6883 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
6884 unsigned PtrWidth = getTarget().getPointerWidth(0);
6885 bool DidPromote = false;
6886 if ((Ty->isIntegerType() &&
6887 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    DidPromote = true;
    Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                            Ty->isSignedIntegerType());
  }
6894 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6896 // The alignment of things in the argument area is never larger than
6897 // StackAlignInBytes.
  TyInfo.second =
      std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
6901 // MinABIStackAlignInBytes is the size of argument slots on the stack.
6902 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
6904 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6905 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
6908 // If there was a promotion, "unpromote" into a temporary.
  // TODO: can we just use a pointer into a subset of the original slot?
  if (DidPromote) {
    Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
    llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);

    // Truncate down to the right width.
    llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
                                                 : CGF.IntPtrTy);
    llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
    if (OrigTy->isPointerType())
      V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());

    CGF.Builder.CreateStore(V, Temp);
    Addr = Temp;
  }

  return Addr;
}
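// For example (illustrative): on N64, va_arg(ap, int) reads a full 64-bit
// argument slot and then truncates the promoted value back to an i32 inside
// the "vaarg.promotion-temp" temporary created above.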
6928 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
6929 int TySize = getContext().getTypeSize(Ty);
6931 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
  if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return true;

  return false;
}

bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to
  // be as canonical as it gets.
6944 // Everything on MIPS is 4 bytes. Double-precision FP registers
6945 // are aliased to pairs of single-precision FP registers.
6946 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
6948 // 0-31 are the general purpose registers, $0 - $31.
6949 // 32-63 are the floating-point registers, $f0 - $f31.
6950 // 64 and 65 are the multiply/divide registers, $hi and $lo.
6951 // 66 is the (notional, I think) register for signal-handler return.
6952 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
6954 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
6955 // They are one bit wide and ignored here.
6957 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
6958 // (coprocessor 1 is the FP unit)
6959 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
6960 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
6961 // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);

  return false;
}
6966 //===----------------------------------------------------------------------===//
6967 // AVR ABI Implementation.
6968 //===----------------------------------------------------------------------===//
namespace {
class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
6973 AVRTargetCodeGenInfo(CodeGenTypes &CGT)
6974 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { }
6976 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6977 CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    auto *Fn = cast<llvm::Function>(GV);

    if (FD->getAttr<AVRInterruptAttr>())
      Fn->addFnAttr("interrupt");

    if (FD->getAttr<AVRSignalAttr>())
      Fn->addFnAttr("signal");
  }
};
}
6991 //===----------------------------------------------------------------------===//
6992 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
6995 //===----------------------------------------------------------------------===//
namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
7008 void TCETargetCodeGenInfo::setTargetAttributes(
7009 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD) return;
7013 llvm::Function *F = cast<llvm::Function>(GV);
7015 if (M.getLangOpts().OpenCL) {
7016 if (FD->hasAttr<OpenCLKernelAttr>()) {
7017 // OpenCL C Kernel functions are not subject to inlining
7018 F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
7022 llvm::LLVMContext &Context = F->getContext();
7023 llvm::NamedMDNode *OpenCLMetadata =
7024 M.getModule().getOrInsertNamedMetadata(
7025 "opencl.kernel_wg_size_info");
7027 SmallVector<llvm::Metadata *, 5> Operands;
7028 Operands.push_back(llvm::ConstantAsMetadata::get(F));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}
7053 //===----------------------------------------------------------------------===//
7054 // Hexagon ABI Implementation
7055 //===----------------------------------------------------------------------===//
namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }
};

}
7088 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
7089 if (!getCXXABI().classifyReturnType(FI))
7090 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7091 for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}
7095 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
7096 if (!isAggregateTypeForABI(Ty)) {
7097 // Treat an enum type as its underlying type.
7098 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7099 Ty = EnumTy->getDecl()->getIntegerType();
7101 return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
7105 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7106 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7108 // Ignore empty records.
7109 if (isEmptyRecord(getContext(), Ty, true))
7110 return ABIArgInfo::getIgnore();
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
  // Pass in the smallest viable integer type.
  else if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  else if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  else if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  else
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
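// For example (illustrative): a hypothetical 6-byte struct (Size == 48) falls
// into the "> 32" bucket above and is passed directly as an i64, while a
// 12-byte struct exceeds 64 bits and is passed indirectly with ByVal.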
7126 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
7127 if (RetTy->isVoidType())
7128 return ABIArgInfo::getIgnore();
7130 // Large vector types should be returned via memory.
7131 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
7132 return getNaturalAlignIndirect(RetTy);
7134 if (!isAggregateTypeForABI(RetTy)) {
7135 // Treat an enum type as its underlying type.
7136 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7137 RetTy = EnumTy->getDecl()->getIntegerType();
7139 return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
7143 if (isEmptyRecord(getContext(), RetTy, true))
7144 return ABIArgInfo::getIgnore();
7146 // Aggregates <= 8 bytes are returned in r0; other aggregates
7147 // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}
7163 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7164 QualType Ty) const {
  // FIXME: Someone needs to audit that this handles alignment correctly.
7166 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7167 getContext().getTypeInfoInChars(Ty),
7168 CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}
7172 //===----------------------------------------------------------------------===//
7173 // Lanai ABI Implementation
7174 //===----------------------------------------------------------------------===//
namespace {
class LanaiABIInfo : public DefaultABIInfo {
public:
  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool shouldUseInReg(QualType Ty, CCState &State) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI.getCallingConvention());
    // Lanai uses 4 registers to pass arguments unless the function has the
    // regparm attribute set.
    if (FI.getHasRegParm()) {
      State.FreeRegs = FI.getRegParm();
    } else {
      State.FreeRegs = 4;
    }

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type, State);
  }

  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
};
} // end anonymous namespace
7204 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
7205 unsigned Size = getContext().getTypeSize(Ty);
7206 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  return true;
}
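// For example (illustrative): with the default budget of 4 registers, a
// 64-bit argument consumes SizeInRegs == 2. Once an argument no longer fits,
// the remaining budget is zeroed, so every later argument is passed without
// the inreg treatment.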
ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                           CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}
7239 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
7240 CCState &State) const {
7241 // Check with the C++ ABI first.
7242 const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
    }
  }
7252 if (isAggregateTypeForABI(Ty)) {
7253 // Structures with flexible arrays are always indirect.
7254 if (RT && RT->getDecl()->hasFlexibleArrayMember())
7255 return getIndirectResult(Ty, /*ByVal=*/true, State);
7257 // Ignore empty structs/unions.
7258 if (isEmptyRecord(getContext(), Ty, true))
7259 return ABIArgInfo::getIgnore();
7261 llvm::LLVMContext &LLVMContext = getVMContext();
7262 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
7263 if (SizeInRegs <= State.FreeRegs) {
7264 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7265 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
7266 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7267 State.FreeRegs -= SizeInRegs;
      return ABIArgInfo::getDirectInReg(Result);
    } else {
      State.FreeRegs = 0;
    }
    return getIndirectResult(Ty, true, State);
  }
7275 // Treat an enum type as its underlying type.
7276 if (const auto *EnumTy = Ty->getAs<EnumType>())
7277 Ty = EnumTy->getDecl()->getIntegerType();
7279 bool InReg = shouldUseInReg(Ty, State);
7280 if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getDirectInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
};
} // end anonymous namespace
7298 //===----------------------------------------------------------------------===//
7299 // AMDGPU ABI Implementation
7300 //===----------------------------------------------------------------------===//
namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
};
7314 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
7315 if (!getCXXABI().classifyReturnType(FI))
7316 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7318 unsigned CC = FI.getCallingConvention();
7319 for (auto &Arg : FI.arguments())
7320 if (CC == llvm::CallingConv::AMDGPU_KERNEL)
7321 Arg.info = classifyArgumentType(Arg.type);
    else
      Arg.info = DefaultABIInfo::classifyArgumentType(Arg.type);
}
7326 /// \brief Classify argument of given type \p Ty.
7327 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty) const {
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy) {
    return DefaultABIInfo::classifyArgumentType(Ty);
  }

  // Coerce single-element structs to their element type.
  if (StrTy->getNumElements() == 1) {
    return ABIArgInfo::getDirect();
  }

  // If we set CanBeFlattened to true, CodeGen will expand the struct to its
  // individual elements, which confuses the Clover OpenCL backend; therefore we
  // have to set it to false here. Other args of getDirect() are just defaults.
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
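// For example (illustrative): a hypothetical kernel argument of type
//   struct Pair { int a; int b; };
// converts to a two-element LLVM struct, so it is passed directly with
// CanBeFlattened == false and stays a single aggregate in the kernel
// signature instead of being expanded into two i32 parameters.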
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
      llvm::PointerType *T, QualType QT) const override;

  unsigned getASTAllocaAddressSpace() const override {
    return LangAS::FirstTargetAddressSpace +
           getABIInfo().getDataLayout().getAllocaAddrSpace();
  }
  unsigned getGlobalVarAddressSpace(CodeGenModule &CGM,
                                    const VarDecl *D) const override;
};
}
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D,
    llvm::GlobalValue *GV,
    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);
7374 const auto *ReqdWGS = M.getLangOpts().OpenCL ?
7375 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
7376 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
7377 if (ReqdWGS || FlatWGS) {
7378 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
7379 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
7380 if (ReqdWGS && Min == 0 && Max == 0)
7381 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
    if (Min != 0) {
      assert(Min <= Max && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }
7392 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
7393 unsigned Min = Attr->getMin();
7394 unsigned Max = Attr->getMax();
    if (Min != 0) {
      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }
7407 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
7408 unsigned NumSGPR = Attr->getNumSGPR();
    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }
}

unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}
7426 // Currently LLVM assumes null pointers always have value 0,
7427 // which results in incorrectly transformed IR. Therefore, instead of
7428 // emitting null pointers in private and local address spaces, a null
// pointer in the generic address space is emitted which is then cast to a
7430 // pointer in local or private address space.
7431 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7432 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
7433 QualType QT) const {
7434 if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
7435 return llvm::ConstantPointerNull::get(PT);
7437 auto &Ctx = CGM.getContext();
7438 auto NPT = llvm::PointerType::get(PT->getElementType(),
7439 Ctx.getTargetAddressSpace(LangAS::opencl_generic));
7440 return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}

unsigned
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
7446 const VarDecl *D) const {
7447 assert(!CGM.getLangOpts().OpenCL &&
7448 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
7449 "Address space agnostic languages only");
7450 unsigned DefaultGlobalAS =
7451 LangAS::FirstTargetAddressSpace +
7452 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
  if (!D)
    return DefaultGlobalAS;

  unsigned AddrSpace = D->getType().getAddressSpace();
  assert(AddrSpace == LangAS::Default ||
         AddrSpace >= LangAS::FirstTargetAddressSpace);
  if (AddrSpace != LangAS::Default)
    return AddrSpace;
7462 if (CGM.isTypeConstant(D->getType(), false)) {
7463 if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
7464 return ConstAS.getValue();
  }
  return DefaultGlobalAS;
}
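// For example (illustrative): in a plain C translation unit, a file-scope
//   static const int Table[4] = {0, 1, 2, 3};
// has no explicit address space and is constant-initialized, so it is placed
// in the target's constant address space when one is reported; a mutable
// global falls back to DefaultGlobalAS.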
7469 //===----------------------------------------------------------------------===//
7470 // SPARC v8 ABI Implementation.
7471 // Based on the SPARC Compliance Definition version 2.4.1.
7473 // Ensures that complex values are passed in registers.
namespace {
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace
ABIArgInfo
SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
7489 if (Ty->isAnyComplexType()) {
7490 return ABIArgInfo::getDirect();
  }
  else {
    return DefaultABIInfo::classifyReturnType(Ty);
  }
}
7497 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
7499 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7500 for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}
namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
};
} // end anonymous namespace
7512 //===----------------------------------------------------------------------===//
7513 // SPARC v9 ABI Implementation.
7514 // Based on the SPARC Compliance Definition version 2.4.1.
// Function arguments are mapped to a nominal "parameter array" and promoted to
7517 // registers depending on their type. Each argument occupies 8 or 16 bytes in
7518 // the array, structs larger than 16 bytes are passed indirectly.
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
7527 // When a struct mixed is passed by value, it only occupies 8 bytes in the
7528 // parameter array, but the int is passed in an integer register, and the float
7529 // is passed in a floating point register. This is represented as two arguments
7530 // with the LLVM IR inreg attribute:
7532 // declare void f(i32 inreg %i, float inreg %f)
7534 // The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
7545 void computeInfo(CGFunctionInfo &FI) const override;
7546 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7547 QualType Ty) const override;
7549 // Coercion type builder for structs passed in registers. The coercion type
7550 // serves two purposes:
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
7554 // 2. Expose aligned floating point elements as first-level elements, so the
7555 // code generator knows to pass them in floating point registers.
7557 // We also compute the InReg flag which indicates that the struct contains
7558 // aligned 32-bit floats.
7560 struct CoerceBuilder {
7561 llvm::LLVMContext &Context;
7562 const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
7568 : Context(c), DL(dl), Size(0), InReg(false) {}
    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }
    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }
7609 // Add a struct type to the coercion type, starting at Offset (in bits).
7610 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
7611 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
7612 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
7613 llvm::Type *ElemTy = StrTy->getElementType(i);
7614 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
7615 switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size = ElemOffset + 64;
          }
          break;
        default:
          break;
        }
      }
    }
7641 // Check if Ty is a usable substitute for the coercion type.
7642 bool isUsableType(llvm::StructType *Ty) const {
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }
7646 // Get the coercion type as a literal struct type.
7647 llvm::Type *getType() const {
7648 if (Elems.size() == 1)
7649 return Elems.front();
      else
        return llvm::StructType::get(Context, Elems);
    }
  };
7655 } // end anonymous namespace
ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
7659 if (Ty->isVoidType())
7660 return ABIArgInfo::getIgnore();
7662 uint64_t Size = getContext().getTypeSize(Ty);
7664 // Anything too big to fit in registers is passed with an explicit indirect
7665 // pointer / sret pointer.
7666 if (Size > SizeLimit)
7667 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7669 // Treat an enum type as its underlying type.
7670 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7671 Ty = EnumTy->getDecl()->getIntegerType();
7673 // Integer types smaller than a register are extended.
7674 if (Size < 64 && Ty->isIntegerType())
7675 return ABIArgInfo::getExtend();
7677 // Other non-aggregates go in registers.
7678 if (!isAggregateTypeForABI(Ty))
7679 return ABIArgInfo::getDirect();
7681 // If a C++ object has either a non-trivial copy constructor or a non-trivial
7682 // destructor, it is passed with an explicit indirect pointer / sret pointer.
7683 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7684 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7686 // This is a small aggregate type that should be passed in registers.
7687 // Build a coercion type from the LLVM struct type.
7688 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();
7692 CoerceBuilder CB(getVMContext(), getDataLayout());
7693 CB.addStruct(0, StrTy);
7694 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
7696 // Try to use the original type for coercion.
7697 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}
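// For example, the "struct mixed { int i; float f; }" case from the comment
// block above coerces to the literal type { i32, float }; the 32-bit float
// sets CB.InReg, so the result is getDirectInReg and both pieces carry the
// inreg attribute in the IR.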
7705 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7706 QualType Ty) const {
7707 ABIArgInfo AI = classifyType(Ty, 16 * 8);
7708 llvm::Type *ArgTy = CGT.ConvertType(Ty);
7709 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
7710 AI.setCoerceToType(ArgTy);
7712 CharUnits SlotSize = CharUnits::fromQuantity(8);
7714 CGBuilderTy &Builder = CGF.Builder;
7715 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
7716 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
7718 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
7723 case ABIArgInfo::Expand:
7724 case ABIArgInfo::CoerceAndExpand:
7725 case ABIArgInfo::InAlloca:
7726 llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.first;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }
  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }
  case ABIArgInfo::Indirect:
    Stride = SlotSize;
    ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.second);
    break;
  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
  }

  // Update VAList.
  llvm::Value *NextPtr =
7755 Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
7756 Builder.CreateStore(NextPtr, VAListAddr);
  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}
7761 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
7762 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
7763 for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}
namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace

bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7784 llvm::Value *Address) const {
7785 // This is calculated from the LLVM and GCC tables and verified
7786 // against gcc output. AFAIK all ABIs use the same encoding.
7788 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7790 llvm::IntegerType *i8 = CGF.Int8Ty;
7791 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
7792 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
7794 // 0-31: the 8-byte general-purpose registers
7795 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
7797 // 32-63: f0-31, the 4-byte floating-point registers
7798 AssignToArrayRange(Builder, Address, Four8, 32, 63);
  //   Y   = 64
  //   PSR = 65
  //   WIM = 66
  //   TBR = 67
  //   PC  = 68
  //   NPC = 69
  //   FSR = 70
  //   CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);
7810 // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}
7817 //===----------------------------------------------------------------------===//
7818 // XCore ABI Implementation
7819 //===----------------------------------------------------------------------===//
namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
7824 /// it by reference between functions that append to it.
7825 typedef llvm::SmallString<128> SmallStringEnc;
7827 /// TypeStringCache caches the meta encodings of Types.
/// The reason for caching TypeStrings is twofold:
7830 /// 1. To cache a type's encoding for later uses;
7831 /// 2. As a means to break recursive member type inclusion.
7833 /// A cache Entry can have a Status of:
7834 /// NonRecursive: The type encoding is not recursive;
7835 /// Recursive: The type encoding is recursive;
7836 /// Incomplete: An incomplete TypeString;
7837 /// IncompleteUsed: An incomplete TypeString that has been used in a
7838 /// Recursive type encoding.
7840 /// A NonRecursive entry will have all of its sub-members expanded as fully
7841 /// as possible. Whilst it may contain types which are recursive, the type
7842 /// itself is not recursive and thus its encoding may be safely used whenever
7843 /// the type is encountered.
7845 /// A Recursive entry will have all of its sub-members expanded as fully as
7846 /// possible. The type itself is recursive and it may contain other types which
7847 /// are recursive. The Recursive encoding must not be used during the expansion
7848 /// of a recursive type's recursive branch. For simplicity the code uses
7849 /// IncompleteCount to reject all usage of Recursive encodings for member types.
7851 /// An Incomplete entry is always a RecordType and only encodes its
7852 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
7853 /// are placed into the cache during type expansion as a means to identify and
7854 /// handle recursive inclusion of types as sub-members. If there is recursion
7855 /// the entry becomes IncompleteUsed.
7857 /// During the expansion of a RecordType's members:
7859 /// If the cache contains a NonRecursive encoding for the member type, the
7860 /// cached encoding is used;
7862 /// If the cache contains a Recursive encoding for the member type, the
7863 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
7865 /// If the member is a RecordType, an Incomplete encoding is placed into the
7866 /// cache to break potential recursive inclusion of itself as a sub-member;
7868 /// Once a member RecordType has been expanded, its temporary incomplete
7869 /// entry is removed from the cache. If a Recursive encoding was swapped out
7870 /// it is swapped back in;
7872 /// If an incomplete entry is used to expand a sub-member, the incomplete
7873 /// entry is marked as IncompleteUsed. The cache keeps count of how many
7874 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
7876 /// If a member's encoding is found to be a NonRecursive or Recursive viz:
7877 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
7878 /// Else the member is part of a recursive type and thus the recursion has
7879 /// been exited too soon for the encoding to be correct for the member.
7881 class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};
/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};
class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};
7931 } // End anonymous namespace.
// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
7935 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7936 QualType Ty) const {
7937 CGBuilderTy &Builder = CGF.Builder;
  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
7941 Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
7943 // Handle the argument.
7944 ABIArgInfo AI = classifyArgumentType(Ty);
7945 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
7946 llvm::Type *ArgTy = CGT.ConvertType(Ty);
7947 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
7948 AI.setCoerceToType(ArgTy);
7949 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
7951 Address Val = Address::invalid();
7952 CharUnits ArgSize = CharUnits::Zero();
7953 switch (AI.getKind()) {
7954 case ABIArgInfo::Expand:
7955 case ABIArgInfo::CoerceAndExpand:
7956 case ABIArgInfo::InAlloca:
7957 llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
                  getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }
  // Increment the VAList.
  if (!ArgSize.isZero()) {
    llvm::Value *APN =
      Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
    Builder.CreateStore(APN, VAListAddr);
  }

  return Val;
}
7986 /// During the expansion of a RecordType, an incomplete TypeString is placed
7987 /// into the cache as a means to identify and break recursion.
7988 /// If there is a Recursive encoding in the cache, it is swapped out and will
7989 /// be reinserted by removeIncomplete().
7990 /// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert( (E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}
8005 /// Once the RecordType has been expanded, the temporary incomplete TypeString
8006 /// must be removed from the cache.
8007 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
8008 /// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;

  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert( (E.State == Incomplete ||
           E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}
8036 /// Add the encoded TypeString to the cache only if it is NonRecursive or
8037 /// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State==Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount!=0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive? Recursive : NonRecursive;
}
8056 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
8057 /// are recursively expanding a type (IncompleteCount != 0) and the cached
8058 /// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}
8077 /// The XCore ABI includes a type information section that communicates symbol
8078 /// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
8080 /// The ABI only requires C (and XC) language modules to emit TypeStrings.
8081 /// This type information (TypeString) is emitted into meta data for all global
8082 /// symbols: definitions, declarations, functions & variables.
8084 /// The TypeString carries type, qualifier, name, size & value details.
8085 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
8086 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
8087 /// The output is tested by test/CodeGen/xcore-stringtype.c.
8089 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
8090 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
8092 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
8093 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
8094 CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
8097 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
8098 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
8099 llvm::MDString::get(Ctx, Enc.str())};
8100 llvm::NamedMDNode *MD =
8101 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
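// For example (illustrative): a C global "int g;" yields the TypeString
// "si", so the "xcore.typestrings" node gains an operand pairing @g with
// !"si". See test/CodeGen/xcore-stringtype.c for the full format.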
8106 //===----------------------------------------------------------------------===//
8107 // SPIR ABI Implementation
8108 //===----------------------------------------------------------------------===//
namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
8113 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
8114 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  unsigned getOpenCLKernelCallingConv() const override;
};
8118 } // End anonymous namespace.
namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
8123 DefaultABIInfo SPIRABI(CGM.getTypes());
  SPIRABI.computeInfo(FI);
}
}
}
8129 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}
8133 static bool appendType(SmallStringEnc &Enc, QualType QType,
8134 const CodeGen::CodeGenModule &CGM,
8135 TypeStringCache &TSC);
8137 /// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
8140 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
8141 const RecordDecl *RD,
8142 const CodeGen::CodeGenModule &CGM,
8143 TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ')';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}
8165 /// Appends structure and union types to Enc and adds encoding to cache.
8166 /// Recursively calls appendType (via extractFieldType) for each field.
8167 /// Union types have their fields ordered according to the ABI.
8168 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
8169 const CodeGen::CodeGenModule &CGM,
8170 TypeStringCache &TSC, const IdentifierInfo *ID) {
8171 // Append the cached TypeString if we have one.
8172 StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }
8178 // Start to emit an incomplete TypeString.
8179 size_t Start = Enc.size();
8180 Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";
8186 // We collect all encoded fields and order as necessary.
8187 bool IsRecursive = false;
8188 const RecordDecl *RD = RT->getDecl()->getDefinition();
8189 if (RD && !RD->field_empty()) {
8190 // An incomplete TypeString stub is placed in the cache for this RecordType
8191 // so that recursive calls to this RecordType will use it whilst building a
8192 // complete TypeString for this RecordType.
8193 SmallVector<FieldEncoding, 16> FE;
8194 std::string StubEnc(Enc.substr(Start).str());
8195 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
8196 TSC.addIncomplete(ID, std::move(StubEnc));
8197 if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
8201 IsRecursive = TSC.removeIncomplete(ID);
8202 // The ABI requires unions to be sorted but not structures.
8203 // See FieldEncoding::operator< for sort algorithm.
8204 if (RT->isUnionType())
8205 std::sort(FE.begin(), FE.end());
8206 // We can now complete the TypeString.
8207 unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
8219 /// Appends enum types to Enc and adds the encoding to the cache.
8220 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
8221 TypeStringCache &TSC,
8222 const IdentifierInfo *ID) {
8223 // Append the cached TypeString if we have one.
8224 StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }
8230 size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";
8236 // We collect all encoded enumerations and order them alphanumerically.
8237 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
8238 SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
8262 /// Appends type's qualifier to Enc.
8263 /// This is done prior to appending the type's encoding.
8264 static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
8265 // Qualifiers are emitted in alphabetical order.
8266 static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
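// For example, "const volatile int" sets bits 0 and 2, so Lookup == 5 and the
// prefix "cv:" is emitted, giving "cv:si" once the builtin encoding follows.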
8277 /// Appends built-in types to Enc.
8278 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
8279 const char *EncType;
  switch (BT->getKind()) {
    case BuiltinType::Void:
      EncType = "0";
      break;
    case BuiltinType::Bool:
      EncType = "b";
      break;
    case BuiltinType::Char_U:
      EncType = "uc";
      break;
    case BuiltinType::UChar:
      EncType = "uc";
      break;
    case BuiltinType::SChar:
      EncType = "sc";
      break;
    case BuiltinType::UShort:
      EncType = "us";
      break;
    case BuiltinType::Short:
      EncType = "ss";
      break;
    case BuiltinType::UInt:
      EncType = "ui";
      break;
    case BuiltinType::Int:
      EncType = "si";
      break;
    case BuiltinType::ULong:
      EncType = "ul";
      break;
    case BuiltinType::Long:
      EncType = "sl";
      break;
    case BuiltinType::ULongLong:
      EncType = "ull";
      break;
    case BuiltinType::LongLong:
      EncType = "sll";
      break;
    case BuiltinType::Float:
      EncType = "ft";
      break;
    case BuiltinType::Double:
      EncType = "d";
      break;
    case BuiltinType::LongDouble:
      EncType = "ld";
      break;
    default:
      return false;
  }
  Enc += EncType;
  return true;
}
8336 /// Appends a pointer encoding to Enc before calling appendType for the pointee.
8337 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
8338 const CodeGen::CodeGenModule &CGM,
8339 TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
8347 /// Appends array encoding to Enc before calling appendType for the element.
8348 static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
8349 const ArrayType *AT,
8350 const CodeGen::CodeGenModule &CGM,
8351 TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
8368 /// Appends a function encoding to Enc, calling appendType for the return type
8369 /// and the arguments.
8370 static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
8371 const CodeGen::CodeGenModule &CGM,
8372 TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += "0";
    }
  }
  Enc += ')';
  return true;
}
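// For example (illustrative): a prototype "int f(int)" encodes as
// "f{si}(si)"; a variadic prototype appends ",va" after its last parameter
// encoding, and a parameterless prototype encodes its argument list as "0".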
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
8404 static bool appendType(SmallStringEnc &Enc, QualType QType,
8405 const CodeGen::CodeGenModule &CGM,
8406 TypeStringCache &TSC) {
8408 QualType QT = QType.getCanonicalType();
8410 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
8411 // The Qualifiers should be attached to the type rather than the array.
8412 // Thus we don't call appendQualifier() here.
8413 return appendArrayType(Enc, QT, AT, CGM, TSC, "");
8415 appendQualifier(Enc, QT);
8417 if (const BuiltinType *BT = QT->getAs<BuiltinType>())
8418 return appendBuiltinType(Enc, BT);
8420 if (const PointerType *PT = QT->getAs<PointerType>())
8421 return appendPointerType(Enc, PT, CGM, TSC);
8423 if (const EnumType *ET = QT->getAs<EnumType>())
8424 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
8426 if (const RecordType *RT = QT->getAsStructureType())
8427 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
8429 if (const RecordType *RT = QT->getAsUnionType())
8430 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
8432 if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
8438 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
8439 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
8452 QualType QT = VD->getType().getCanonicalType();
8453 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
8454 // Global ArrayTypes are given a size of '*' if the size is unknown.
8455 // The Qualifiers should be attached to the type rather than the array.
8456 // Thus we don't call appendQualifier() here.
8457 return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}
8465 //===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
8469 bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}
8473 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
8474 if (TheTargetCodeGenInfo)
8475 return *TheTargetCodeGenInfo;
8477 // Helper to set the unique_ptr while still keeping the return value.
8478 auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };
8483 const llvm::Triple &Triple = getTarget().getTriple();
8484 switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
8488 case llvm::Triple::le32:
8489 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
8490 case llvm::Triple::mips:
8491 case llvm::Triple::mipsel:
8492 if (Triple.getOS() == llvm::Triple::NaCl)
8493 return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
8494 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));
8496 case llvm::Triple::mips64:
8497 case llvm::Triple::mips64el:
8498 return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));
8500 case llvm::Triple::avr:
8501 return SetCGInfo(new AVRTargetCodeGenInfo(Types));
8503 case llvm::Triple::aarch64:
8504 case llvm::Triple::aarch64_be: {
8505 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
8506 if (getTarget().getABI() == "darwinpcs")
8507 Kind = AArch64ABIInfo::DarwinPCS;
8508 else if (Triple.isOSWindows())
8509 Kind = AArch64ABIInfo::Win64;
8511 return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
8514 case llvm::Triple::wasm32:
8515 case llvm::Triple::wasm64:
8516 return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));
8518 case llvm::Triple::arm:
8519 case llvm::Triple::armeb:
8520 case llvm::Triple::thumb:
8521 case llvm::Triple::thumbeb: {
8522 if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }
8527 ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
8528 StringRef ABIStr = getTarget().getABI();
8529 if (ABIStr == "apcs-gnu")
8530 Kind = ARMABIInfo::APCS;
8531 else if (ABIStr == "aapcs16")
8532 Kind = ARMABIInfo::AAPCS16_VFP;
8533 else if (CodeGenOpts.FloatABI == "hard" ||
8534 (CodeGenOpts.FloatABI != "soft" &&
8535 (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
8536 Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
8537 Triple.getEnvironment() == llvm::Triple::EABIHF)))
8538 Kind = ARMABIInfo::AAPCS_VFP;
    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
8546 case llvm::Triple::ppc64:
8547 if (Triple.isOSBinFormatELF()) {
8548 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
8549 if (getTarget().getABI() == "elfv2")
8550 Kind = PPC64_SVR4_ABIInfo::ELFv2;
8551 bool HasQPX = getTarget().getABI() == "elfv1-qpx";
8552 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    } else
      return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
8558 case llvm::Triple::ppc64le: {
8559 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
8560 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
8561 if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
8562 Kind = PPC64_SVR4_ABIInfo::ELFv1;
8563 bool HasQPX = getTarget().getABI() == "elfv1-qpx";
8564 bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }
8570 case llvm::Triple::nvptx:
8571 case llvm::Triple::nvptx64:
8572 return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));
8574 case llvm::Triple::msp430:
8575 return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
8577 case llvm::Triple::systemz: {
8578 bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }
8582 case llvm::Triple::tce:
8583 case llvm::Triple::tcele:
8584 return SetCGInfo(new TCETargetCodeGenInfo(Types));
8586 case llvm::Triple::x86: {
8587 bool IsDarwinVectorABI = Triple.isOSDarwin();
8588 bool RetSmallStructInRegABI =
8589 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
8590 bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();
8592 if (Triple.getOS() == llvm::Triple::Win32) {
8593 return SetCGInfo(new WinX86_32TargetCodeGenInfo(
8594 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8595 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
8598 Types, IsDarwinVectorABI, RetSmallStructInRegABI,
8599 IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }
8604 case llvm::Triple::x86_64: {
8605 StringRef ABI = getTarget().getABI();
8606 X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
8609 : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);
8611 switch (Triple.getOS()) {
8612 case llvm::Triple::Win32:
8613 return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
8614 case llvm::Triple::PS4:
8615 return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
8620 case llvm::Triple::hexagon:
8621 return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
8622 case llvm::Triple::lanai:
8623 return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
8624 case llvm::Triple::r600:
8625 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
8626 case llvm::Triple::amdgcn:
8627 return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
8628 case llvm::Triple::sparc:
8629 return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
8630 case llvm::Triple::sparcv9:
8631 return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
8632 case llvm::Triple::xcore:
8633 return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
8634 case llvm::Triple::spir:
8635 case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}